text stringlengths 8 6.05M |
|---|
"""
剑指 Offer 66. 构建乘积数组
给定一个数组 A[0,1,…,n-1],请构建一个数组 B[0,1,…,n-1],其中 B 中的元素 B[i]=A[0]×A[1]×…×A[i-1]×A[i+1]×…×A[n-1]。不能使用除法。
"""
"""
这个不能用除法还真的是神仙操作呢,可以稍微研究一下,其实首先完全可以暴力破解,就根据这个公式来写,但是这样时间复杂度很高,不是最优解法.
得通过观察,假如A = [1,2,3,4,5],那么:
B1 = 1 * 2 * 3 * 4 * 5
B2 = 1 * 1 * 3 * 4 * 5
B3 = 1 * 2 * 1 * 4 * 5
B4 = 1 * 2 * 3 * 1 * 5
B5 = 1 * 2 * 3 * 4 * 1
显然,乘积里边其实是一个矩阵,并且可以分为上三角矩阵和下三角矩阵.而且上下三角之间是可以类似动态规划的规则,所以这就给了我们可乘之机.
"""
def constructArr(a: list) -> list:
    """Return b where b[i] is the product of every element of a except a[i].

    Uses a prefix/suffix product sweep, so no division is needed.
    """
    n = len(a)
    b = [1] * n
    # Left sweep: b[i] accumulates the product of a[0..i-1].
    for i in range(1, n):
        b[i] = b[i - 1] * a[i - 1]
    # Right sweep: fold the product of a[i+1..n-1] into each slot.
    suffix = 1
    for i in range(n - 1, 0, -1):
        suffix *= a[i]
        b[i - 1] *= suffix
    return b
if __name__ == '__main__':
    # Expected output: [120, 60, 40, 30, 24].
    # (A stray "|" artifact after print(res) in the original made this a
    # syntax error; it has been removed.)
    res = constructArr([1, 2, 3, 4, 5])
    print(res)
import pytest
from heap import build_heap, heap
from copy import copy
@pytest.fixture
def hp():
    """Provide each test with a fresh max-heap built from a fixed key set."""
    return build_heap([4, 1, 3, 2, 16, 9, 10, 14, 8, 7])
def _test_e2i(hp):
    """Check that the element->index map agrees with the element array."""
    for idx, elem in enumerate(hp.es[:hp.size]):
        assert hp.e2i[elem] == idx
def _test_heap_property(hp):
    """Assert the max-heap invariant: no child key exceeds its parent's key.

    Uses >= rather than the original strict > because duplicate keys are
    legal in a heap — a child equal to its parent must not fail the check.
    (The fixture's keys happen to be distinct, so existing tests still pass.)
    """
    for i in range(1, hp.size):
        assert hp.keys[hp.parent(i)] >= hp.keys[i]
def _test_basic(hp):
    """Run the structural invariants shared by every heap test."""
    _test_e2i(hp)
    _test_heap_property(hp)
    # The three parallel structures must all track the same element count.
    for container in (hp.keys, hp.es, hp.e2i):
        assert hp.size == len(container)
def test_build_heap(hp):
    # A freshly built heap must already satisfy all structural invariants.
    _test_basic(hp)
def test_pop_max(hp):
    """Repeated pops must yield keys in descending order and shrink the maps."""
    expected = sorted(copy(hp.keys), reverse=True)
    pos = 0
    while hp.size > 0:
        key, elem = hp.pop_max()
        assert expected[pos] == key
        # The fixture inserts each key as its own element, so they match.
        assert expected[pos] == elem
        pos += 1
        assert elem not in hp.e2i
        _test_basic(hp)
def test_insert(hp):
    """Re-inserting the same keys one by one must reproduce the heap layout."""
    rebuilt = heap([], [])
    for key in copy(hp.keys):
        rebuilt.insert(key, key)
    assert rebuilt.keys == hp.keys
    _test_basic(hp)
def test_increase_key(hp):
    # Raising element 1's key to 20 must promote it to the root (index 0).
    hp.increase_key(1, 20)
    assert hp.peep_max() == (20, 1)
    assert hp.e2i[1] == 0
    _test_basic(hp)
def test_decrease_key(hp):
    # Dropping the old maximum (16) to 0 must surface the next-largest key.
    hp.decrease_key(16, 0)
    assert hp.peep_max() == (14, 14)
    _test_basic(hp)
def test_update_key(hp):
    # update_key must handle both directions: first a decrease, then an increase.
    hp.update_key(16, 0)
    assert hp.peep_max() == (14, 14)
    hp.increase_key(1, 20)
    assert hp.peep_max() == (20, 1)
    _test_basic(hp)
|
import math
import csv
import numpy as np
import random as rand
import spectral
import matplotlib.pyplot as plt
print 'Fast Planted Vector'
def test_file():
    """Smoke test: load Y and q from local CSV files and run one reconstruction."""
    Y = np.loadtxt(open("Y.csv","rb"),delimiter=",",skiprows=0)
    q = np.loadtxt(open("q.csv","rb"),delimiter=",",skiprows=0)
    YSY = get_YSY(Y)
    sparse_rec(Y, YSY, q, 0)
def test_mult():
    """Manual check of broadcasting a 1-D array against Y (prints s * Y)."""
    Y = np.loadtxt(open("Y.csv","rb"),delimiter=",",skiprows=0)
    s = np.array([1,2,3])
    print s * Y
# Y: p x n, q: n x 1
# def mult_diag(d, mtx, left=True):
# """Multiply a full matrix by a diagonal matrix.
# This function should always be faster than dot.
# Input:
# d -- 1D (N,) array (contains the diagonal elements)
# mtx -- 2D (N,N) array
# Output:
# mult_diag(d, mts, left=True) == dot(diag(d), mtx)
# mult_diag(d, mts, left=False) == dot(mtx, diag(d))
# """
# if left:
# return (d*mtx.T).T
# else:
# return d*mtx
def get_s(Y):
    """Return the centering weights s[i] = ||y_i||^2 - mean_j(||y_j||^2).

    Y is a (p x n) matrix whose rows are the observations y_i.
    """
    sq_norms = [np.linalg.norm(row, 2) ** 2 for row in Y]
    mean_sq = sum(sq_norms) / Y.shape[0]
    return np.array([v - mean_sq for v in sq_norms])
# (n x p)(p x p)(p x n)
# return: n x n
def get_YSY(Y):
    """Return Y^T diag(s) Y (n x n), the weighted Gram matrix for power iteration."""
    weights = get_s(Y)
    # Broadcasting a length-p vector against the (n x p) transpose scales
    # column j of Y^T by weights[j], i.e. applies diag(weights).
    weighted = weights * Y.T
    return weighted.dot(Y)
def sparse_rec(Y, YSY, q, num_it):
    """Power-iterate q against YSY, then return the reconstruction Y.dot(q).

    num_it > 0 runs exactly that many normalized iterations; num_it == 0
    iterates until q moves by less than 1e-8 in Euclidean norm.
    """
    if num_it > 0:
        for _ in range(num_it):
            step = YSY.dot(q)
            q = step / np.linalg.norm(step, 2)
    else:
        prev = q
        while True:
            step = YSY.dot(q)
            q = step / np.linalg.norm(step, 2)
            if np.linalg.norm(q - prev, 2) < 1e-8:
                break
            prev = q
    return Y.dot(q)
def gen_k_sparse(k, p):
    """Return a length-p 0/1 indicator vector with exactly k ones at random spots.

    If k >= p the all-ones vector is returned. The shuffle target is
    materialized with list(range(p)): identical to the original in Python 2
    (where range already returned a list) and fixes the TypeError the
    original raised under Python 3 (rand.shuffle needs a mutable sequence).
    """
    if k >= p:
        return [1] * p
    positions = list(range(p))
    rand.shuffle(positions)
    x = [0] * p
    for i in range(k):
        x[positions[i]] = 1
    return x
# rows of return matrix are basis vectors
def gen_subspace(p, n, k):
    """Sample an (n x p) basis: n-1 Gaussian rows stacked over one k-sparse row.

    Rows of the returned S span the subspace; x0 is the planted sparse
    vector placed as the last row.
    """
    noise = np.random.normal(0, scale=1 / float(p), size=(n - 1, p))
    x0 = gen_k_sparse(k, p)
    return np.vstack([noise, x0]), x0
def GS(S):
    # Gram-Schmidt step: orthogonalize the rows of S (delegated to the
    # project's `spectral` helper module).
    Y = spectral.orthogonalize(S)
    return Y
def error(x0, x):
    """Return ||x0/||x0|| - x||_2: distance of x from the normalized target."""
    unit_target = x0 / np.linalg.norm(x0, 2)
    return np.linalg.norm(unit_target - x, 2)
def error_2(x0, x):
    """Return the unnormalized reconstruction error ||x0 - x||_2."""
    diff = x0 - x
    return np.linalg.norm(diff, 2)
def deriv_p(n):
    """Return the ambient dimension p = floor(5 * n * ln(n)) used for size n."""
    raw = 5 * n * math.log(n)
    return int(raw)
def gen_instance(n, k):
    """Generate a random (p x n) orthonormal-basis instance with planted x0."""
    p = deriv_p(n)
    S, x0 = gen_subspace(p, n, k)  # raw (n x p) basis with x0 as last row
    Y = GS(S).T                    # orthogonalize, then transpose to (p x n)
    return Y, x0
def run_random(n, k, num_it_alg, num_it_test, Y_rows = True):
    """Run num_it_test random restarts and return the best reconstruction error.

    Each trial seeds the iteration either with a random row of Y
    (Y_rows=True) or with a fresh Gaussian vector, reconstructs via
    sparse_rec, and scores against the planted x0. Returns (best, recs);
    recs is currently always empty (recording code is commented out).
    """
    Y, x0 = gen_instance(n, k)
    YSY = get_YSY(Y)
    best = float("inf")
    recs = []
    for _ in range(num_it_test):
        row = rand.randint(0, len(Y) - 1)
        if Y_rows:
            q = Y[row]
        else:
            q = np.random.normal(0, scale = 1, size = (1, len(Y[row])))[0]
        err = error(x0, sparse_rec(Y, YSY, q, num_it_alg))
        if err < best:
            best = err
    return best, recs
# def run(n, k, num_it_alg):
# Y, x0 = gen_instance(n, k)
# YSY = get_YSY(Y)
# best = float("inf")
# recs = []
# for j in range(0, len(Y)):
# q = Y[j]
# x = sparse_rec(Y, YSY, q, num_it_alg)
# err = error(x0, x)
# if err < best:
# # print 'x0: ', x0
# # print 'Yq: ', Yq
# best = err
# # if len(recs) <= j:
# # recs.append((x0, x))
# # else:
# # recs[j - 1] = (x0, x)
# return best, recs
# def test_random(n_range, k_range, num_it_alg, ep, num_it_test, num_pts):
# succ = ([], [])
# fail = ([], [])
# errs = []
# for t in range(0, num_pts):
# print 'point number: ', t
# n = rand.randint(n_range[0], n_range[-1])
# p = int(deriv_p(n))
# k = rand.randint(k_range[0], k_range[-1])
# err, recs = run_random(n, k, num_it_alg, num_it_test)
# errs.append(err)
# if err <= ep:
# succ[0].append(p)
# succ[1].append(k)
# else:
# fail[0].append(p)
# fail[1].append(k)
# return succ, fail, errs
def test_rand2(n_range, k_range, num_it_alg, num_it_test, num_pts, Y_rows = True):
    """Sample num_pts random (n, k) instances and collect (p, k, error) triples.

    n and k are drawn uniformly from the ends of the supplied ranges; each
    instance is scored with run_random and its best error recorded.
    """
    errs = ([], [], [])
    for t in range(0, num_pts):
        print 'point number: ', t
        n = rand.randint(n_range[0], n_range[-1])
        p = int(deriv_p(n))
        k = rand.randint(k_range[0], k_range[-1])
        err, recs = run_random(n, k, num_it_alg, num_it_test, Y_rows)
        errs[0].append(p)
        errs[1].append(k)
        errs[2].append(err)
    return errs
def test_single_rand(n, k, num_it_alg, num_it_test):
    """Score one random instance of size (n, k); returns the best error only."""
    p = int(deriv_p(n))  # NOTE(review): computed but unused
    err, recs = run_random(n, k, num_it_alg, num_it_test)
    return err
def test_single(n, k, num_it_alg):
    """Score one instance, trying every row of Y as the seed.

    NOTE(review): this calls run(), which is commented out above — invoking
    this function raises NameError until run() is restored.
    """
    p = int(deriv_p(n))  # NOTE(review): computed but unused
    err, recs = run(n, k, num_it_alg)
    return err, recs
# def plot(succ, fail):
# plt.axis([0, 2200, 0, 550])
# plt.xlabel('Ambient Dimension p')
# plt.ylabel('Sparsity k')
# plt.plot(succ[0], succ[1], 'bs')
# plt.plot(fail[0], fail[1], 'rs')
# plt.show()
# x = input()
def plot2(errs):
    """Scatter-plot sparsity k vs ambient dimension p, shaded by error score."""
    plt.axis([0, 2200, 0, 550])
    plt.xlabel('Ambient Dimension p')
    plt.ylabel('Sparsity k')
    plt.scatter(errs[0], errs[1], c = errs[2], s = 100, marker = 's')
    plt.gray()
    plt.show()
    x = input()  # block so the plot window stays open until the user hits enter
def normalize_errs(errs):
    """Map errors into [0, 1] success scores in place, then append an anchor.

    Errors >= 1 become 0 (failure); smaller errors become 1 - err. A dummy
    point (p=0, k=0, score=1) is appended to pin the colormap scale.
    """
    scores = errs[2]
    for idx in range(len(errs[0])):
        scores[idx] = 0 if scores[idx] >= 1 else 1 - scores[idx]
    errs[0].append(0)
    errs[1].append(0)
    errs[2].append(1)
# succ, fail, errs = test_random(range(2,90), range(2,400), 0, 0.001, 20, 200)
# print succ, fail, errs
# plot(succ, fail)
# print test_single(5, 3, 0, 10)
# n = 60
# k = 30
# err, recs = test_single(n, k, 0)
# print recs
# print err
# print deriv_p(n)
# test_file()
# Driver: sample 50 random (n, k) instances with 50 restarts each, then plot.
errs = test_rand2(range(2,90), range(2,300), 0, 50, 50, Y_rows = True)
print errs
normalize_errs(errs)
# print errs
plot2(errs)
# test_mult()
|
# -*- coding: utf-8 -*-
def compute():
num=int(input())
if num == 1:
print("Not Prime")
elif num == 2:
print("Prime")
elif num < 0:
print("Not Prime")
elif (num-1)%2==0:
print("Prime")
else:
print("Not Prime")
compute()
# if (num % 2 !=0) and (num % 3 !=0) and (num % 5 !=0) and (num % 7 !=0) and (num % 9 !=0) and (num % 11 !=0) and (num % 13 !=0) and (num % 17 !=0) and (num % 19 !=0):
# print("Prime")
# else:
# print("Not Prime")
|
import time
from logging import log
import functools
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError as ElasticConnectionError, ConnectionTimeout as ElasticTimeOutError, TransportError, \
ConnectionError
def timeout_fallback(result, enable=False, last_time=5 * 60):
    """Decorator factory: after a failure, serve `result` instead of calling
    the wrapped function for `last_time` seconds.

    When `enable` is True, any exception stores the current timestamp on the
    wrapped function; while that timestamp is fresher than `last_time`
    seconds, calls short-circuit to `result` (logged at DEBUG level) without
    invoking the function. When `enable` is False exceptions propagate.
    """
    def _func(func):
        @functools.wraps(func)
        def wrap(*args, **kwargs):
            key = func.__name__
            now = int(time.time())
            attr = 'fallback_' + key
            try:
                tripped_at = getattr(func, attr, None)
                if tripped_at and tripped_at + last_time > now:
                    # Still inside the fallback window: log and skip the call.
                    log(10, '##FALLBACK##%s##%s##%s##%s##',
                        'fallback: %s, now: %s' % (tripped_at, now),
                        func.__name__, str(args), str(kwargs))
                    return result
                return func(*args, **kwargs)
            except Exception as ex:
                if not enable:
                    raise ex
                # Arm the fallback window and degrade to the canned result.
                setattr(func, attr, now)
                return result
        return wrap
    return _func
def suppress_exception(result, enable=False):
    """Decorator factory: when `enable` is True, swallow any exception raised
    by the wrapped function and return `result` instead; otherwise re-raise.
    """
    def _func(func):
        @functools.wraps(func)
        def wrap(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if not enable:
                    raise e
                return result
        return wrap
    return _func
# Smoke check: with both decorators enabled, the raised exception is swallowed
# by timeout_fallback (which also arms its fallback window) and {} is returned.
@suppress_exception({}, enable=True)
@timeout_fallback({}, enable=True)
def test():
    raise Exception("aaaaaa")
test()
# Elasticsearch cluster nodes; .123 is commented out — presumably
# decommissioned or unhealthy (note it also listed a different port, 9200).
ELASTICSEARCH_CONFIG = [
    {'host': '10.65.128.120', 'port': '9210'},
    {'host': '10.65.128.121', 'port': '9210'},
    {'host': '10.65.128.122', 'port': '9210'},
    #{'host': '10.65.128.123', 'port': '9200'},
    {'host': '10.65.128.124', 'port': '9210'},
    {'host': '10.65.128.125', 'port': '9210'},
]
# 3-second request timeout, one retry, and retry also fires on timeouts.
es = Elasticsearch(ELASTICSEARCH_CONFIG, max_retries=1, timeout=3, retry_on_timeout=True)
#
# try:
# response = es.index(index="aaaa", doc_type='default', id=None, body={})
# except TransportError as ex:
# raise
|
from customers.Aurora.medication_admin.medication_admin_mappings import (
DISCONT_REASON, DOSE, MAR_ACTION, MAR_DURATION, REASON, SITE, UNIT)
from lib.master_fake_data_generator import FakeDataGenerator
class AURORAMedicationAdminFakeDataGenerator(FakeDataGenerator):
    """Fake-data generator for Aurora medication-administration (MAR) rows."""
    def generate_pipeline_row(self, row: str, file_size: int) -> dict:
        """Build one synthetic medication-administration record.

        `row` is accepted for interface compatibility but unused here;
        `file_size` bounds the randomly generated ID values.
        """
        f = self._faker
        r = self._random
        start, end = self.create_start_end_date()
        # Timestamps carry a hard-coded -05:00 offset — presumably US
        # Central time; confirm against the real extract before changing.
        medication_admin = {
            "PAT_ID": f"{r.randint(1,int(file_size))}",
            "ORDER_MED_ID": f"{r.randint(1,int(file_size))}",
            "LINE": f.random_number(),
            "PAT_ENC_CSN_ID": self.random_or_empty(f"{r.randint(1,int(file_size))}"),
            "MEDICATION_ID": f.random_number(5),
            "SCHEDULED_TIME": self.random_or_empty(f"{start}T{self.get_time_string()}:000-05:00"),
            "TAKEN_TIME": self.random_or_empty(f"{start}T{self.get_time_string()}:000-05:00"),
            "DOSE_VALUE": f.random_element(DOSE),
            "DOSE_UNIT": f.random_element(UNIT),
            "MAR_ACTION": f.random_element(list(MAR_ACTION)),
            "REASON": f.random_element(REASON),
            "SITE": f.random_element(SITE),
            "MAR_DURATION": f.random_element(MAR_DURATION),
            "DURATION": f.random_element(['', 'Hours', 'Minutes', 'Days']),
            "DISCON_TIME": self.random_or_empty(f"{end}T{self.get_time_string()}:000-05:00"),
            "RSN_FOR_DISCON": f.random_element(DISCONT_REASON),
            "PROCESS_EXTRACT_DTTM": f"{self.get_current_date()}:{r.randint(100,999)}-05:00",
        }
        return medication_admin
|
# import the necessary packages
from report_handler import handle_report
from datetime import datetime
from pyimagesearch.utils import Conf
class TrackableObject:
    """A tracked object (vehicle) with its centroid history and the state of
    any illegal-zone blocking event it is involved in."""

    def __init__(self, objectID, centroid, licenseNumber=''):
        # store the object ID, then initialize a list of centroids
        # using the current centroid
        self.objectID = objectID
        self.centroids = [centroid]
        self.licenseNumber = licenseNumber
        self.alarmThread = None
        # start time of tracking in seconds
        self.startTime = None
        # elapsed time from tracking
        self.elapsedTime = None
        # marker color; green while the object is in a legal zone
        self.markerColor = (0, 255, 0)  # green
        # True once the permitted time in the illegal zone has passed
        self.isBlocking = False
        # True once the object passed the line into the illegal zone
        self.passedTheLine = False
        # True when the trackable object was first seen inside the illegal zone
        self.createdInIllegalZone = False
        # start and end dates of the current blocking event
        self.startEventDate = None
        self.endEventDate = None
        self.isApproved = False
        self.blockFrame = None

    def handle_enter_illegal_zone(self, startTime):
        """Mark the object as having crossed into the illegal zone."""
        self.passedTheLine = True
        self.startTime = startTime
        self.markerColor = (0, 255, 255)
        print("[INFO] ID {} entered illegal zone".format(self.objectID))

    def handle_time_passed(self, alarmThread, frame, isAlarmFeatureOn, isReportFeatureOn, whiteList=None):
        """Start a blocking event once the permitted time has elapsed.

        whiteList now defaults to None (instead of a mutable default list);
        is_in_white_list treats both falsy values identically.
        """
        print("[INFO] ID {} passed permitted time in illegal zone".format(self.objectID))
        self.isBlocking = True
        self.markerColor = (255, 0, 0)
        self.startEventDate = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
        self.endEventDate = None
        self.blockFrame = frame
        self.isApproved = self.is_in_white_list(whiteList)
        if isReportFeatureOn:
            handle_report(self, frame)
        # Only raise the alarm for vehicles that are not whitelisted.
        if isAlarmFeatureOn and not self.isApproved:
            self.alarmThread = alarmThread
            self.alarmThread.start(self.objectID)

    def handle_returning_to_legal_zone(self, isAlarmFeatureOn, isReportFeatureOn):
        """Reset zone state and close any active blocking event."""
        print("[INFO] ID {} back to legal zone".format(self.objectID))
        self.passedTheLine = False
        self.elapsedTime = 0
        self.markerColor = (0, 255, 0)
        if self.isBlocking:
            self.isBlocking = False
            self.endEventDate = datetime.today().strftime('%Y-%m-%d-%H-%M-%S')
            print("[INFO] ID {} End Alarm".format(self.objectID))
            if isReportFeatureOn:
                handle_report(self, self.blockFrame)
            if isAlarmFeatureOn and self.alarmThread:
                self.alarmThread.stop()

    def is_in_white_list(self, whiteList):
        """Return True when this object's license number appears in whiteList."""
        if self.licenseNumber and whiteList:
            for approvedNumber in whiteList:
                if self.licenseNumber == approvedNumber:
                    return True
        return False
|
from leetcode.tree import printtree
class TreeNode(object):
    """Plain binary-tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    # NOTE(review): reverseTree is an unimplemented stub — it always returns
    # None. The name suggests inverting the tree, but the declared
    # :rtype: is List[int]; confirm the intended contract before implementing.
    def reverseTree(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
if __name__ == "__main__":
    # Build the sample tree used by the pretty-printer:
    #            5
    #          /   \
    #         4     8
    #        /     / \
    #      11    13   4
    #      / \     \ / \
    #     7   2    9 15 1
    root = TreeNode(5)
    n1 = TreeNode(4)
    n2 = TreeNode(8)
    root.left = n1
    root.right = n2
    n3 = TreeNode(11)
    n4 = TreeNode(7)
    n5 = TreeNode(2)
    n1.left = n3
    n3.left = n4
    n3.right = n5
    n6 = TreeNode(13)
    n6.right = TreeNode(9)
    n7 = TreeNode(4)
    n7.left = TreeNode(15)
    n2.left = n6
    n2.right = n7
    n7.right = TreeNode(1)
    printtree.Pretty_print(root)
    # Instantiate the solver. The original assigned the class object itself,
    # followed by a stray "|" that made the line a syntax error.
    solu = Solution()
from django.core.exceptions import ValidationError
from django.db import models
from borg_utils.publish_status import EnabledStatus
class PublishAction(object):
    """
    Represent all the pending actions after last publish.

    Pending actions are kept as a bitmask in self._action; each
    publish_*_action class attribute is one flag bit (publish_all_action
    subsumes the others).
    """
    publish_all_action = 1
    publish_data_action = 4
    publish_feature_action = 256
    publish_gwc_action = 512
    # Maps a changed model column to the publish action that change requires.
    _change_type_mapping = {
        "sql": publish_data_action,
        "input_table":publish_data_action,
        "relation_1":publish_data_action,
        "relation_2":publish_data_action,
        "relation_3":publish_data_action,
        "normal_tables":publish_data_action,
        "create_extra_index_sql": publish_data_action,
        "kmi_title": publish_feature_action,
        "kmi_abstract":publish_feature_action,
        "applications":publish_feature_action,
        "geoserver_setting":publish_gwc_action
    }
    # Columns that may never change on an existing publish.
    _forbidding_columns = ["name","workspace"]

    def __init__(self,action=0):
        self._action = action or 0
        self._possible_data_changed = False

    def __bool__(self):
        # NOTE(review): truthy means "no pending action" — the inverse of
        # has_action. Verify callers rely on this before inverting it.
        return self._action == 0

    def __nonzero__(self):
        # Python 2 compatibility alias for __bool__.
        return self.__bool__()

    def __str__(self):
        """Human-readable summary of the pending actions."""
        result = ""
        if self.publish_all:
            result = "All"
        else:
            if self.publish_data:
                result = "Data"
            elif self._possible_data_changed:
                result = "Data?"
            if self.publish_feature or self.publish_gwc:
                result += "Metadata" if result == "" else " , Metadata"
        return result

    @property
    def possible_data_changed(self):
        return self._possible_data_changed

    @possible_data_changed.setter
    def possible_data_changed(self,value):
        self._possible_data_changed = value

    def edit(self,instance):
        """Recompute the pending actions implied by editing `instance`.

        Compares `instance` field by field against its stored version; a
        brand-new instance (no pk / not found) requires a full publish.
        Returns self for chaining.
        """
        existing_instance = None
        from tablemanager.models import Publish
        if instance.pk:
            existing_instance = Publish.objects.get(pk = instance.pk)
        self._action = 0
        if existing_instance:
            self._action = existing_instance.pending_actions or 0
            for f in instance._meta.fields:
                rel1 = getattr(instance,f.name)
                rel2 = getattr(existing_instance,f.name)
                if isinstance(f,models.OneToOneField):
                    if (rel1 == None or rel1.is_empty):
                        if (rel2 == None or rel2.is_empty):
                            pass
                        else:
                            self.column_changed(f.name)
                    elif (rel2 == None or rel2.is_empty):
                        self.column_changed(f.name)
                    else:
                        # Compare the related normal_tables pairwise.
                        index = 0
                        for t in rel1.normal_tables:
                            if t == rel2.normal_tables[index]:
                                pass
                            else:
                                self.column_changed(f.name)
                                break
                            index += 1
                elif f.name == "status":
                    if rel1 != EnabledStatus.instance().name:
                        # A disabled publish needs no pending actions at all.
                        self._action = 0
                        break
                    elif rel1 != rel2:
                        self._action = self.publish_all_action
                        break
                else:
                    if rel1 != rel2:
                        self.column_changed(f.name)
        else:
            self._action = self.publish_all_action
        return self

    def column_changed(self,column):
        """Record that `column` changed and fold in the action it requires."""
        if self._action == self.publish_all_action:
            return self
        if column in self._forbidding_columns:
            raise ValidationError("Changing the column ({0}) value is not supported".format(column))
        self._action |= self._change_type_mapping.get(column,0)
        return self

    def _clear_action(self,action):
        self._action &= ~action

    @property
    def actions(self):
        # None (rather than 0) when nothing is pending.
        return self._action or None

    @property
    def has_action(self):
        return self._action > 0

    @property
    def publish_all(self):
        return self._action & self.publish_all_action == self.publish_all_action

    def clear_all_action(self):
        # The original defined clear_all_action twice; the earlier duplicate
        # (which did not return self and was shadowed) has been removed.
        self._action = 0
        return self

    @property
    def publish_gwc(self):
        return self._action & self.publish_gwc_action == self.publish_gwc_action

    def clear_gwc_action(self):
        self._clear_action(self.publish_gwc_action)
        return self

    @property
    def publish_data(self):
        return self._action & self.publish_data_action == self.publish_data_action

    def clear_data_action(self):
        self._clear_action(self.publish_data_action)
        return self

    @property
    def publish_feature(self):
        return self._action & self.publish_feature_action == self.publish_feature_action

    def clear_feature_action(self):
        self._clear_action(self.publish_feature_action)
        return self
|
#!/usr/bin/env python3
# NeoPixel library strandtest example
# Author: Tony DiCola (tony@tonydicola.com)
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import argparse
import base64
import fcntl
import hashlib
import json
import logging
import os
import random
import socket
import ssl
import struct
import sys
import time
from collections import defaultdict
from datetime import datetime
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from socketserver import ThreadingMixIn
from subprocess import Popen, check_output, call
from threading import Thread
from time import sleep, strftime

import requests
from rpi_ws281x import *
# LED strip configuration:
LED_COUNT = 120      # Number of LED pixels.
LED_PIN = 18         # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10        # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10         # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False   # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0      # set to '1' for GPIOs 13, 19, 41, 45 or 53
LEDS_PER_HUE_LIGHT = 30
# Integer division: this is a count of emulated Hue lights. Python 3's "/"
# yielded the float 4.0, which then leaked into the JSON /detect reply.
HUE_LIGHTS_COUNT = LED_COUNT // LEDS_PER_HUE_LIGHT
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=50):
    """Wipe color across display a pixel at a time."""
    delay = wait_ms / 1000.0
    for idx in range(strip.numPixels()):
        strip.setPixelColor(idx, color)
        strip.show()
        time.sleep(delay)
def theaterChase(strip, color, wait_ms=50, iterations=10):
    """Movie theater light style chaser animation."""
    delay = wait_ms / 1000.0
    for _ in range(iterations):
        for offset in range(3):
            # Light every third pixel starting at `offset`...
            for idx in range(0, strip.numPixels(), 3):
                strip.setPixelColor(idx + offset, color)
            strip.show()
            time.sleep(delay)
            # ...then turn those pixels back off before the next offset.
            for idx in range(0, strip.numPixels(), 3):
                strip.setPixelColor(idx + offset, 0)
def wheel(pos):
    """Generate rainbow colors across 0-255 positions."""
    if pos < 85:
        r, g, b = pos * 3, 255 - pos * 3, 0
    elif pos < 170:
        pos -= 85
        r, g, b = 255 - pos * 3, 0, pos * 3
    else:
        pos -= 170
        r, g, b = 0, pos * 3, 255 - pos * 3
    return Color(r, g, b)
def rainbow(strip, wait_ms=20, iterations=1):
    """Draw rainbow that fades across all pixels at once."""
    delay = wait_ms / 1000.0
    for step in range(256 * iterations):
        for idx in range(strip.numPixels()):
            strip.setPixelColor(idx, wheel((idx + step) & 255))
        strip.show()
        time.sleep(delay)
def rainbowCycle(strip, wait_ms=20, iterations=5):
    """Draw rainbow that uniformly distributes itself across all pixels."""
    delay = wait_ms / 1000.0
    for step in range(256 * iterations):
        for idx in range(strip.numPixels()):
            strip.setPixelColor(idx, wheel((int(idx * 256 / strip.numPixels()) + step) & 255))
        strip.show()
        time.sleep(delay)
def theaterChaseRainbow(strip, wait_ms=50):
    """Rainbow movie theater light style chaser animation."""
    delay = wait_ms / 1000.0
    for step in range(256):
        for offset in range(3):
            for idx in range(0, strip.numPixels(), 3):
                strip.setPixelColor(idx + offset, wheel((idx + step) % 255))
            strip.show()
            time.sleep(delay)
            for idx in range(0, strip.numPixels(), 3):
                strip.setPixelColor(idx + offset, 0)
# protocols = [yeelight, tasmota, native_single, native_multi]
cwd = os.path.split(os.path.abspath(__file__))[0]
def pretty_json(data):
    """Serialize `data` as human-readable, key-sorted, 4-space-indented JSON."""
    opts = dict(sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(data, **opts)
run_service = True
# Bridge state: full config tree, newly discovered lights, and sensor cache.
bridge_config = defaultdict(lambda:defaultdict(str))
new_lights = {}
sensors_state = {}
class HueHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
server_version = 'nginx'
sys_version = ''
    def _set_headers(self, plain=False):
        """Send a 200 status plus a Content-Type inferred from the request path."""
        self.send_response(200)
        mimetypes = {"json": "application/json", "map": "application/json", "html": "text/html", "xml": "application/xml", "js": "text/javascript", "css": "text/css", "png": "image/png"}
        if plain:
            self.send_header('Content-type', 'text/plain')
        elif self.path.endswith((".html",".json",".css",".map",".png",".js", ".xml")):
            # Static asset: pick the mime type from the file extension.
            self.send_header('Content-type', mimetypes[self.path.split(".")[-1]])
        elif self.path.startswith("/api"):
            self.send_header('Content-type', mimetypes["json"])
        else:
            self.send_header('Content-type', mimetypes["html"])
    def _set_AUTHHEAD(self):
        """Send a 401 response demanding HTTP Basic authentication."""
        self.send_response(401)
        self.send_header('WWW-Authenticate', 'Basic realm=\"Hue\"')
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def _set_end_headers(self, data):
        """Finish the header block with Content-Length and write the body bytes."""
        self.send_header('Content-Length', len(data))
        self.end_headers()
        self.wfile.write(data)
    def do_GET(self):
        """Serve the web UI, the /detect|/on|/off endpoints, and Hue REST API reads.

        NOTE(review): datetime is referenced below but never imported at the
        top of this file — those branches raise NameError at runtime until
        "from datetime import datetime" is added. `strip` is also assumed to
        be a module-level LED strip object created elsewhere — confirm.
        """
        #Some older Philips Tv's sent non-standard HTTP GET requests with a Content-Length and a
        # body. The HTTP body needs to be consumed and ignored in order to request be handle correctly.
        global bridge_config
        self.read_http_request_body()
        if self.path == '/' or self.path == '/index.html':
            self._set_headers()
            f = open(cwd + '/web-ui/index.html')
            self._set_end_headers(bytes(f.read(), "utf8"))
        elif self.path.endswith((".css",".map",".png",".js")):
            self._set_headers()
            f = open(cwd + '/web-ui' + self.path, 'rb')
            self._set_end_headers(f.read())
        elif self.path == '/detect':
            # Discovery endpoint: advertise this emulator as a Hue light strip.
            self._set_headers(plain=True)
            self._set_end_headers(bytes(json.dumps(
                {"hue": "strip","lights": HUE_LIGHTS_COUNT ,"name": 'StriPi',"modelid": "LST002", "mac": bridge_config["config"]["mac"]}
            ), "utf8"))
        # elif self.path == '/get':
        #     self._set_headers(plain=True)
        #     self._set_end_headers(bytes(json.dumps([{"on": power_status,
        #                                              "bri": bri[light],
        #                                              "xy": [x[light], y[light]],
        #                                              "ct": ct[light],
        #                                              "sat": sat[light],
        #                                              "hue": hue[light],
        #                                              "colormode": colormode}])))
        elif self.path == '/on':
            self._set_headers()
            colorWipe(strip, Color(255, 0, 0)) # Red wipe
            self._set_end_headers(bytes(json.dumps([{"success":{"configuration":"saved","filename":"/opt/hue-emulator/config.json"}}] ,separators=(',', ':')), "utf8"))
        elif self.path == '/off':
            self._set_headers()
            colorWipe(strip, Color(0, 0, 0)) # Clear wipe
            self._set_end_headers(bytes(json.dumps([{"success":{"configuration":"saved","filename":"/opt/hue-emulator/config.json"}}] ,separators=(',', ':')), "utf8"))
        else:
            # Hue REST API: /api/<username>/<resource>[/<id>[/new]]
            url_pices = self.path.rstrip('/').split('/')
            if len(url_pices) < 3:
                #self._set_headers_error()
                self.send_error(404, 'not found')
                return
            else:
                self._set_headers()
                if url_pices[2] in bridge_config["config"]["whitelist"]: #if username is in whitelist
                    bridge_config["config"]["UTC"] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
                    bridge_config["config"]["localtime"] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
                    bridge_config["config"]["whitelist"][url_pices[2]]["last use date"] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
                    bridge_config["config"]["linkbutton"] = int(bridge_config["linkbutton"]["lastlinkbuttonpushed"]) + 30 >= int(datetime.now().strftime("%s"))
                    if len(url_pices) == 3: #print entire config
                        self._set_end_headers(bytes(json.dumps({"lights": bridge_config["lights"], "groups": bridge_config["groups"], "config": bridge_config["config"], "scenes": bridge_config["scenes"], "schedules": bridge_config["schedules"], "rules": bridge_config["rules"], "sensors": bridge_config["sensors"], "resourcelinks": bridge_config["resourcelinks"]},separators=(',', ':')), "utf8"))
                    elif len(url_pices) == 4: #print specified object config
                        self._set_end_headers(bytes(json.dumps(bridge_config[url_pices[3]],separators=(',', ':')), "utf8"))
                    elif len(url_pices) == 5:
                        if url_pices[4] == "new": #return new lights and sensors only
                            new_lights.update({"lastscan": datetime.now().strftime("%Y-%m-%dT%H:%M:%S")})
                            self._set_end_headers(bytes(json.dumps(new_lights ,separators=(',', ':')), "utf8"))
                            new_lights.clear()
                        elif url_pices[3] == "groups" and url_pices[4] == "0":
                            # Group 0 is the implicit all-lights group: aggregate on-state.
                            any_on = False
                            all_on = True
                            for group_state in bridge_config["groups"].keys():
                                if bridge_config["groups"][group_state]["state"]["any_on"] == True:
                                    any_on = True
                                else:
                                    all_on = False
                            self._set_end_headers(bytes(json.dumps({"name":"Group 0","lights": [l for l in bridge_config["lights"]],"sensors": [s for s in bridge_config["sensors"]],"type":"LightGroup","state":{"all_on":all_on,"any_on":any_on},"recycle":False,"action":{"on":False,"alert":"none"}},separators=(',', ':')), "utf8"))
                        elif url_pices[3] == "info" and url_pices[4] == "timezones":
                            self._set_end_headers(bytes(json.dumps(bridge_config["capabilities"][url_pices[4]]["values"],separators=(',', ':')), "utf8"))
                        else:
                            self._set_end_headers(bytes(json.dumps(bridge_config[url_pices[3]][url_pices[4]],separators=(',', ':')), "utf8"))
                elif (url_pices[2] == "nouser" or url_pices[2] == "none" or url_pices[2] == "config"): #used by applications to discover the bridge
                    self._set_end_headers(bytes(json.dumps({"name": bridge_config["config"]["name"],"datastoreversion": 70, "swversion": bridge_config["config"]["swversion"], "apiversion": bridge_config["config"]["apiversion"], "mac": bridge_config["config"]["mac"], "bridgeid": bridge_config["config"]["bridgeid"], "factorynew": False, "replacesbridgeid": None, "modelid": bridge_config["config"]["modelid"],"starterkitid":""},separators=(',', ':')), "utf8"))
                else: #user is not in whitelist
                    self._set_end_headers(bytes(json.dumps([{"error": {"type": 1, "address": self.path, "description": "unauthorized user" }}],separators=(',', ':')), "utf8"))
def read_http_request_body(self):
return b"{}" if self.headers['Content-Length'] is None or self.headers[
'Content-Length'] == '0' else self.rfile.read(int(self.headers['Content-Length']))
    def do_POST(self):
        """Handle Hue API writes: the /updater hook, resource creation, and
        new-user registration via the (emulated) link button.

        NOTE(review): datetime is used below but not imported at the top of
        this file; sendRequest, scanForLights, nextFreeId,
        generateSensorsState and saveConfig are presumably defined elsewhere
        in the full file — none are visible here, confirm before refactoring.
        """
        self._set_headers()
        logging.info("in post method")
        logging.info(self.path)
        self.data_string = self.read_http_request_body()
        if self.path == "/updater":
            logging.info("check for updates")
            update_data = json.loads(sendRequest("https://raw.githubusercontent.com/diyhue/diyHue/master/BridgeEmulator/updater", "GET", "{}"))
            for category in update_data.keys():
                for key in update_data[category].keys():
                    logging.info("patch " + category + " -> " + key )
                    bridge_config[category][key] = update_data[category][key]
            self._set_end_headers(bytes(json.dumps([{"success": {"/config/swupdate/checkforupdate": True}}],separators=(',', ':')), "utf8"))
        else:
            # Strip whitespace some clients embed in their JSON payloads.
            raw_json = self.data_string.decode('utf8')
            raw_json = raw_json.replace("\t","")
            raw_json = raw_json.replace("\n","")
            post_dictionary = json.loads(raw_json)
            logging.info(self.data_string)
            url_pices = self.path.rstrip('/').split('/')
            if len(url_pices) == 4: #data was posted to a location
                if url_pices[2] in bridge_config["config"]["whitelist"]:
                    if ((url_pices[3] == "lights" or url_pices[3] == "sensors") and not bool(post_dictionary)):
                        #if was a request to scan for lights of sensors
                        Thread(target=scanForLights).start()
                        sleep(7) #give no more than 5 seconds for light scanning (otherwise will face app disconnection timeout)
                        self._set_end_headers(bytes(json.dumps([{"success": {"/" + url_pices[3]: "Searching for new devices"}}],separators=(',', ':')), "utf8"))
                    elif url_pices[3] == "":
                        self._set_end_headers(bytes(json.dumps([{"success": {"clientkey": "321c0c2ebfa7361e55491095b2f5f9db"}}],separators=(',', ':')), "utf8"))
                    else: #create object
                        # find the first unused id for new object
                        new_object_id = nextFreeId(bridge_config, url_pices[3])
                        if url_pices[3] == "scenes":
                            post_dictionary.update({"lightstates": {}, "version": 2, "picture": "", "lastupdated": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"), "owner" :url_pices[2]})
                            if "locked" not in post_dictionary:
                                post_dictionary["locked"] = False
                        elif url_pices[3] == "groups":
                            post_dictionary.update({"action": {"on": False}, "state": {"any_on": False, "all_on": False}})
                        elif url_pices[3] == "schedules":
                            # Mirror localtime/time into each other whichever was supplied.
                            try:
                                post_dictionary.update({"created": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"), "time": post_dictionary["localtime"]})
                            except KeyError:
                                post_dictionary.update({"created": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"), "localtime": post_dictionary["time"]})
                            if post_dictionary["localtime"].startswith("PT"):
                                post_dictionary.update({"starttime": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")})
                            if not "status" in post_dictionary:
                                post_dictionary.update({"status": "enabled"})
                        elif url_pices[3] == "rules":
                            post_dictionary.update({"owner": url_pices[2], "lasttriggered" : "none", "created": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"), "timestriggered": 0})
                            if not "status" in post_dictionary:
                                post_dictionary.update({"status": "enabled"})
                        elif url_pices[3] == "sensors":
                            if "state" not in post_dictionary:
                                post_dictionary["state"] = {}
                            if post_dictionary["modelid"] == "PHWA01":
                                post_dictionary.update({"state": {"status": 0}})
                            elif post_dictionary["modelid"] == "PHA_CTRL_START":
                                post_dictionary.update({"state": {"flag": False, "lastupdated": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")}, "config": {"on": True,"reachable": True}})
                        elif url_pices[3] == "resourcelinks":
                            post_dictionary.update({"owner" :url_pices[2]})
                        generateSensorsState()
                        bridge_config[url_pices[3]][new_object_id] = post_dictionary
                        logging.info(json.dumps([{"success": {"id": new_object_id}}], sort_keys=True, indent=4, separators=(',', ': ')))
                        self._set_end_headers(bytes(json.dumps([{"success": {"id": new_object_id}}], separators=(',', ':')), "utf8"))
                else:
                    self._set_end_headers(bytes(json.dumps([{"error": {"type": 1, "address": self.path, "description": "unauthorized user" }}], separators=(',', ':')), "utf8"))
                    logging.info(json.dumps([{"error": {"type": 1, "address": self.path, "description": "unauthorized user" }}],sort_keys=True, indent=4, separators=(',', ': ')))
            elif self.path.startswith("/api") and "devicetype" in post_dictionary: #new registration by linkbutton
                if int(bridge_config["linkbutton"]["lastlinkbuttonpushed"])+30 >= int(datetime.now().strftime("%s")) or bridge_config["config"]["linkbutton"]:
                    username = hashlib.new('ripemd160', post_dictionary["devicetype"][0].encode('utf-8')).hexdigest()[:32]
                    bridge_config["config"]["whitelist"][username] = {"last use date": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"),"create date": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"),"name": post_dictionary["devicetype"]}
                    response = [{"success": {"username": username}}]
                    if "generateclientkey" in post_dictionary and post_dictionary["generateclientkey"]:
                        response[0]["success"]["clientkey"] = "321c0c2ebfa7361e55491095b2f5f9db"
                    self._set_end_headers(bytes(json.dumps(response,separators=(',', ':')), "utf8"))
                    logging.info(json.dumps(response, sort_keys=True, indent=4, separators=(',', ': ')))
                else:
                    self._set_end_headers(bytes(json.dumps([{"error": {"type": 101, "address": self.path, "description": "link button not pressed" }}], separators=(',', ':')), "utf8"))
        saveConfig()
def do_PUT(self):
    """Handle Hue API PUT requests.

    Routes on the URL depth (/api/<user>/<resource>[/<id>[/<element>]]) and
    mutates the global ``bridge_config`` accordingly, forwarding state changes
    to lights/groups where needed. Responds with the Hue-style list of
    ``{"success": {...}}`` entries, or an unauthorized error.
    """
    self._set_headers()
    logging.info("in PUT method")
    self.data_string = self.rfile.read(int(self.headers['Content-Length']))
    put_dictionary = json.loads(self.data_string.decode('utf8'))
    url_pices = self.path.rstrip('/').split('/')
    logging.info(self.path)
    logging.info(self.data_string)
    if url_pices[2] in bridge_config["config"]["whitelist"]:
        if len(url_pices) == 4:
            # /api/<user>/<resource>: update a whole resource category.
            bridge_config[url_pices[3]].update(put_dictionary)
            response_location = "/" + url_pices[3] + "/"
        if len(url_pices) == 5:
            # /api/<user>/<resource>/<id>: update one resource.
            if url_pices[3] == "schedules":
                # Re-arm a timer-style ("PT...") schedule when it is (re)enabled.
                if "status" in put_dictionary and put_dictionary["status"] == "enabled" and bridge_config["schedules"][url_pices[4]]["localtime"].startswith("PT"):
                    put_dictionary.update({"starttime": (datetime.utcnow()).strftime("%Y-%m-%dT%H:%M:%S")})
            elif url_pices[3] == "scenes":
                if "storelightstate" in put_dictionary:
                    # Snapshot the lights' current state into the scene.
                    for light in bridge_config["scenes"][url_pices[4]]["lightstates"]:
                        bridge_config["scenes"][url_pices[4]]["lightstates"][light] = {}
                        bridge_config["scenes"][url_pices[4]]["lightstates"][light]["on"] = bridge_config["lights"][light]["state"]["on"]
                        bridge_config["scenes"][url_pices[4]]["lightstates"][light]["bri"] = bridge_config["lights"][light]["state"]["bri"]
                        if "colormode" in bridge_config["lights"][light]["state"]:
                            if bridge_config["lights"][light]["state"]["colormode"] in ["ct", "xy"]:
                                # Copy whichever color attribute matches the active colormode.
                                bridge_config["scenes"][url_pices[4]]["lightstates"][light][bridge_config["lights"][light]["state"]["colormode"]] = bridge_config["lights"][light]["state"][bridge_config["lights"][light]["state"]["colormode"]]
                            elif bridge_config["lights"][light]["state"]["colormode"] == "hs" and "hue" in bridge_config["scenes"][url_pices[4]]["lightstates"][light]:
                                bridge_config["scenes"][url_pices[4]]["lightstates"][light]["hue"] = bridge_config["lights"][light]["state"]["hue"]
                                bridge_config["scenes"][url_pices[4]]["lightstates"][light]["sat"] = bridge_config["lights"][light]["state"]["sat"]
            if url_pices[3] == "sensors":
                # Record per-attribute update timestamps used by the rules engine.
                current_time = datetime.now()
                for key, value in put_dictionary.items():
                    if key not in sensors_state[url_pices[4]]:
                        sensors_state[url_pices[4]][key] = {}
                    if type(value) is dict:
                        bridge_config["sensors"][url_pices[4]][key].update(value)
                        for element in value.keys():
                            sensors_state[url_pices[4]][key][element] = current_time
                    else:
                        bridge_config["sensors"][url_pices[4]][key] = value
                        sensors_state[url_pices[3]][url_pices[4]][key] = current_time
                rulesProcessor(url_pices[4], current_time)
                if url_pices[4] == "1" and bridge_config[url_pices[3]][url_pices[4]]["modelid"] == "PHDL00":
                    bridge_config["sensors"]["1"]["config"]["configured"] = True ##mark daylight sensor as configured
            elif url_pices[3] == "groups" and "stream" in put_dictionary:
                if "active" in put_dictionary["stream"]:
                    if put_dictionary["stream"]["active"]:
                        # Spawn the DTLS entertainment streaming helper process.
                        logging.info("start hue entertainment")
                        Popen(["/opt/hue-emulator/entertainment-srv", "server_port=2100", "dtls=1", "psk_list=" + url_pices[2] + ",321c0c2ebfa7361e55491095b2f5f9db"])
                        sleep(0.2)
                        bridge_config["groups"][url_pices[4]]["stream"].update({"active": True, "owner": url_pices[2], "proxymode": "auto", "proxynode": "/bridge"})
                    else:
                        logging.info("stop hue entertainent")
                        Popen(["killall", "entertainment-srv"])
                        bridge_config["groups"][url_pices[4]]["stream"].update({"active": False, "owner": None})
                else:
                    bridge_config[url_pices[3]][url_pices[4]].update(put_dictionary)
            elif url_pices[3] == "lights" and "config" in put_dictionary:
                bridge_config["lights"][url_pices[4]]["config"].update(put_dictionary["config"])
                if "startup" in put_dictionary["config"] and bridge_config["lights_address"][url_pices[4]]["protocol"] == "native":
                    # Native (ESP) lights take the startup mode as a raw POST flag.
                    if put_dictionary["config"]["startup"]["mode"] == "safety":
                        sendRequest("http://" + bridge_config["lights_address"][url_pices[4]]["ip"] + "/", "POST", {"startup": 1})
                    elif put_dictionary["config"]["startup"]["mode"] == "powerfail":
                        sendRequest("http://" + bridge_config["lights_address"][url_pices[4]]["ip"] + "/", "POST", {"startup": 0})
                    #add exception on json output as this dictionary has tree levels
                    response_dictionary = {"success":{"/lights/" + url_pices[4] + "/config/startup": {"mode": put_dictionary["config"]["startup"]["mode"]}}}
                    self._set_end_headers(bytes(json.dumps(response_dictionary,separators=(',', ':')), "utf8"))
                    logging.info(json.dumps(response_dictionary, sort_keys=True, indent=4, separators=(',', ': ')))
                    # Early return: this response shape differs from the generic one below.
                    return
            else:
                bridge_config[url_pices[3]][url_pices[4]].update(put_dictionary)
            response_location = "/" + url_pices[3] + "/" + url_pices[4] + "/"
        if len(url_pices) == 6:
            # /api/<user>/<resource>/<id>/<element>: state update.
            if url_pices[3] == "groups": #state is applied to a group
                if url_pices[5] == "stream":
                    if "active" in put_dictionary:
                        if put_dictionary["active"]:
                            logging.info("start hue entertainment")
                            Popen(["/opt/hue-emulator/entertainment-srv", "server_port=2100", "dtls=1", "psk_list=" + url_pices[2] + ",321c0c2ebfa7361e55491095b2f5f9db"])
                            sleep(0.2)
                            bridge_config["groups"][url_pices[4]]["stream"].update({"active": True, "owner": url_pices[2], "proxymode": "auto", "proxynode": "/bridge"})
                        else:
                            Popen(["killall", "entertainment-srv"])
                            bridge_config["groups"][url_pices[4]]["stream"].update({"active": False, "owner": None})
                elif "scene" in put_dictionary: #scene applied to group
                    splitLightsToDevices(url_pices[4], {}, bridge_config["scenes"][put_dictionary["scene"]]["lightstates"])
                elif "bri_inc" in put_dictionary or "ct_inc" in put_dictionary:
                    splitLightsToDevices(url_pices[4], put_dictionary)
                elif "scene_inc" in put_dictionary:
                    switchScene(url_pices[4], put_dictionary["scene_inc"])
                elif url_pices[4] == "0": #if group is 0 the scene applied to all lights
                    groupZero(put_dictionary)
                else: # the state is applied to particular group (url_pices[4])
                    if "on" in put_dictionary:
                        bridge_config["groups"][url_pices[4]]["state"]["any_on"] = put_dictionary["on"]
                        bridge_config["groups"][url_pices[4]]["state"]["all_on"] = put_dictionary["on"]
                    bridge_config["groups"][url_pices[4]][url_pices[5]].update(put_dictionary)
                    splitLightsToDevices(url_pices[4], put_dictionary)
            elif url_pices[3] == "lights": #state is applied to a light
                for key in put_dictionary.keys():
                    if key in ["ct", "xy"]: #colormode must be set by bridge
                        bridge_config["lights"][url_pices[4]]["state"]["colormode"] = key
                    elif key in ["hue", "sat"]:
                        bridge_config["lights"][url_pices[4]]["state"]["colormode"] = "hs"
                updateGroupStats(url_pices[4])
                sendLightRequest(url_pices[4], put_dictionary)
            if not url_pices[4] == "0": #group 0 is virtual, must not be saved in bridge configuration
                try:
                    bridge_config[url_pices[3]][url_pices[4]][url_pices[5]].update(put_dictionary)
                except KeyError:
                    bridge_config[url_pices[3]][url_pices[4]][url_pices[5]] = put_dictionary
            if url_pices[3] == "sensors" and url_pices[5] == "state":
                # Timestamp each changed state attribute and run the rules engine.
                current_time = datetime.now()
                for key in put_dictionary.keys():
                    sensors_state[url_pices[4]]["state"].update({key: current_time})
                rulesProcessor(url_pices[4], current_time)
            response_location = "/" + url_pices[3] + "/" + url_pices[4] + "/" + url_pices[5] + "/"
        if len(url_pices) == 7:
            try:
                bridge_config[url_pices[3]][url_pices[4]][url_pices[5]][url_pices[6]].update(put_dictionary)
            except KeyError:
                bridge_config[url_pices[3]][url_pices[4]][url_pices[5]][url_pices[6]] = put_dictionary
            # NOTE(review): this unconditional assignment makes the try/except above
            # moot — it always replaces (not merges) the element. Confirm intended.
            bridge_config[url_pices[3]][url_pices[4]][url_pices[5]][url_pices[6]] = put_dictionary
            response_location = "/" + url_pices[3] + "/" + url_pices[4] + "/" + url_pices[5] + "/" + url_pices[6] + "/"
        # Standard Hue success response: one entry per updated attribute.
        response_dictionary = []
        for key, value in put_dictionary.items():
            response_dictionary.append({"success":{response_location + key: value}})
        self._set_end_headers(bytes(json.dumps(response_dictionary,separators=(',', ':')), "utf8"))
        logging.info(json.dumps(response_dictionary, sort_keys=True, indent=4, separators=(',', ': ')))
    else:
        self._set_end_headers(bytes(json.dumps([{"error": {"type": 1, "address": self.path, "description": "unauthorized user" }}],separators=(',', ':')), "utf8"))
def do_DELETE(self):
    """Handle Hue API DELETE requests.

    Deletes the addressed resource from ``bridge_config`` and cleans up
    dependent entries (related sensors, light addresses, deconz mappings,
    scene references).
    """
    self._set_headers()
    url_pices = self.path.rstrip('/').split('/')
    if url_pices[2] in bridge_config["config"]["whitelist"]:
        if len(url_pices) == 6:
            # /api/<user>/<resource>/<id>/<element>: delete just the sub-element.
            del bridge_config[url_pices[3]][url_pices[4]][url_pices[5]]
        else:
            if url_pices[3] == "resourcelinks":
                # Garbage-collect orphaned resources in the background.
                Thread(target=resourceRecycle).start()
            elif url_pices[3] == "sensors":
                ## delete also related sensors
                # Sensors sharing the same MAC prefix (first 26 chars of uniqueid)
                # belong to the same physical device.
                for sensor in list(bridge_config["sensors"]):
                    if sensor != url_pices[4] and "uniqueid" in bridge_config["sensors"][sensor] and bridge_config["sensors"][sensor]["uniqueid"].startswith(bridge_config["sensors"][url_pices[4]]["uniqueid"][:26]):
                        del bridge_config["sensors"][sensor]
                        logging.info('Delete related sensor ' + sensor)
            del bridge_config[url_pices[3]][url_pices[4]]
        if url_pices[3] == "lights":
            # Also drop the light's address entry, deconz mapping, and scene references.
            del bridge_config["lights_address"][url_pices[4]]
            for light in list(bridge_config["deconz"]["lights"]):
                if bridge_config["deconz"]["lights"][light]["bridgeid"] == url_pices[4]:
                    del bridge_config["deconz"]["lights"][light]
            for scene in list(bridge_config["scenes"]):
                if "lights" in bridge_config["scenes"][scene] and url_pices[4] in bridge_config["scenes"][scene]["lights"]:
                    bridge_config["scenes"][scene]["lights"].remove(url_pices[4])
                    del bridge_config["scenes"][scene]["lightstates"][url_pices[4]]
                    if len(bridge_config["scenes"][scene]["lights"]) == 0:
                        # Scene no longer references any light; drop it entirely.
                        del bridge_config["scenes"][scene]
        elif url_pices[3] == "sensors":
            for sensor in list(bridge_config["deconz"]["sensors"]):
                if bridge_config["deconz"]["sensors"][sensor]["bridgeid"] == url_pices[4]:
                    del bridge_config["deconz"]["sensors"][sensor]
        self._set_end_headers(bytes(json.dumps([{"success": "/" + url_pices[3] + "/" + url_pices[4] + " deleted."}],separators=(',', ':')), "utf8"))
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each request in its own thread."""
    pass
def get_ip_address(ifname):
    """Return the IPv4 address assigned to the network interface *ifname* (Linux only)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Interface names are limited to 15 characters in an ifreq struct.
    packed_ifname = struct.pack('256s', bytes(ifname[:15], 'utf-8'))
    # 0x8915 is SIOCGIFADDR; the address bytes sit at offset 20..24 of the result.
    ifreq = fcntl.ioctl(sock.fileno(), 0x8915, packed_ifname)
    return socket.inet_ntoa(ifreq[20:24])
def run(iface, https, server_class=ThreadingSimpleServer, handler_class=HueHandler):
    """Serve the emulator API on *iface*: HTTPS on port 443 when *https* is true, else HTTP on port 80."""
    ip = get_ip_address(iface)
    print ('ip address: ', ip)
    if https:
        httpd = server_class((ip, 443), handler_class)
        # TLSv1.2+ only, server-chosen single ECDSA suite on the P-256 curve.
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ctx.load_cert_chain(certfile="./cert.pem")
        ctx.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_CIPHER_SERVER_PREFERENCE
        ctx.set_ciphers('ECDHE-ECDSA-AES128-GCM-SHA256')
        ctx.set_ecdh_curve('prime256v1')
        #ctx.set_alpn_protocols(['h2', 'http/1.1'])
        httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
        logging.info('Starting ssl httpd...')
    else:
        httpd = server_class((ip, 80), handler_class)
        logging.info('Starting httpd...')
    httpd.serve_forever()
    httpd.server_close()
if __name__ == '__main__':
    # Entry point: start the Hue emulator HTTP server and drive the NeoPixel strip.
    # Process arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
    parser.add_argument('-i', '--interface', default='wlan0.1', help='free network interface to use')
    args = parser.parse_args()
    # Create NeoPixel object with appropriate configuration.
    # NOTE(review): LED_* constants and Adafruit_NeoPixel are defined/imported
    # earlier in the file (outside this view).
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    # Intialize the library (must be called once before other functions).
    strip.begin()
    print ('Press Ctrl-C to quit.')
    if not args.clear:
        print('Use "-c" argument to clear LEDs on exit')
    try:
        # Serve the Hue API over plain HTTP in a background thread (HTTPS variant disabled).
        Thread(target=run, args=[args.interface, False]).start()
        # Thread(target=run, args=[args.interface, True]).start()
        while True:
            # Main thread idles; the LED animation demos below are disabled.
            sleep(10)
            # print ('Color wipe animations.')
            # colorWipe(strip, Color(255, 0, 0))  # Red wipe
            # colorWipe(strip, Color(0, 255, 0))  # Blue wipe
            # colorWipe(strip, Color(0, 0, 255))  # Green wipe
            # print ('Theater chase animations.')
            # theaterChase(strip, Color(127, 127, 127)) # White theater chase
            # theaterChase(strip, Color(127, 0, 0)) # Red theater chase
            # theaterChase(strip, Color( 0, 0, 127)) # Blue theater chase
            # print ('Rainbow animations.')
            # rainbow(strip)
            # rainbowCycle(strip)
            # theaterChaseRainbow(strip)
    except KeyboardInterrupt:
        # Optionally blank the LEDs before exiting.
        if args.clear:
            colorWipe(strip, Color(0,0,0), 10)
        logging.exception('Server Stopped')
    finally:
        run_service = False
|
from requests_ import groups_get, friends_get, groups_list_info, check_user
from functions import json_to_file, print_json_file, sort_groups
import datetime

"""
Input (the user identifier) is entered via the console. The argument may be either an id or a screen_name.
Identifiers I used while testing the program:
372957 - deleted user
anyagrapes - banned user
eshmargunov - public profile, the program runs end to end
arbore - private profile
"""

if __name__ == '__main__':
    print('Введите id пользователя или его screen name')
    user_input = input('>>>').lower()
    user_id = check_user(user_input)
    # Verify access to the user's data (the function accepts both a screen name and a user id).
    groups = groups_get(user_id)  # all of the user's groups
    friends = friends_get(user_id)  # all of the user's friends
    timer = str(datetime.timedelta(seconds=(len(groups) * 0.5)))
    """Estimate of the waiting time: 0.4 s delay between requests + ~0.1 s of
    ancillary work, multiplied by the number of requests."""
    print(f'Примерное время выполнение кода: {timer} \n Ожидайте...')
    sorted_groups = sort_groups(groups, friends)
    print('Группы в которых состоит пользователь, но не состоят его друзья:\n' + str(sorted_groups) + '\n')
    groups_info = groups_list_info(sorted_groups)  # ask the VK API for info on the groups in the resulting list
    json_to_file(groups_info)  # serialize the data to a .json file
    print('Для вывода в консоль содержимого файла "groups.json" нажмите Enter \n\nДля окончания работы '
          'программы введите любой символ и нажмите Enter\n>>>')
    user_input = input()
    if user_input == '':
        print_json_file()
    else:
        print('До свидания!')
|
import pygame
import json
from ship import Ship
# TO-DO: reset frame-timer to 0 everytime the scoreboard transition thing is called.
class Scoreboard:
    """Display level, remaining ships, high score, and current score.

    The scoreboard fades out (via per-surface alpha) while the player's ship
    overlaps it, and fades back in once the ship has moved away.
    """

    def __init__(self, main_game):
        """Cache game references, load fonts, and render all initial readouts."""
        self.main_game = main_game
        self.settings = self.main_game.settings
        self.stats = self.main_game.stats
        self.screen = main_game.screen
        self.screen_rect = self.main_game.screen_rect
        # Info font
        self.text_color = (235, 236, 240)
        self.font = pygame.font.Font("font/TravelingTypewriter.ttf", 30)
        self.hs_font = pygame.font.Font("font/TravelingTypewriter.ttf", 20)
        self.update_score()
        self.update_high_score()
        self.update_ships()
        self.update_level()
        # The ship is considered "under" the scoreboard above this y-coordinate.
        self.scoreboard_height = self.high_score_rect.bottom
        # Invisibility settings
        self.alpha = 255  # 255: opaque, 0: transparent
        self.frame_timer = 0  # For decreasing and increasing alpha
        self.alpha_modifier = 255 // (self.settings.FPS // 10)
        self.up = False  # Boolean to detect whether to increase opacity or not

    def update_score(self):
        """Re-render the current score, centered at the top of the screen."""
        rounded = round(self.stats.score)
        score_formatted = f"{rounded:,}"
        self.score_display = self.font.render(score_formatted, True, self.text_color, None)
        # Display the score in the midtop of the screen
        self.score_rect = self.score_display.get_rect()
        self.score_rect.midtop = self.screen_rect.midtop

    def update_high_score(self):
        """Re-render the high score along the left edge of the screen."""
        rounded = round(self.stats.high_score)
        score_formatted = f"High score: {rounded:,}"
        self.high_score_display = self.hs_font.render(score_formatted, True, self.text_color, None)
        # BUG FIX: the rect must come from the high-score surface, not the
        # score surface — otherwise its size (and scoreboard_height, which is
        # derived from it) reflect the wrong rendered text.
        self.high_score_rect = self.high_score_display.get_rect()
        self.high_score_rect.left = self.screen_rect.left
        self.high_score_rect.top = 50

    def update_ships(self):
        """Show how many ship lives remain as a row of ship icons."""
        self.ships = pygame.sprite.Group()
        for ship_number in range(self.stats.ships_left):
            ship = Ship(self.main_game)
            ship.rect.x = 5 + ship_number * ship.rect.width
            ship.rect.y = 10
            self.ships.add(ship)

    def update_level(self):
        """Update the current level readout (top-right corner)."""
        level_str = f"Level: {self.stats.level}"
        self.level_display = self.hs_font.render(level_str, True, self.text_color, None)
        self.level_rect = self.level_display.get_rect()
        self.level_rect.right = self.screen_rect.right - 20
        self.level_rect.top = 20

    def display_info(self):
        """Fade the scoreboard depending on ship position, then blit all readouts."""
        if self.main_game.ship.y <= self.scoreboard_height:
            self._transparent_scoreboard()
        else:
            self._opaque_scoreboard()
        self.screen.blit(self.score_display, self.score_rect)
        self.screen.blit(self.high_score_display, self.high_score_rect)
        self.screen.blit(self.level_display, self.level_rect)
        self.ships.draw(self.screen)

    def check_high_score(self):
        """Check to see if there is a new high score; if so, re-render it."""
        if self.stats.score > self.stats.high_score:
            self.stats.high_score = self.stats.score
            self.update_high_score()

    def save_high_score(self):
        """Save high score to a JSON file."""
        filepath = "high_score.json"
        with open(filepath, mode="w") as f:
            json.dump(self.stats.high_score, f)

    def load_high_score(self):
        """Load high score from a JSON file; keep the default when the file is absent."""
        filepath = "high_score.json"
        try:
            with open(filepath, mode="r") as f:
                self.stats.high_score = json.load(f)
            self.update_high_score()
        except FileNotFoundError:
            return

    def _transparent_scoreboard(self):
        """Step the fade-out animation (called while the ship overlaps the scoreboard)."""
        if self.frame_timer == self.settings.FPS // 4:
            self.frame_timer = 0
            self.up = not self.up
        if self.up:
            self.alpha -= self.alpha_modifier
            if self.alpha < 0:
                self.alpha = 0
                self.frame_timer = 0
            self._set_alpha()
        self.frame_timer += 1

    def _opaque_scoreboard(self):
        """Step the fade-in animation (called once the ship has moved away)."""
        if self.frame_timer == self.settings.FPS // 4:
            self.frame_timer = 0
            self.up = not self.up  # Reverse boolean. Ship will go transparent first anyway
        if not self.up:
            self.alpha += self.alpha_modifier
            if self.alpha > 255:
                self.alpha = 255
                self.frame_timer = 0
            self._set_alpha()
        self.frame_timer += 1

    def _set_alpha(self):
        """Apply the current alpha to every scoreboard surface, ships included."""
        self.score_display.set_alpha(self.alpha)
        self.high_score_display.set_alpha(self.alpha)
        self.level_display.set_alpha(self.alpha)
        for ship in self.ships:
            ship.image.set_alpha(self.alpha)
from distutils.core import setup

# Packaging metadata for the "vent" distribution.
# NOTE(review): several listed package names contain hyphens
# ('vent.core.file-drop', 'vent.core.rq-worker', ...). Hyphenated names are
# not importable Python packages — confirm these directories are intentionally
# packaged under these names.
setup(
    name='vent',
    version='0.2.1',
    packages=['vent', 'vent.core', 'vent.core.file-drop', 'vent.core.rq-worker', 'vent.core.rq-dashboard',
              'vent.core.template-change', 'vent.core.rmq-es-connector', 'vent.helpers', 'tests', 'scripts',
              'scripts.info_tools', 'scripts.service_urls'],
    url='',
    license='Apache License',
    author='arpit',
    author_email='',
    description=''
)
|
#this will work
# Melih Özşeker
# Simple console calculator: read an operator and two integers, print the result.
islem = input("islemi giriniz:")
sayi1 = int(input("Sayi1:"))
sayi2 = int(input("Sayi2:"))
if islem == "+":
    sonuc = sayi1 + sayi2
    print("Sonuc:", str(sonuc))
elif islem == "-":
    sonuc = sayi1 - sayi2
    print("Sonuc:", str(sonuc))
elif islem == "*":
    # BUG FIX: this branch previously subtracted instead of multiplying.
    sonuc = sayi1 * sayi2
    print("Sonuc:", str(sonuc))
elif islem == "/":
    # BUG FIX: this branch previously subtracted instead of dividing.
    if sayi2 == 0:
        # Guard against division by zero instead of crashing.
        print("Sonuc: tanimsiz (sifira bolme)")
    else:
        sonuc = sayi1 / sayi2
        print("Sonuc:", str(sonuc))
|
import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5 import uic
# Index 0 is a placeholder for the combo box's "선택" (select) entry; indices 1-3
# map to the three demo driving videos.
video_list = [None, "real_drive.mp4", "car_driving.mp4", "highway.mp4"]
class MyWindow(QMainWindow):
    """Qt window that plays a driving video and overlays lane-line detection.

    The UI (line_detect.ui) provides a video-selection combo box and four
    sliders that tune the binarization threshold, the Hough line parameter,
    and the x/y bounds of the region in which detected segments are drawn.
    """
    def __init__(self):
        super().__init__()
        self.ui = uic.loadUi("line_detect.ui", self)
        # Sliders in order: threshold, Hough line parameter, ROI x, ROI y.
        self.horislider_list = [self.horizontalSlider_thre, self.horizontalSlider_line, self.horizontalSlider_x, self.horizontalSlider_y]
        self.setup_ui()
    def set_horizontal(self, i, min, max, path):
        # Configure slider i's range and connect its valueChanged signal to *path*.
        self.horislider_list[i].setMinimum(min)
        self.horislider_list[i].setMaximum(max)
        self.horislider_list[i].valueChanged.connect(path)
    def setup_ui(self):
        # Populate the combo box, wire the sliders, and show the usage help text.
        self.comboBox.addItems(['선택', '1번 화면', '2번 화면', '3번 화면'])
        self.comboBox.currentIndexChanged.connect(self.select_video)
        self.set_horizontal(0, 190, 250, self.change_threparm)
        self.set_horizontal(1, 1, 60, self.change_lineparm)
        self.set_horizontal(2, 100, 500, self.change_x)
        self.set_horizontal(3, 100, 400, self.change_y)
        self.textEdit.setText("콤보박스는 자동차 주행 영상을 선택\n\nThre param은 Thresh의 값 조절\n\nline param은 라인 값 조절\n\n x, y param는 x, y를 조절하여 차선 그리는 면적 설정 조절 가능\n")
        self.textEdit.append("영상을 선택 후 다른 영상을 선택하려면 \nq를 눌러 종료 후 바꿔야 합니다")
    def change_threparm(self, size):
        # Slider callback: update the binarization threshold.
        self.label_thre.setText(f"Thre param: {size}")
        self.thre = size
    def change_lineparm(self, size):
        # Slider callback: update the Hough 'rho' resolution parameter.
        self.label_line.setText(f"Line param: {size}")
        self.line_param = size
    def change_x(self, size):
        # Slider callback: update the left bound of the drawing region.
        self.label_x.setText(f"x param: {size}")
        self.x = size
    def change_y(self, size):
        # Slider callback: update the upper bound of the drawing region.
        self.label_y.setText(f"y param: {size}")
        self.y = size
    def select_video(self, cur_index):
        # Combo-box callback: open the chosen video and start the display loop.
        # NOTE(review): index 0 maps to None in video_list — confirm selecting
        # the placeholder entry is expected to produce an unopened capture.
        print(f"index: {cur_index}")
        self.cap = cv2.VideoCapture(video_list[cur_index])
        if cur_index == 1:
            self.select_state(cur_index, 250, 210, 3, 250, 520, 760, 610)
        elif cur_index == 2:
            self.select_state(cur_index, 250, 210, 3, 350, 420, 750, 600)
        elif cur_index == 3:
            self.select_state(cur_index, 171, 96, 3, 400, 500, 900, 570)
        self.view(cur_index)
    def select_state(self, index, thre, canny, line_param, x, y, hold_x, hold_y):
        # Store per-video tuning defaults (threshold, Canny low bound, Hough
        # parameter, and the ROI rectangle x..hold_x / y..hold_y).
        self.index = index
        self.thre = thre
        self.canny = canny
        self.line_param = line_param
        self.x = x
        self.y = y
        self.hold_x = hold_x
        self.hold_y = hold_y
        print(f"thre: {self.thre}, canny: {self.canny}, line_parm: {self.line_param}")
    def view(self, index):
        # Playback loop: show original, preprocessed, and line-overlay windows
        # until the video ends or 'q' is pressed.
        while 1:
            # NOTE(review): two consecutive reads per iteration — 'video' is
            # processed while 'frame' (the next frame) is shown as "original",
            # effectively halving the displayed frame rate. Confirm intended.
            ret, video = self.cap.read()
            ret, frame = self.cap.read()
            if not ret:
                print("영상 끝.")
                cv2.destroyAllWindows()
                break
            pre_img = self.data_preprocessing(video, self.thre)
            line_img = self.draw_line(pre_img, video, self.line_param)
            frame = cv2.resize(frame, dsize=(500, 300))
            pre_img = cv2.resize(pre_img, dsize=(500, 300))
            line_img = cv2.resize(line_img, dsize=(500, 300))
            cv2.imshow("original", frame)
            cv2.imshow("pre_dataprocessing", pre_img)
            cv2.imshow("draw_line", line_img)
            if cv2.waitKey(33) == ord('q'):
                cv2.destroyAllWindows()
                break
    def data_preprocessing(self, video, thre):
        # Grayscale -> blur -> inverse-threshold -> morphological close -> Canny edges.
        gray = cv2.cvtColor(video, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (3, 3), 0)
        # NOTE(review): the blur is applied twice with identical parameters —
        # confirm the double pass is intentional.
        gray = cv2.GaussianBlur(gray, (3, 3), 0)
        ret, binary_img = cv2.threshold(gray, thre, 255, cv2.THRESH_TOZERO_INV)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        binary_img = cv2.dilate(binary_img, kernel)
        binary_img = cv2.erode(binary_img, kernel)
        dst = cv2.Canny(binary_img, self.canny, 271)
        return dst
    def draw_line(self, video, frame, line_param):
        # Detect line segments with the probabilistic Hough transform and draw
        # the ones inside the configured region onto *frame*.
        if self.index != 3:
            linesP = cv2.HoughLinesP(video, line_param, np.pi / 180, 50, None, 17, 15)
        # NOTE(review): this call unconditionally overwrites the result above,
        # so the (17, 15) parameters are never used — confirm intended.
        linesP = cv2.HoughLinesP(video, line_param, np.pi / 180, 50, None, 3, 3)
        if linesP is not None:
            for i in range(0, len(linesP)):
                l = linesP[i][0]
                # print(l)
                if (self.x < l[0] < self.hold_x) and (self.y < l[1] < self.hold_y):
                    out = cv2.line(frame, (l[0], l[1]), (l[2], l[3]), (0, 255, 255), 3, cv2.LINE_AA)
        # NOTE(review): 'out' is unbound when no segment falls inside the
        # region (or linesP is None) — this would raise UnboundLocalError.
        return out
def main():
    """Create the Qt application, show the lane-detection window, and run the event loop."""
    app = QApplication(sys.argv)
    editor = MyWindow()
    editor.show()
    app.exec_()

if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
# Compound-interest table: read a starting amount, an annual interest rate
# (percent) and a number of months, then print the balance month by month.
# (Earlier experimental versions of this script were removed as dead code.)
m = int(input())
# SECURITY NOTE: eval() on raw user input executes arbitrary code. Kept for
# compatibility (it accepts expressions such as "5/2"), but float(input())
# would be the safe choice.
p = eval(input())
mon = int(input())
print("%s %s" % ("Month", "Amount"))
for i in range(1, mon + 1):
    # Monthly interest = annual percent rate / 12 months / 100 = p / 1200.
    m = m + m * p / 1200
    print("%3d %.2f" % (i, m))
import EoN
import numpy as np
def simulation(G, tau, gamma, rho, max_time, number_infected_before_release, release_number, background_inmate_turnover,
               stop_inflow_at_intervention, p, death_rate, percent_infected, percent_recovered, social_distance,
               social_distance_tau, initial_infected_list):
    """Runs a simulation on SIR model.
    Args:
        G: Networkx graph
        tau: transmission rate
        gamma: recovery rate
        rho: percent of inmates that are initially infected
        max_time: # of time steps to run simulation
        number_infected_before_release: number of infected at which to perform release on next integer time
        release_number: # of inmates to release at release intervention
        background_inmate_turnover: background # of inmates added/released at each time step
        stop_inflow_at_intervention: should we stop the background inflow of inmates at intervention time?
        p: probability of contact between inmate and other inmates
        death_rate: percent of recovered inmates that die
        percent_infected: percent of general population that is infected
        percent_recovered: percent of general population that is recovered
        social_distance: boolean flag, if we lower transmission rate after major release
        social_distance_tau: new transmission rate after major release
        initial_infected_list: sets node numbers of initial infected (default is 0, this parameter is arbitrary)
    Returns:
        t: array of times at which events occur
        S: # of susceptible inmates at each time
        I: # of infected inmates at each time
        R: # of recovered inmates at each time
        D: # of dead inmates at each time step
    """
    print('Starting simulation...')
    release_occurred = False
    background_release_number = background_inmate_turnover
    data_list = []
    recovered_list = []
    delta_recovered_list = []
    # Check we are using initial_infected_list
    if initial_infected_list is not None:
        print('Using initial infected list to set initial infected.')
        infected_list = initial_infected_list.copy()
    else:  # Choose random initial infections based on rho
        print('Using rho to set initial infected.')
        infected_list = list(np.random.choice(list(G.nodes), int(np.ceil(rho * len(G.nodes))), replace=False))
    # Loop over time: the epidemic is advanced one time unit at a time so that
    # inmate turnover (and possibly an intervention) can be applied between runs.
    for i in range(max_time):
        # Run 1 time unit of simulation
        data = EoN.fast_SIR(G, tau, gamma, initial_infecteds=infected_list, initial_recovereds=recovered_list,
                            tmin=i, tmax=i + 1, return_full_data=True)
        data_list.append(data)
        # Update infected and recovered inmate lists
        # NOTE(review): get_infected/get_recovered are helpers defined elsewhere
        # in this module (not visible in this chunk).
        infected_list, recovered_list = get_infected(data, i + 1), get_recovered(data, i + 1)
        # Check if release condition has been met (interventions fire at most once)
        if not release_occurred and len(infected_list) >= number_infected_before_release:
            background_inmate_turnover, r_n, tau = enact_interventions(background_inmate_turnover,
                                                                      background_release_number, i + 1,
                                                                      infected_list, release_number,
                                                                      social_distance,
                                                                      social_distance_tau,
                                                                      stop_inflow_at_intervention,
                                                                      tau)
            release_occurred = True
        else:  # If not, use background release rate
            r_n = background_release_number
        # Add and release inmates
        G, infected_list, recovered_list, delta_recovered = recalibrate_graph(G, infected_list, recovered_list,
                                                                             background_inmate_turnover, r_n, p,
                                                                             percent_infected, percent_recovered,
                                                                             death_rate)
        # Track the number of recovered inmates added or released at each time step
        delta_recovered_list.append(delta_recovered)
    # Process raw data into t, S, I, R, D arrays
    t, S, I, R, D = process_data(data_list, delta_recovered_list, death_rate)
    print('Simulation completed.\n')
    return t, S, I, R, D
# Helper Functions
def enact_interventions(background_inmate_turnover, background_release_number, time, infected_list, release_number,
                        social_distance, social_distance_tau, stop_inflow_at_intervention, tau):
    """Apply the configured intervention package once the release condition is met.

    Returns the (possibly zeroed) background turnover, the total number of
    inmates to release at this step, and the (possibly reduced) transmission
    rate.
    """
    print(f'Release intervention condition met:\n\tTime: {time}\n\t# of infected: {len(infected_list)}')
    # Start from the background release rate; the intervention adds on top.
    releases_this_step = background_release_number
    if release_number:
        print(f'\tReleasing {release_number} inmates.')
        releases_this_step = releases_this_step + release_number
    # Optionally shut off the inflow of new inmates from this point on.
    if stop_inflow_at_intervention:
        print('\tStopping inmate inflow.')
        background_inmate_turnover = 0
    # Optionally lower the transmission rate to model social distancing.
    if social_distance:
        print('\tEnacting social distancing.')
        tau = social_distance_tau
    return background_inmate_turnover, releases_this_step, tau
def recalibrate_graph(G, infected_list, recovered_list, birth_number, release_number, p,
                      percent_infected, percent_recovered, death_rate):
    """Apply one step of inmate turnover: release inmates, then admit new ones.

    Args:
        G: a Networkx graph
        infected_list: list of infected nodes
        recovered_list: list of recovered nodes
        birth_number: # of inmates added at each time step
        release_number: # of inmates to release
        p: probability of contact between a new inmate and each existing inmate
        percent_infected: percent of the general population that is infected
        percent_recovered: percent of the general population that is recovered
        death_rate: percent of recovered inmates that die

    Returns:
        The updated graph, the updated infected and recovered lists, and the
        net change in the recovered population caused by this turnover.
    """
    # Releases first, then admissions, mirroring the order of a time step.
    G, infected_list, recovered_list, released_recovered = remove_nodes(
        G, infected_list, recovered_list, release_number, death_rate)
    G, added_recovered = add_nodes(
        G, infected_list, recovered_list, birth_number, p, percent_infected, percent_recovered)
    # Net gain (or loss) of recovered inmates from turnover this step.
    return G, infected_list, recovered_list, added_recovered - released_recovered
def process_data(data_list, delta_recovered_list, death_rate: float):
    """Processes raw simulation loop data list into plottable times, S, I, and R arrays.
    Args:
        data_list: list of Simulation_Investigation objects as output by simulation
        delta_recovered_list: list of change in recovered inmates at each time step due to additions/releases
        death_rate: percent of recovered inmates that die
    Returns:
        t: array of times at which events occur
        S: # of susceptible inmates at each time step
        I: # of infected inmates at each time step
        R: # of recovered inmates at each time step
        D: # of dead inmates at each time step
    """
    # Get t, S, I, R data from first time step
    first_time, first_dict_of_states = data_list[0].summary()
    times_ll = [first_time]
    susceptible_ll = [first_dict_of_states['S']]
    infected_ll = [first_dict_of_states['I']]
    recovered_ll = [first_dict_of_states['R']]
    # For next time steps, get data, but delete first element of each time step to fix "recovered bug"
    # (the first sample of each 1-unit run duplicates the last sample of the previous run).
    for data in data_list[1:]:
        times, dict_of_states = data.summary()
        # Append each time's data to appropriate list
        times_ll.append(np.delete(times, 0))
        susceptible_ll.append(np.delete(dict_of_states['S'], 0))  # Deletes first element because of "recovered bug"
        infected_ll.append(np.delete(dict_of_states['I'], 0))
        recovered_ll.append(np.delete(dict_of_states['R'], 0))
    # Aggregate quantities into single lists
    t, S, I, R = np.concatenate(times_ll), np.concatenate(susceptible_ll), \
                 np.concatenate(infected_ll), np.concatenate(recovered_ll)
    # Calculate deaths: split EoN's "recovered" compartment into recovered and dead.
    R, D = calculate_deaths(t, R, delta_recovered_list, death_rate)
    return t, S, I, R, D
def remove_nodes(G, infected_list, recovered_list, release_number, death_rate):
    """Removes release_number inmates from G, selecting inmates of state proportional to the percentage of their
    state in the prison.

    Returns the updated graph, the updated infected and recovered lists, and
    the number of recovered inmates among those released.

    Raises:
        Exception: when no alive inmates remain to be released.
    """
    num_recovered_released = 0
    # Get list of susceptible inmates (nodes that are neither infected nor recovered)
    susceptible_list = list(np.setdiff1d(G.nodes, np.union1d(infected_list, recovered_list)))
    for i in range(release_number):
        # Calculate proportion of inmates that are susceptible, infected, or recovered (not dead)
        num_of_recovered_not_dead = np.floor(len(recovered_list) * (1 - death_rate))
        dm = len(susceptible_list) + len(infected_list) + num_of_recovered_not_dead
        # Prevent division by 0
        if dm == 0:
            raise Exception(
                'All inmates died or got released from prison :( Try turning down max_time or background '
                'turnover rate')
        # Proportion of state = # of inmates of state / # of alive inmates
        ps = len(susceptible_list) / dm
        pi = len(infected_list) / dm
        pr = num_of_recovered_not_dead / dm
        # Select state of inmate to remove according to their percentage of prison population
        state = np.random.choice(['S', 'I', 'R'], p=[ps, pi, pr])
        if state == 'S':
            removed_inmate = susceptible_list.pop()  # We assume lists of inmates are ordered randomly
        elif state == 'I':
            removed_inmate = infected_list.pop()
        else:
            removed_inmate = recovered_list.pop()
            num_recovered_released += 1
        G.remove_node(removed_inmate)
    return G, infected_list, recovered_list, num_recovered_released
def add_nodes(G, infected_list, recovered_list, birth_number, p, percent_infected, percent_recovered):
    """Adds birth_number inmates to G, with probability p of an edge forming between new node and each existing node.

    Each new inmate is assigned state S/I/R with probabilities
    (1 - percent_infected - percent_recovered, percent_infected, percent_recovered).
    infected_list/recovered_list are extended in place; returns (G, number of
    new inmates that entered in the 'R' state).
    """
    num_recovered_added = 0
    # Add birth_number new inmates
    for i in range(birth_number):
        # NOTE(review): assumes node labels are integers and the most recently
        # inserted node has the largest ID — TODO confirm for all callers.
        inmate_id = list(G.nodes)[-1] + 1  # Make sure node ID doesn't already exist
        G.add_node(inmate_id)
        # Set state of new inmate
        percent_susceptible = 1 - percent_infected - percent_recovered
        state = np.random.choice(['S', 'I', 'R'], p=[percent_susceptible, percent_infected, percent_recovered])
        if state == 'I':
            infected_list.append(inmate_id)
        elif state == 'R':
            recovered_list.append(inmate_id)
            num_recovered_added += 1
        # Connect inmate to existing inmates
        for other_inmate_id in G.nodes:
            # Do not allow self-edges
            if inmate_id == other_inmate_id:
                continue
            if np.random.rand() < p:  # add edge with certain probability (G(n,p) model edge generation for new node)
                G.add_edge(inmate_id, other_inmate_id)
    return G, num_recovered_added
def calculate_deaths(t, recovered_inmates_and_dead_inmates, delta_recovered_list, death_rate):
    """Split the combined recovered+dead counts into true recovered (R) and dead (D).

    Inmates added/released while "recovered" are known to be alive, so their
    running deltas are subtracted before applying death_rate.  Returns (R, D)
    where D = ceil(adjusted * death_rate) and R = original - D.
    """
    # Working copy: counts that may be either recovered or dead.
    adjusted = recovered_inmates_and_dead_inmates.copy()
    # NOTE(review): the loop uses delta_recovered_list[0 .. len-2]; the final
    # delta entry is never applied — confirm this matches the caller's indexing.
    for step in range(1, len(delta_recovered_list)):
        matches = np.where(t == step)[0]
        # Only adjust when a simulation sample landed exactly on this add/release time.
        if matches.size == 0:
            continue
        adjusted[matches[0]:] -= delta_recovered_list[step - 1]
    # Fraction of the ambiguous group that actually died.
    D = np.ceil(adjusted * death_rate)
    R = recovered_inmates_and_dead_inmates - D
    return R, D
def get_infected(data: EoN.Simulation_Investigation, end_time: int):
    """Returns list of infected nodes."""
    infected_state = 'I'
    return get_type_of_nodes(data, end_time, infected_state)
def get_recovered(data: EoN.Simulation_Investigation, end_time: int):
    """Returns list of recovered nodes."""
    recovered_state = 'R'
    return get_type_of_nodes(data, end_time, recovered_state)
def get_type_of_nodes(data: EoN.Simulation_Investigation, end_time: int, state: str):
    """Returns the nodes whose simulation status at end_time equals state."""
    statuses = data.get_statuses(time=end_time)
    return [node for node, node_state in statuses.items() if node_state == state]
|
import numpy as np
from collections import defaultdict, OrderedDict
from . import uff_pb2 as uff_pb
from .data import FieldType, create_data
from .exceptions import UffException
from .node import Node
from .utils import extend_with_original_traceback, int_types
def _create_fields(default_fields, fields=None):
default_fields.update(fields if fields else {})
return default_fields
class Graph(object):
    """Builder for a UFF graph: registers named operation nodes and serializes
    them to the uff_pb protobuf format.

    Nodes are kept in an OrderedDict so the emitted pbtxt preserves insertion
    order and stays readable.  Every public op method creates one node and
    returns it; when ``name`` is None a unique "<Op>_<idx>" name is generated.
    """

    def __init__(self, meta_graph, name):
        self.name = name
        self.meta_graph = meta_graph
        # here to preserve the orders of node for the pbtxt to be more readable
        self.nodes = OrderedDict()
        self.op_counts = defaultdict(int)

    def to_uff(self, debug=False):
        """Serialize to a uff_pb.Graph.

        Validation (via _check_graph_and_get_nodes) runs even when debug=True;
        the debug graph additionally embeds per-node debug info.
        """
        graph = uff_pb.Graph(id=self.name, nodes=self._check_graph_and_get_nodes())
        if debug:
            graph = uff_pb.Graph(id=self.name,
                                 nodes=[node.to_uff(debug) for node in self.nodes.values()])
        return graph

    def _check_and_get_node(self, node):
        """Convert one node to protobuf, verifying all of its inputs exist."""
        node = node.to_uff()
        for i in node.inputs:
            if i not in self.nodes:
                raise UffException("In node %s, %s input doesn't exist" % (node, i))
        self.meta_graph.descriptor.check_node(node, self.meta_graph.referenced_data)
        return node

    def _check_graph_and_get_nodes(self):
        """Validate every node; re-raise failures with the node's creation traceback."""
        nodes = []
        for node in self.nodes.values():
            try:
                nodes.append(self._check_and_get_node(node))
            except Exception as e:
                raise extend_with_original_traceback(e, node._trace)
        return nodes

    def _use_or_generate_name(self, op, name):
        """Return `name` if it's free; otherwise generate a unique "<base>_<idx>".

        The base is `name` when one was supplied (but collided), else `op`.
        """
        if name is not None:
            if name not in self.nodes:
                return name
        else:
            name = op
        idx = 0
        while True:
            key = "%s_%d" % (name, idx)
            if key not in self.nodes:
                return key
            idx += 1

    def _add_node(self, op, name, inputs=None, fields=None, extra_fields=None):
        """Create a Node, register it under `name`, and return it.

        FIX: `extra_fields` previously defaulted to a shared mutable dict
        ({}); it now defaults to None.  Every caller in this class passes
        extra_fields explicitly (itself defaulting to None), so Node already
        handles None and behavior is unchanged.
        """
        node = Node(self, op, name, inputs, fields, extra_fields)
        if name in self.nodes:
            raise UffException("node already exist")
        self.nodes[name] = node
        return node

    def input(self, shape, dtype=np.float32, name=None, fields=None, extra_fields=None):
        """Add an Input placeholder node with the given shape and dtype."""
        name = self._use_or_generate_name("Input", name)
        fields = _create_fields({"shape": shape, "dtype": dtype}, fields)
        return self._add_node("Input", name, fields=fields, extra_fields=extra_fields)

    def identity(self, prev_node, name=None, fields=None, extra_fields=None):
        """Add an Identity (pass-through) node."""
        name = self._use_or_generate_name("Identity", name)
        return self._add_node("Identity", name, inputs=[prev_node], fields=fields, extra_fields=extra_fields)

    def const(self, arr, name=None, fields=None, extra_fields=None):
        """Add a Const node; the values are stored as referenced blob data."""
        name = self._use_or_generate_name("Const", name)
        if not isinstance(arr, str):
            data_blob = create_data(np.ascontiguousarray(arr).tobytes(), FieldType.blob)
        else:
            data_blob = create_data(arr, FieldType.s)
            # data_blob = create_data(str.encode(arr), FieldType.blob)
        data_blob_ref = self.meta_graph.create_ref("weights_" + name, data_blob)
        fields = _create_fields({
            "shape": arr.shape if hasattr(arr, "shape") else [],
            "dtype": arr.dtype if hasattr(arr, "dtype") else type(arr),
            "values": data_blob_ref
        }, fields)
        return self._add_node("Const", name, fields=fields, extra_fields=extra_fields)

    def conv(self, left_node, right_node, strides,
             padding=None, dilation=None, number_groups=None,
             left_format="NC+", right_format="KC+",
             name=None, fields=None, extra_fields=None):
        """Add a Conv node (data input, kernel input)."""
        name = self._use_or_generate_name("Conv", name)
        fields = _create_fields({
            "padding": padding,
            "strides": strides,
            "dilation": dilation,
            "number_groups": number_groups,
            "inputs_orders": self.meta_graph.create_orders_ref([left_format, right_format])
        }, fields)
        return self._add_node("Conv", name, inputs=[left_node, right_node],
                              fields=fields, extra_fields=extra_fields)

    def conv_transpose(self, input_node, weights_node, shape_node, strides,
                       padding=None, dilation=None, number_groups=None,
                       left_format="NC+", right_format="KC+",
                       name=None, fields=None, extra_fields=None):
        """Add a ConvTranspose node (data, weights, output-shape inputs)."""
        name = self._use_or_generate_name("ConvTranspose", name)
        fields = _create_fields({
            "padding": padding,
            "strides": strides,
            "dilation": dilation,
            "number_groups": number_groups,
            "inputs_orders": self.meta_graph.create_orders_ref([left_format, right_format])
        }, fields)
        return self._add_node("ConvTranspose", name, inputs=[input_node, weights_node, shape_node],
                              fields=fields, extra_fields=extra_fields)

    def pool(self, prev_node, func, kernel, strides, padding=None, data_format="NC+",
             name=None, fields=None, extra_fields=None):
        """Add a Pool node; `func` (e.g. "MAX") is stored lower-cased."""
        name = self._use_or_generate_name("Pool", name)
        fields = _create_fields({
            "func": func.lower(),
            "kernel": kernel,
            "padding": padding,
            "strides": strides,
            "inputs_orders": self.meta_graph.create_orders_ref([data_format])
        }, fields)
        return self._add_node("Pool", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def fully_connected(self, left_node, right_node, left_format="NC", right_format="KC",
                        name=None, fields=None, extra_fields=None):
        """Add a FullyConnected node."""
        name = self._use_or_generate_name("FullyConnected", name)
        fields = _create_fields({
            "inputs_orders": self.meta_graph.create_orders_ref([left_format, right_format])
        }, fields)
        return self._add_node("FullyConnected", name, inputs=[left_node, right_node],
                              fields=fields, extra_fields=extra_fields)

    def lrn(self, prev_node, window_size, alpha, beta, k, data_format="NC+",
            name=None, fields=None, extra_fields=None):
        """Add a local-response-normalization (LRN) node."""
        name = self._use_or_generate_name("LRN", name)
        fields = _create_fields({
            "window_size": window_size,
            "alpha": alpha,
            "beta": beta,
            "k": k,
            "inputs_orders": self.meta_graph.create_orders_ref([data_format])
        }, fields)
        return self._add_node("LRN", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def binary(self, left_node, right_node, func, name=None, fields=None, extra_fields=None):
        """Add an elementwise Binary node applying `func` to two inputs."""
        name = self._use_or_generate_name("Binary", name)
        fields = _create_fields({"func": func}, fields)
        return self._add_node("Binary", name, inputs=[left_node, right_node],
                              fields=fields, extra_fields=extra_fields)

    def unary(self, prev_node, func, name=None, fields=None, extra_fields=None):
        """Add an elementwise Unary node applying `func` to one input."""
        name = self._use_or_generate_name("Unary", name)
        fields = _create_fields({"func": func}, fields)
        return self._add_node("Unary", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def expand_dims(self, prev_node, axis, name=None, fields=None, extra_fields=None):
        """Add an ExpandDims node inserting a dimension at `axis`."""
        name = self._use_or_generate_name("ExpandDims", name)
        fields = _create_fields({"axis": axis}, fields)
        return self._add_node("ExpandDims", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def argmax(self, prev_node, axis, name=None, fields=None, extra_fields=None):
        """Add an ArgMax node over `axis`."""
        name = self._use_or_generate_name("ArgMax", name)
        fields = _create_fields({"axis": axis}, fields)
        return self._add_node("ArgMax", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def argmin(self, prev_node, axis, name=None, fields=None, extra_fields=None):
        """Add an ArgMin node over `axis`."""
        name = self._use_or_generate_name("ArgMin", name)
        fields = _create_fields({"axis": axis}, fields)
        return self._add_node("ArgMin", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def reshape(self, prev_node, shape, name=None, fields=None, extra_fields=None):
        """Add a Reshape node; `shape` is itself a node input."""
        name = self._use_or_generate_name("Reshape", name)
        return self._add_node("Reshape", name, inputs=[prev_node, shape],
                              fields=fields, extra_fields=extra_fields)

    def transpose(self, prev_node, permutation, name=None, fields=None, extra_fields=None):
        """Add a Transpose node with the given axis permutation."""
        name = self._use_or_generate_name("Transpose", name)
        fields = _create_fields({"permutation": permutation}, fields)
        return self._add_node("Transpose", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def concat(self, inputs, axis, name=None, fields=None, extra_fields=None):
        """Add a Concat node joining `inputs` along `axis`."""
        name = self._use_or_generate_name("Concat", name)
        fields = _create_fields({"axis": axis}, fields)
        return self._add_node("Concat", name, inputs=inputs,
                              fields=fields, extra_fields=extra_fields)

    def reduce(self, prev_node, func, axes, keepdims, name=None, fields=None, extra_fields=None):
        """Add a Reduce node; a scalar `axes` is normalized to a one-element list."""
        name = self._use_or_generate_name("Reduce", name)
        if isinstance(axes, int_types):
            axes = [axes]
        fields = _create_fields({"func": func, "axes": axes, "keepdims": keepdims}, fields)
        return self._add_node("Reduce", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def stack(self, inputs, axis, name=None, fields=None, extra_fields=None):
        """Add a Stack node combining `inputs` along a new `axis`."""
        name = self._use_or_generate_name("Stack", name)
        fields = _create_fields({"axis": axis}, fields)
        return self._add_node("Stack", name, inputs=inputs,
                              fields=fields, extra_fields=extra_fields)

    def shape(self, input_node, name=None, fields=None, extra_fields=None):
        """Add a Shape node returning the runtime shape of its input."""
        name = self._use_or_generate_name("Shape", name)
        return self._add_node("Shape", name, inputs=[input_node],
                              fields=fields, extra_fields=extra_fields)

    def strided_slice(self, input_node, begin_node, end_node, strides_node,
                      begin_mask=0, end_mask=0, shrink_axis_mask=0,
                      name=None, fields=None, extra_fields=None):
        """Add a StridedSlice node (TensorFlow-style masks)."""
        name = self._use_or_generate_name("StridedSlice", name)
        fields = _create_fields({
            "begin_mask": begin_mask,
            "end_mask": end_mask,
            "shrink_axis_mask": shrink_axis_mask
        }, fields)
        return self._add_node("StridedSlice", name,
                              inputs=[input_node, begin_node, end_node, strides_node],
                              fields=fields, extra_fields=extra_fields)

    def mark_output(self, output, name=None, fields=None, extra_fields=None):
        """Add a MarkOutput node flagging `output` as a graph output."""
        name = self._use_or_generate_name("MarkOutput", name)
        return self._add_node("MarkOutput", name, inputs=[output],
                              fields=fields, extra_fields=extra_fields)

    def custom_node(self, op, inputs, name=None, fields=None, extra_fields=None):
        """Add a user-defined node; op names are prefixed with "_" and enable
        the custom descriptor on the meta graph."""
        self.meta_graph.enable_custom_descriptor()
        op = "_" + op
        name = self._use_or_generate_name(op, name)
        return self._add_node(op, name, inputs=inputs, fields=fields, extra_fields=extra_fields)

    # TODO transform those into Sub-Graph
    def activation(self, prev_node, func, name=None, fields=None, extra_fields=None):
        """Add an Activation node applying `func`."""
        name = self._use_or_generate_name("Activation", name)
        fields = _create_fields({"func": func}, fields)
        return self._add_node("Activation", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def softmax(self, prev_node, axis, data_format, name=None, fields=None, extra_fields=None):
        """Add a Softmax node over `axis`."""
        name = self._use_or_generate_name("Softmax", name)
        fields = _create_fields({
            "axis": axis,
            "inputs_orders": self.meta_graph.create_orders_ref([data_format])
        }, fields)
        return self._add_node("Softmax", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def batchnorm(self, prev_node, gamma, beta, moving_mean, moving_variance, epsilon,
                  data_format="NC+", name=None, fields=None, extra_fields=None):
        """Add a BatchNorm node (inputs: data, gamma, beta, mean, variance)."""
        name = self._use_or_generate_name("BatchNorm", name)
        fields = _create_fields({
            "epsilon": epsilon,
            "inputs_orders": self.meta_graph.create_orders_ref([data_format])
        }, fields)
        return self._add_node("BatchNorm", name,
                              inputs=[prev_node, gamma, beta, moving_mean, moving_variance],
                              fields=fields, extra_fields=extra_fields)

    def squeeze(self, prev_node, name=None, axis=None, fields=None, extra_fields=None):
        """Add a Squeeze node removing size-1 dimensions (all, or those in `axis`)."""
        name = self._use_or_generate_name("Squeeze", name)
        fields = _create_fields({
            "axes": axis,
        }, fields)
        return self._add_node("Squeeze", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def flatten(self, prev_node, name=None, axis=None, fields=None, extra_fields=None):
        """Add a Flatten node."""
        name = self._use_or_generate_name("Flatten", name)
        return self._add_node("Flatten", name, inputs=[prev_node],
                              fields=fields, extra_fields=extra_fields)

    def pad(self, prev_node, pad, name=None, fields=None, extra_fields=None):
        """Add a Pad node; `pad` is itself a node input."""
        name = self._use_or_generate_name("Pad", name)
        return self._add_node("Pad", name, inputs=[prev_node, pad],
                              fields=fields, extra_fields=extra_fields)

    def gather(self, inputs, name=None, indices_dtype=None, params_dtype=None,
               validate_indices=None, fields=None, extra_fields=None):
        """Add a Gather node (params, indices)."""
        name = self._use_or_generate_name("Gather", name)
        fields = _create_fields({
            "indices_dtype": indices_dtype,
            "params_dtype": params_dtype,
            "validate_indices": validate_indices,
        }, fields)
        return self._add_node("Gather", name, inputs=inputs,
                              fields=fields, extra_fields=extra_fields)

    def gather_v2(self, inputs, name=None, axis=0, indices_dtype=None, params_dtype=None,
                  fields=None, extra_fields=None):
        """Add a GatherV2 node with an explicit gather `axis`."""
        name = self._use_or_generate_name("GatherV2", name)
        fields = _create_fields({
            "axis": axis,
            "indices_dtype": indices_dtype,
            "params_dtype": params_dtype,
        }, fields)
        return self._add_node("GatherV2", name, inputs=inputs,
                              fields=fields, extra_fields=extra_fields)
|
import numpy as np
import matplotlib.pyplot as plt
import numba
from pprint import pprint
import time
@numba.njit(fastmath=True)
def vdd(y, x0, alpha, noise):
    # Leaky accumulator: x_t = alpha * x_{t-1} + y[t] + noise[t], starting at x0.
    # Returns the full activation trace (same length/dtype as y).
    x = x0
    xs = np.empty_like(y)
    for i in range(len(xs)):
        x = alpha*x + y[i] + noise[i]
        xs[i] = x
    return xs
@numba.njit(fastmath=True)
def vdd_time(y, x0, alpha, noise, threshold):
    # First-passage time of the accumulator above `threshold`, in fractional
    # sample indices: 0 if already above at the start, NaN if never crossed.
    act = vdd(y, x0, alpha, noise)
    if x0 > threshold:
        return 0
    prev = x0
    for i in range(len(act)):
        if act[i] <= threshold:
            prev = act[i]
            continue
        # Linear interpolation between the last sub-threshold sample and the
        # first supra-threshold one gives a sub-sample crossing time.
        t = (threshold - prev)/(act[i] - prev)
        return i + t
    #return max(0.0, i + t)
    return np.nan
@numba.njit(fastmath=True)
def vdd_times(y, x0, alpha, noises, threshold):
    # One first-passage time per noise realization (per row of `noises`).
    out = np.empty(noises.shape[0])
    for i in range(len(noises)):
        out[i] = vdd_time(y, x0, alpha, noises[i], threshold)
    return out
def timeit(func, n=10):
    """Call func() n times and return the list of wall-clock durations in seconds."""
    durations = []
    for _ in range(n):
        t0 = time.perf_counter()
        func()
        durations.append(time.perf_counter() - t0)
    return durations
# --- Simulation setup: constant-speed approach scenario ---
dt = 0.01
dur = 20
ts = np.arange(0, dur, dt)
# Time-to-contact tau shrinks linearly from tau0 as distance closes at `speed`.
tau0 = 4.0
speed = 30.0
dist = tau0*speed - ts*speed
tau = dist/speed
threshold = 4
scale = 0.5
# NOTE(review): post-contact samples (tau < 0) are reset to 4.5, which is above
# `threshold`, flipping the evidence sign after contact — confirm intent.
tau[tau < 0] = 4.5
# Evidence is a squashed (arctan) distance of tau from the decision threshold.
evidence = np.arctan((tau - threshold)/scale)
#plt.plot(ts, tau)
#plt.plot(ts, evidence + threshold)
#plt.show()
# Monte-Carlo: N noise realizations, scaled for the dt time step.
N = 10000
noises = np.random.randn(N, len(ts))*(3.0*np.sqrt(dt))
times = vdd_times(evidence, 0.0, 0.9, noises, 1.0)
# Second call reuses the numba-compiled function, so this times steady-state speed.
print(timeit(lambda: vdd_times(evidence, 0.0, 0.9, noises, 1.0)))
# Distribution of crossing times (finite only), converted to seconds via dt.
plt.hist(times[np.isfinite(times)]*dt, bins=100, density=True)
plt.show()
# A few example accumulator traces plus the driving evidence signal.
for i in range(3):
    act = vdd(evidence, 0, 0.9, noises[i])
    #plt.plot(act)
    plt.plot(ts, act, alpha=0.1)
plt.plot(ts, evidence)
#plt.plot(ts, tau)
plt.show()
|
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session, sessionmaker, scoped_session
from sqlalchemy import create_engine
from sqlalchemy import inspect
# Reflect an existing MySQL schema into ORM classes via SQLAlchemy automap.
Base = automap_base()
# NOTE(review): hardcoded root credentials — acceptable only for local dev.
database_username = 'root'
database_password = 'root'
database_ip = 'localhost'
database_name = 'mmr'
# pool_pre_ping guards against stale MySQL connections being handed out.
engine = create_engine('mysql+pymysql://{0}:{1}@{2}/{3}'.
                       format(database_username, database_password,
                              database_ip, database_name), pool_pre_ping=True, pool_size=50, max_overflow=100)
# Thread-local sessions; autocommit=True means no explicit transaction management.
db_session = scoped_session(sessionmaker(autocommit=True,
                                         autoflush=False,
                                         bind=engine))
# reflect the tables
Base.prepare(engine, reflect=True)
# mapped classes are now created with names by default
print([i for i in Base.classes])
# Convenience aliases for the reflected table classes.
Virus = Base.classes.virus
RubellaEp = Base.classes.rubella_epitopes
MumpsEp = Base.classes.mumps_epitopes
MeaslesEp = Base.classes.measles_epitopes
# Allow Model.query style access through the scoped session.
Base.query = db_session.query_property()
def row2dict(obj):
    """Map a mapped object's column attribute names to their current values."""
    mapper = inspect(obj).mapper
    return {attr.key: getattr(obj, attr.key) for attr in mapper.column_attrs}
def result2dict(result):
    """Convert an iterable of mapped objects into a list of plain dicts."""
    return list(map(row2dict, result))
|
# Scrape image URLs from a Flickr search page and download them into images/.
from urllib import request
import os

url = 'https://www.flickr.com/search/?text=aurora%20polaris'
conn = request.urlopen(url)
http = conn.read().decode('utf-8')
conn.close()

# Each "img.src" occurrence is followed by a quoted protocol-relative URL;
# slice from just past the opening quote up to the quote before ';'.
imurls = []
start = 0
while True:
    index = http.find('img.src', start)
    if index == -1:
        break
    imurls.append('http:' + http[index+9 : http.find(';', index)-1])
    start = index+1

# Make sure the output directory exists before writing into it.
os.makedirs('images', exist_ok=True)
for i, url in enumerate(imurls):
    print('downloading', url)
    # urlretrieve replaces the legacy per-file request.URLopener() instances:
    # URLopener has been deprecated since Python 3.3 and removed in recent
    # versions, and one opener per image was wasteful anyway.
    request.urlretrieve(url, 'images/' + str(i) + '.jpg')
from django.conf import settings as const
from api.models import Collection, Category, Make, \
Model, Artifact, CollectionArtifact, Image, Transfer, Notification
from api.serializers import CollectionSerializer, CategorySerializer, \
MakeSerializer, ModelSerializer, ArtifactSerializer, CollectionArtifactSerializer, ImageSerializer, \
NotificationSerializer, TransferSerializer
def get_content(type, sender, ref_id):
    """Build the notification message for a transfer or alarm notification.

    Args:
        type: notification type constant; only const.NOTIFICATION_TRANSFER and
            const.NOTIFICATION_ALARM are recognized.
        sender: user object — only .username is read.
        ref_id: primary key of the Artifact the notification refers to.

    Returns:
        The message string, or None for unrecognized types (the original fell
        through implicitly; this makes it explicit).
    """
    if type not in (const.NOTIFICATION_TRANSFER, const.NOTIFICATION_ALARM):
        return None
    # Both branches format the same artifact; fetch it once instead of per-branch.
    # NOTE(review): if ref_id doesn't exist, artifact is None and .title raises
    # AttributeError — identical to the original behavior.
    artifact = Artifact.objects.filter(id=ref_id).first()
    if type == const.NOTIFICATION_TRANSFER:
        return """%s has transferred to you %s""" % (sender.username, artifact.title)
    return """%s has accepted the %s you transferred""" % (sender.username, artifact.title)
#WAP TO INPUT THREE NUMBERS AND CHECK WHETHER THEY FORM A TRIANGLE
# Python 2 script.  NOTE(review): Python 2 input() eval()s the typed text —
# fine for trusted interactive use, unsafe otherwise.
a=input("Enter first side : ")
b=input("Enter second side : ")
c=input("Enter third side : ")
# Triangle inequality: every side must be strictly shorter than the sum of the
# other two (degenerate triangles are rejected).
if a<b+c and b<a+c and c<a+b:
    print "It's a triangle!"
    # Classify by side equality.
    if a==b==c:
        print "It's an equilateral triangle.."
    elif a==b or b==c or a==c:
        print "It is an isoceles triangle.."
    else:
        print" It's a scalene triangle.."
    # Pythagorean check for a right angle (any side as hypotenuse).
    if a**2==b**2+c**2 or b**2==c**2+a**2 or c**2==a**2+b**2:
        print "It's a right angle triangle.."
    else:
        print"It's not a right angled triangle.."
    # Heron's formula: area from the semi-perimeter s.
    s=(a+b+c)/2.0
    area=(s*(s-a)*(s-b)*(s-c))**0.5
    print "Semi-perimeter = ",s
    print "Area = ",area
else:
    print"It is not a triangle "
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import os.path
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class VTKFrame(QtGui.QFrame):
    """Qt frame embedding a VTK render window that shows a cone (left viewport)
    next to its extracted 2D silhouette contour (right viewport).

    Pipeline: render the data in black on white off-screen, snapshot the window,
    contour the image at intensity 255, then scale/translate the contour back
    onto the data's bounds and center.

    NOTE(review): uses mapper.SetInput / filter.SetInput, i.e. the pre-VTK6
    pipeline API — confirm the installed VTK version supports it.
    """
    def __init__(self, parent = None):
        super(VTKFrame, self).__init__(parent)
        # Embed the VTK interactor widget, filling the whole frame.
        self.vtkWidget = QVTKRenderWindowInteractor(self)
        vl = QtGui.QVBoxLayout(self)
        vl.addWidget(self.vtkWidget)
        vl.setContentsMargins(0, 0, 0, 0)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()

        # Create source
        source = vtk.vtkConeSource()
        #source = vtk.vtkSphereSource()
        #source.SetCenter(0, 0, 5.0)
        #source.SetRadius(2.0)
        #source.SetPhiResolution(20)
        #source.SetThetaResolution(20)
        source.Update()
        data3D = source.GetOutput()
        # Record the data's bounds/center so the contour can be mapped back later.
        boundsData = [1]*6
        centerData = [1]*3
        data3D.GetBounds(boundsData)
        data3D.GetCenter(centerData)

        # Black and white scene with the data in order to print the view
        mapperData = vtk.vtkPolyDataMapper()
        mapperData.SetInput(data3D)
        actorData = vtk.vtkActor()
        actorData.SetMapper(mapperData)
        actorData.GetProperty().SetColor(0, 0, 0)
        tmpRender = vtk.vtkRenderer()
        tmpRender.SetBackground(1, 1, 1)
        tmpRender.AddActor(actorData)
        tmpRender.ResetCamera()
        # Parallel projection keeps the silhouette metrically comparable to the data.
        tmpRender.GetActiveCamera().SetParallelProjection(1)
        tmpRenderWindow = vtk.vtkRenderWindow()
        tmpRenderWindow.SetOffScreenRendering(1)
        tmpRenderWindow.AddRenderer(tmpRender)
        tmpRenderWindow.Render()

        # Get a print of the window
        windowToImageFilter = vtk.vtkWindowToImageFilter()
        windowToImageFilter.SetInput(tmpRenderWindow)
        windowToImageFilter.SetMagnification(2)
        windowToImageFilter.Update()

        # Extract the silhouette corresponding to the black limit of the image
        contourFilter = vtk.vtkContourFilter()
        contourFilter.SetInputConnection(windowToImageFilter.GetOutputPort())
        contourFilter.SetValue(0, 255)
        contourFilter.Update()

        # Make the contour coincide with the data.
        contour = contourFilter.GetOutput()
        boundsContour = [1]*6
        contour.GetBounds(boundsContour)
        # Per-axis scale from image (pixel) space back to data space.
        ratioX = (boundsData[1]-boundsData[0])/(boundsContour[1]-boundsContour[0])
        ratioY = (boundsData[3]-boundsData[2])/(boundsContour[3]-boundsContour[2])
        # Rescale the contour so that it shares the same bounds as the input data
        transform1 = vtk.vtkTransform()
        transform1.Scale(ratioX, ratioY, 1.0)
        tfilter1 = vtk.vtkTransformPolyDataFilter()
        tfilter1.SetInput(contour)
        tfilter1.SetTransform(transform1)
        tfilter1.Update()
        contour = tfilter1.GetOutput()

        #Translate the contour so that it shares the same center as the input data
        centerContour = [1]*3
        contour.GetCenter(centerContour)
        transX = centerData[0] - centerContour[0]
        transY = centerData[1] - centerContour[1]
        transZ = centerData[2] - centerContour[2]
        transform2 = vtk.vtkTransform()
        transform2.Translate(transX, transY, transZ)
        tfilter2 = vtk.vtkTransformPolyDataFilter()
        tfilter2.SetInput(contour)
        tfilter2.SetTransform(transform2)
        tfilter2.Update()
        contour = tfilter2.GetOutput()

        # Render the result: Input data + resulting silhouette
        # Updating the color of the data
        actorData.GetProperty().SetColor(0.9, 0.9, 0.8)
        # Create a mapper and actor of the silhouette
        mapperContour = vtk.vtkPolyDataMapper()
        mapperContour.SetInput(contour)
        actorContour = vtk.vtkActor()
        actorContour.SetMapper(mapperContour)
        actorContour.GetProperty().SetLineWidth(2.0)
        # 2 renders and a render window
        # Left half: original data; right half: extracted silhouette.
        renderer1 = vtk.vtkRenderer()
        renderer1.AddActor(actorData)
        renderer2 = vtk.vtkRenderer()
        renderer2.AddActor(actorContour)
        self.vtkWidget.GetRenderWindow().AddRenderer(renderer1)
        renderer1.SetViewport(0, 0, 0.5, 1)
        self.vtkWidget.GetRenderWindow().AddRenderer(renderer2)
        renderer2.SetViewport(0.5, 0, 1, 1)
        # Defer interactor initialization until the widget is actually shown.
        self._initialized = False

    def showEvent(self, evt):
        # Initialize the VTK interactor exactly once, on first show.
        if not self._initialized:
            self.iren.Initialize()
            self._initialized = True
class MainPage(QtGui.QMainWindow):
    """Main window hosting the VTKFrame demo as its central widget."""
    def __init__(self, parent = None):
        super(MainPage, self).__init__(parent)
        self.setCentralWidget(VTKFrame())
        self.setWindowTitle("External Contour example")
    def categories(self):
        """Demo-browser metadata: category tags for this example."""
        return ['Demo']
    def mainClasses(self):
        """Demo-browser metadata: the VTK classes this example showcases."""
        return ['vtkWindowToImageFilter', 'vtkContourFilter', 'vtkTransformPolyDataFilter']
if __name__ == '__main__':
    # Create the Qt application, show the demo window, and enter the event loop.
    app = QtGui.QApplication(sys.argv)
    w = MainPage()
    w.show()
    sys.exit(app.exec_())
|
import sys
import pandas as pd
import numpy as np
def loadmsoaData(dirName='../Loneliness'):
    """Load the MSOA loneliness workbook: (data sheet, data-dictionary sheet)."""
    path = '{0}/msoa_loneliness.xlsx'.format(dirName)
    msoaData = pd.read_excel(path, 'msoa_loneliness', index_col=None)
    msoaDataDict = pd.read_excel(path, 'Data Dictionary', index_col=None)
    return msoaData, msoaDataDict
def loadFinalData(dirName='../Loneliness'):
    """Load the final-data workbook: (data sheet, data-dictionary sheet)."""
    path = '{0}/final_data.xlsx'.format(dirName)
    finalData = pd.read_excel(path, 'Data', index_col=None)
    finalDataDict = pd.read_excel(path, 'Data Dictionary', index_col=None)
    return finalData, finalDataDict
def loadDrugsList(dirName='../Loneliness'):
    """Load drug_list.csv from dirName as a DataFrame."""
    return pd.read_csv('{0}/drug_list.csv'.format(dirName))
def loadProccessedData(dirName='../Loneliness'):
    """Load processed_data.csv, dropping the CSV's unnamed index column."""
    frame = pd.read_csv('{0}/processed_data.csv'.format(dirName))
    # The file was written with its index, which pandas reads as 'Unnamed: 0'.
    return frame.drop(['Unnamed: 0'], axis=1)
def mergedDataStreams(final_data, proc_data):
    """Attach the Postcode column from proc_data to final_data.

    Rows are matched on the (PCT, pcstrip, SHA) key triple; only the key
    columns plus Postcode are carried over from proc_data.
    """
    join_keys = ['PCT', 'pcstrip', 'SHA']
    pCodeData = final_data.merge(proc_data, on=join_keys)
    pCodeData.reset_index(drop=False, inplace=True)
    postcode_cols = pCodeData[join_keys + ['Postcode']]
    return final_data.merge(postcode_cols, on=join_keys)
|
# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
Example usage:
>>> from portage.xml.metadata import MetaDataXML
>>> pkg_md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml')
>>> pkg_md
<MetaDataXML '/usr/portage/app-misc/gourmet/metadata.xml'>
>>> pkg_md.herds()
['no-herd']
>>> for maint in pkg_md.maintainers():
... print "{0} ({1})".format(maint.email, maint.name)
...
nixphoeni@gentoo.org (Joe Sapp)
>>> for flag in pkg_md.use():
... print flag.name, "->", flag.description
...
rtf -> Enable export to RTF
gnome-print -> Enable printing support using gnome-print
>>> upstream = pkg_md.upstream()
>>> upstream
[<_Upstream {'docs': [], 'remoteid': [], 'maintainer':
[<_Maintainer 'Thomas_Hinkle@alumni.brown.edu'>], 'bugtracker': [],
'changelog': []}>]
>>> upstream[0].maintainer[0].name
'Thomas Mills Hinkle'
"""
__all__ = ('MetaDataXML',)
import sys
if sys.hexversion < 0x2070000 or \
(sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000):
# Our _MetadataTreeBuilder usage is incompatible with
# cElementTree in Python 2.6, 3.0, and 3.1:
# File "/usr/lib/python2.6/xml/etree/ElementTree.py", line 644, in findall
# assert self._root is not None
import xml.etree.ElementTree as etree
else:
try:
import xml.etree.cElementTree as etree
except (SystemExit, KeyboardInterrupt):
raise
except (ImportError, SystemError, RuntimeError, Exception):
# broken or missing xml support
# http://bugs.python.org/issue14988
import xml.etree.ElementTree as etree
try:
from xml.parsers.expat import ExpatError
except (SystemExit, KeyboardInterrupt):
raise
except (ImportError, SystemError, RuntimeError, Exception):
ExpatError = SyntaxError
import re
import xml.etree.ElementTree
from portage import _encodings, _unicode_encode, _unicode_decode
from portage.util import unique_everseen
class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
"""
Implements doctype() as required to avoid deprecation warnings with
Python >=2.7.
"""
def doctype(self, name, pubid, system):
pass
class _Maintainer(object):
"""An object for representing one maintainer.
@type email: str or None
@ivar email: Maintainer's email address. Used for both Gentoo and upstream.
@type name: str or None
@ivar name: Maintainer's name. Used for both Gentoo and upstream.
@type description: str or None
@ivar description: Description of what a maintainer does. Gentoo only.
@type restrict: str or None
@ivar restrict: e.g. >=portage-2.2 means only maintains versions
of Portage greater than 2.2. Should be DEPEND string with < and >
converted to < and > respectively.
@type status: str or None
@ivar status: If set, either 'active' or 'inactive'. Upstream only.
"""
def __init__(self, node):
self.email = None
self.name = None
self.description = None
self.restrict = node.get('restrict')
self.status = node.get('status')
for attr in node:
setattr(self, attr.tag, attr.text)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.email)
class _Useflag(object):
"""An object for representing one USE flag.
@todo: Is there any way to have a keyword option to leave in
<pkg> and <cat> for later processing?
@type name: str or None
@ivar name: USE flag
@type restrict: str or None
@ivar restrict: e.g. >=portage-2.2 means flag is only available in
versions greater than 2.2
@type description: str
@ivar description: description of the USE flag
"""
def __init__(self, node):
self.name = node.get('name')
self.restrict = node.get('restrict')
_desc = ''
if node.text:
_desc = node.text
for child in node.getchildren():
_desc += child.text if child.text else ''
_desc += child.tail if child.tail else ''
# This takes care of tabs and newlines left from the file
self.description = re.sub('\s+', ' ', _desc)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.name)
class _Upstream(object):
	"""One package's <upstream> block from metadata.xml.

	Collected in document order:
	  - maintainers: L{_Maintainer} objects for each upstream maintainer
	  - changelogs: ChangeLog URLs (str)
	  - docs: (url, lang) tuples for upstream documentation; lang may be None
	  - bugtrackers: bugtracker URLs, possibly 'mailto:' addresses
	  - remoteids: (hosting-site, project-id) tuples, e.g.
	    [('sourceforge', 'systemrescuecd')]
	"""
	def __init__(self, node):
		self.node = node
		self.maintainers = self.upstream_maintainers()
		self.changelogs = self.upstream_changelogs()
		self.docs = self.upstream_documentation()
		self.bugtrackers = self.upstream_bugtrackers()
		self.remoteids = self.upstream_remoteids()

	def __repr__(self):
		return "<%s %r>" % (self.__class__.__name__, self.__dict__)

	def upstream_bugtrackers(self):
		"""URLs (or mailto: addresses) of the upstream bugtracker."""
		return [elem.text for elem in self.node.findall('bugs-to')]

	def upstream_changelogs(self):
		"""URLs of the upstream ChangeLog."""
		return [elem.text for elem in self.node.findall('changelog')]

	def upstream_documentation(self):
		"""(url, lang) tuples for upstream docs; lang is None when unset."""
		entries = []
		for elem in self.node.findall('doc'):
			entries.append((elem.text, elem.get('lang')))
		return entries

	def upstream_maintainers(self):
		"""_Maintainer objects for each upstream <maintainer> entry."""
		return [_Maintainer(m) for m in self.node.findall('maintainer')]

	def upstream_remoteids(self):
		"""(hosting-site, project-id) tuples from <remote-id> entries."""
		return [(elem.text, elem.get('type')) for elem in self.node.findall('remote-id')]
class MetaDataXML(object):
    """Access metadata.xml.

    Parses a package's ``metadata.xml`` (and, lazily on demand, a
    ``herds.xml``) and exposes cached accessors for herds, long
    descriptions, maintainers, USE flags and upstream information.
    All query results are computed once and cached on the instance.
    """

    def __init__(self, metadata_xml_path, herds):
        """Parse a valid metadata.xml file.

        @type metadata_xml_path: str
        @param metadata_xml_path: path to a valid metadata.xml file
        @type herds: str or ElementTree
        @param herds: path to a herds.xml, or a pre-parsed ElementTree
        @raise IOError: if C{metadata_xml_path} can not be read
        @raise SyntaxError: if the XML is malformed (wraps ExpatError)
        """
        self.metadata_xml_path = metadata_xml_path
        self._xml_tree = None

        try:
            self._xml_tree = etree.parse(_unicode_encode(metadata_xml_path,
                encoding=_encodings['fs'], errors='strict'),
                parser=etree.XMLParser(target=_MetadataTreeBuilder()))
        except ImportError:
            # No usable XML parser implementation available; every accessor
            # then falls back to an empty tuple (self._xml_tree stays None).
            pass
        except ExpatError as e:
            raise SyntaxError(_unicode_decode("%s") % (e,))

        # Accept either a pre-parsed herds tree or a path to parse lazily.
        if isinstance(herds, etree.ElementTree):
            herds_etree = herds
            herds_path = None
        else:
            herds_etree = None
            herds_path = herds

        # Used for caching
        self._herdstree = herds_etree
        self._herds_path = herds_path
        self._descriptions = None
        self._maintainers = None
        self._herds = None
        self._useflags = None
        self._upstream = None

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.metadata_xml_path)

    def _get_herd_email(self, herd):
        """Get a herd's email address.

        @type herd: str
        @param herd: herd whose email you want
        @rtype: str or None
        @return: email address or None if herd is not in herds.xml
        @raise IOError: if $PORTDIR/metadata/herds.xml can not be read
        """
        if self._herdstree is None:
            # Lazily parse herds.xml on first use; any failure means
            # "no email information available".
            try:
                self._herdstree = etree.parse(_unicode_encode(self._herds_path,
                    encoding=_encodings['fs'], errors='strict'),
                    parser=etree.XMLParser(target=_MetadataTreeBuilder()))
            except (ImportError, IOError, SyntaxError):
                return None

        # Some special herds are not listed in herds.xml
        if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
            return None

        try:
            # Python 2.7 or >=3.2
            iterate = self._herdstree.iter
        except AttributeError:
            iterate = self._herdstree.getiterator

        for node in iterate('herd'):
            if node.findtext('name') == herd:
                return node.findtext('email')
        # Falls through (returns None) when the herd is unknown.

    def herds(self, include_email=False):
        """Return a tuple of text nodes for <herd>.

        @type include_email: bool
        @keyword include_email: if True, also look up the herd's email
        @rtype: tuple
        @return: if include_email is False, a tuple of herd names;
            if include_email is True, a tuple of (name, email) pairs,
            e.g. (('herd1', 'herd1@gentoo.org'), ('no-herd', None))
        """
        # NOTE(review): the cache is filled on the first call only, so a later
        # call with a different include_email value returns the first call's
        # shape -- confirm callers never mix both forms on one instance.
        if self._herds is None:
            if self._xml_tree is None:
                self._herds = tuple()
            else:
                herds = []
                for elem in self._xml_tree.findall('herd'):
                    text = elem.text
                    if text is None:
                        text = ''
                    if include_email:
                        herd_mail = self._get_herd_email(text)
                        herds.append((text, herd_mail))
                    else:
                        herds.append(text)
                self._herds = tuple(herds)

        return self._herds

    def descriptions(self):
        """Return a tuple of text nodes for <longdescription>.

        @rtype: tuple
        @return: package description strings (cached after first call)
        @todo: Support the C{lang} attribute
        """
        if self._descriptions is None:
            if self._xml_tree is None:
                self._descriptions = tuple()
            else:
                self._descriptions = tuple(e.text \
                    for e in self._xml_tree.findall("longdescription"))
        return self._descriptions

    def maintainers(self):
        """Get maintainers' name, email and description.

        @rtype: tuple
        @return: a sequence of L{_Maintainer} objects in document order.
        """
        if self._maintainers is None:
            if self._xml_tree is None:
                self._maintainers = tuple()
            else:
                self._maintainers = tuple(_Maintainer(node) \
                    for node in self._xml_tree.findall('maintainer'))
        return self._maintainers

    def use(self):
        """Get names and descriptions for USE flags defined in metadata.

        @rtype: tuple
        @return: a sequence of L{_Useflag} objects in document order.
        """
        if self._useflags is None:
            if self._xml_tree is None:
                self._useflags = tuple()
            else:
                try:
                    # Python 2.7 or >=3.2
                    iterate = self._xml_tree.iter
                except AttributeError:
                    iterate = self._xml_tree.getiterator
                self._useflags = tuple(_Useflag(node) \
                    for node in iterate('flag'))
        return self._useflags

    def upstream(self):
        """Get upstream contact information.

        @rtype: tuple
        @return: a sequence of L{_Upstream} objects in document order.
        """
        if self._upstream is None:
            if self._xml_tree is None:
                self._upstream = tuple()
            else:
                self._upstream = tuple(_Upstream(node) \
                    for node in self._xml_tree.findall('upstream'))
        return self._upstream

    def format_maintainer_string(self):
        """Format string containing maintainers and herds (emails if possible).
        Used by emerge to display maintainer information.
        Entries are sorted according to the rules stated on the bug wranglers page.

        @rtype: String
        @return: a string containing maintainers and herds
        """
        maintainers = []
        # Prefer the email; fall back to the bare name when no email is set.
        for maintainer in self.maintainers():
            if maintainer.email is None or not maintainer.email.strip():
                if maintainer.name and maintainer.name.strip():
                    maintainers.append(maintainer.name)
            else:
                maintainers.append(maintainer.email)

        # Same for herds, except the "no-herd" placeholder is dropped.
        for herd, email in self.herds(include_email=True):
            if herd == "no-herd":
                continue
            if email is None or not email.strip():
                if herd and herd.strip():
                    maintainers.append(herd)
            else:
                maintainers.append(email)

        maintainers = list(unique_everseen(maintainers))

        # First entry stands alone; any remaining entries are comma-joined
        # after a single separating space.
        maint_str = ""
        if maintainers:
            maint_str = maintainers[0]
            maintainers = maintainers[1:]
            if maintainers:
                maint_str += " " + ",".join(maintainers)

        return maint_str

    def format_upstream_string(self):
        """Format string containing upstream maintainers and bugtrackers.
        Used by emerge to display upstream information.

        @rtype: String
        @return: a string containing upstream maintainers and bugtrackers
        """
        maintainers = []
        for upstream in self.upstream():
            # Upstream maintainers: prefer email, fall back to name.
            for maintainer in upstream.maintainers:
                if maintainer.email is None or not maintainer.email.strip():
                    if maintainer.name and maintainer.name.strip():
                        maintainers.append(maintainer.name)
                else:
                    maintainers.append(maintainer.email)

            # Bugtracker URLs; strip the mailto: scheme from email trackers.
            for bugtracker in upstream.bugtrackers:
                if bugtracker.startswith("mailto:"):
                    bugtracker = bugtracker[7:]
                maintainers.append(bugtracker)

        maintainers = list(unique_everseen(maintainers))
        maint_str = " ".join(maintainers)
        return maint_str
|
#!python3
from numpy import random
from time import perf_counter
def selection_sort(array):
    """Sort *array* in place with selection sort and return it.

    LBYL variant: the inner scan checks the index bound up front.
    """
    length = len(array)
    for target in range(length):
        smallest = target
        # Find the index of the minimum of array[target:].
        for candidate in range(target + 1, length):
            if array[candidate] < array[smallest]:
                smallest = candidate
        array[target], array[smallest] = array[smallest], array[target]
    return array
def selection_sort_withEAFP(array):
    """Sort *array* in place with selection sort and return it.

    Demonstrates the EAFP principle: instead of checking the index bound
    up front (LBYL, as in ``selection_sort``), the inner loop simply runs
    until indexing past the end raises ``IndexError``.
    """
    # FIX: the original had a second bare string statement after the
    # docstring (a no-op, not documentation) and a redundant `controller`
    # flag; the flag is replaced with a plain `break`.
    for i in range(len(array)):
        j = 1
        k = i
        while True:
            try:
                if array[i + j] < array[k]:
                    k = i + j
                j += 1
            except IndexError:
                # Ran off the end: minimum of array[i:] has been found.
                break
        array[i], array[k] = array[k], array[i]
    return array
def main():
    """Benchmark both selection-sort variants on fresh random data."""
    benchmarks = (
        ("naive selection", selection_sort, "\n"),
        ("selection with EAFP", selection_sort_withEAFP, ""),
    )
    for label, sorter, tail in benchmarks:
        data = random.randint(1000, size=5_000)
        print(data)
        started = perf_counter()
        print(sorter(data))
        elapsed = perf_counter() - started
        print(f"{label}: it took {elapsed:.2f} "
              f"seconds to sort {len(data):,} items{tail}")

if __name__ == "__main__":
    main()
|
import os
import uuid
from datetime import datetime, timezone, timedelta
from io import BytesIO
from threading import Lock
import pytest
import pytz
from wacryptolib._crypto_backend import get_random_bytes
from wacryptolib.utilities import (
split_as_chunks,
recombine_chunks,
dump_to_json_bytes,
dump_to_json_str,
load_from_json_bytes,
load_from_json_str,
check_datetime_is_tz_aware,
dump_to_json_file,
load_from_json_file,
generate_uuid0,
SUPPORTED_HASH_ALGOS,
hash_message,
get_utc_now_date,
get_memory_rss_bytes,
delete_filesystem_node_for_stream,
catch_and_log_exception,
synchronized,
)
def test_check_datetime_is_tz_aware():
    """A naive datetime is rejected; a tz-aware one passes silently."""
    naive_date = datetime.now()
    with pytest.raises(ValueError):
        check_datetime_is_tz_aware(naive_date)
    aware_date = get_utc_now_date()
    check_datetime_is_tz_aware(aware_date)
def test_hash_message():
    """Hashing is deterministic per algorithm and rejects unknown names."""
    payload = get_random_bytes(1000)

    assert len(SUPPORTED_HASH_ALGOS) == 4  # For now
    for algo in SUPPORTED_HASH_ALGOS:
        first = hash_message(payload, hash_algo=algo)
        assert 32 <= len(first) <= 64, len(first)
        second = hash_message(payload, hash_algo=algo)
        assert first == second

    with pytest.raises(ValueError, match="Unsupported"):
        hash_message(payload, hash_algo="XYZ")
def test_split_as_chunks_and_recombine():
    """Splitting then recombining restores the original bytestring."""
    payload = get_random_bytes(100)

    def roundtrip(chunk_size, with_padding):
        # Split, check uniform chunk length, recombine, compare.
        pieces = split_as_chunks(payload, chunk_size=chunk_size, must_pad=with_padding)
        assert all(len(piece) == chunk_size for piece in pieces)
        rebuilt = recombine_chunks(pieces, chunk_size=chunk_size, must_unpad=with_padding)
        assert rebuilt == payload

    roundtrip(25, True)   # even split, padded
    roundtrip(22, True)   # uneven split, padded
    roundtrip(25, False)  # even split, no padding needed

    # An uneven split without padding must be explicitly allowed.
    with pytest.raises(ValueError, match="size multiple of chunk_size"):
        split_as_chunks(payload, chunk_size=22, must_pad=False)

    pieces = split_as_chunks(payload, chunk_size=22, must_pad=False, accept_incomplete_chunk=True)
    assert not all(len(piece) == 22 for piece in pieces)
    assert recombine_chunks(pieces, chunk_size=22, must_unpad=False) == payload
def test_serialization_utilities(tmp_path):
    """Round-trip payloads through the str/bytes/file JSON helpers.

    bytes and UUID values serialize as BSON extended-JSON ``$binary``
    documents, and keys are always emitted in sorted order.
    """
    uid = uuid.UUID("7c0b18f5-f410-4e83-9263-b38c2328e516")
    payload = dict(b=b"xyz", a="hêllo", c=uid)

    serialized_str = dump_to_json_str(payload)
    # Keys are sorted
    assert (
        serialized_str
        == r'{"a": "h\u00eallo", "b": {"$binary": {"base64": "eHl6", "subType": "00"}}, "c": {"$binary": {"base64": "fAsY9fQQToOSY7OMIyjlFg==", "subType": "04"}}}'
    )
    deserialized = load_from_json_str(serialized_str)
    assert deserialized == payload

    serialized_str = dump_to_json_str(payload, ensure_ascii=False)  # Json arguments well propagated
    assert (
        serialized_str
        == r'{"a": "hêllo", "b": {"$binary": {"base64": "eHl6", "subType": "00"}}, "c": {"$binary": {"base64": "fAsY9fQQToOSY7OMIyjlFg==", "subType": "04"}}}'
    )
    deserialized = load_from_json_str(serialized_str)
    assert deserialized == payload

    serialized_str = dump_to_json_bytes(payload)
    # Keys are sorted
    assert (
        serialized_str
        == rb'{"a": "h\u00eallo", "b": {"$binary": {"base64": "eHl6", "subType": "00"}}, "c": {"$binary": {"base64": "fAsY9fQQToOSY7OMIyjlFg==", "subType": "04"}}}'
    )
    deserialized = load_from_json_bytes(serialized_str)
    assert deserialized == payload

    serialized_str = dump_to_json_bytes(payload, ensure_ascii=False)  # Json arguments well propagated
    assert (
        serialized_str
        == b'{"a": "h\xc3\xaallo", "b": {"$binary": {"base64": "eHl6", "subType": "00"}}, "c": {"$binary": {"base64": "fAsY9fQQToOSY7OMIyjlFg==", "subType": "04"}}}'
    )
    deserialized = load_from_json_bytes(serialized_str)
    assert deserialized == payload

    # File-based round trip.
    tmp_filepath = os.path.join(tmp_path, "dummy_temp_file.dat")
    serialized_str = dump_to_json_file(tmp_filepath, data=payload, ensure_ascii=True)  # Json arguments well propagated
    assert (
        serialized_str
        == b'{"a": "h\u00eallo", "b": {"$binary": {"base64": "eHl6", "subType": "00"}}, "c": {"$binary": {"base64": "fAsY9fQQToOSY7OMIyjlFg==", "subType": "04"}}}'
    )
    deserialized = load_from_json_file(tmp_filepath)
    assert deserialized == payload

    # Special tests for DATES: datetimes serialize as UTC $date documents,
    # so the same instant expressed in different zones serializes identically.
    utc_date = pytz.utc.localize(datetime(2022, 10, 10))
    pst_date = utc_date.astimezone(pytz.timezone("America/Los_Angeles"))

    payload1 = {"date": utc_date}
    serialized_str1 = dump_to_json_str(payload1)

    payload2 = {"date": pst_date}
    serialized_str2 = dump_to_json_str(payload2)

    assert serialized_str1 == r'{"date": {"$date": {"$numberLong": "1665360000000"}}}'
    assert serialized_str1 == serialized_str2

    deserialized = load_from_json_str(serialized_str1)
    assert deserialized == payload1
    assert deserialized == payload2
    utcoffset = deserialized["date"].utcoffset()
    assert utcoffset == timedelta(0)  # Date is returned as UTC in any case!
def test_generate_uuid0():
    """UUID0 values embed their timestamp and stay collision-free."""
    utc = pytz.UTC
    some_date = datetime(year=2000, month=6, day=12, tzinfo=timezone.min)
    some_timestamp = datetime.timestamp(some_date)

    uuid0 = generate_uuid0(some_timestamp)
    assert utc.localize(uuid0.datetime) == some_date
    assert uuid0.datetime_local != some_date.replace(tzinfo=None)  # Local TZ is used here
    assert uuid0.unix_ts == some_timestamp

    # "Now"-based generation never collides across 1000 draws...
    batch = {generate_uuid0().int for _ in range(1000)}
    assert len(batch) == 1000

    # ...and neither does fixed-timestamp generation.
    batch = {generate_uuid0(some_timestamp).int for _ in range(1000)}
    assert len(batch) == 1000

    uuid_test = generate_uuid0(0)
    assert uuid_test.unix_ts != 0  # Can't generate UUIDs with timestamp=0
def test_get_memory_rss_bytes():
    """Resident memory of the test process sits in a plausible range."""
    rss = get_memory_rss_bytes()
    assert 30 * 1024 ** 2 < rss
    assert rss < 200 * 1024 ** 2
def test_delete_filesystem_node_for_stream(tmp_path):
    """A filesystem-backed stream's node is deleted; memory streams are no-ops."""
    delete_filesystem_node_for_stream(BytesIO())  # Does nothing

    target_file = tmp_path / "target_file.txt"
    stream = open(target_file, "wb")
    try:
        stream.write(b"777")
    finally:
        stream.close()
    assert target_file.exists()

    # The (closed) stream still identifies its backing file for deletion.
    delete_filesystem_node_for_stream(stream)
    assert not target_file.exists()
def test_catch_and_log_exception():
    """catch_and_log_exception swallows exceptions both as a context manager
    and as a decorator; decorated callables return None on failure."""
    variable = None
    with catch_and_log_exception("testage1"):
        variable = 12
        raise RuntimeError
        variable = 13  # unreachable: proves execution stopped at the raise
    assert variable == 12

    @catch_and_log_exception("testage2")
    def myfunc(myarg):
        if myarg == 42:
            raise ValueError(myarg)
        return myarg

    result = myfunc(33)
    assert result == 33

    result = myfunc(42)  # Exception raised inside
    assert result is None

    class MyClass:
        # Lock consumed by @synchronized to serialize do_stuffs() calls.
        _lock = Lock()

        @synchronized
        @catch_and_log_exception("testage3")
        def do_stuffs(self, myarg):
            if myarg == 43:
                raise ValueError(myarg)
            return myarg

    my_instance = MyClass()
    result = my_instance.do_stuffs(32)
    assert result == 32

    result = my_instance.do_stuffs(43)  # Exception raised inside
    assert result is None
|
from scipy.signal import butter, lfilter, resample
from tqdm import tqdm
from pylab import genfromtxt
import scipy.io as io
import numpy as np
import pandas as pd
import lib.utils as utils
import random
import os
import sys
sys.path.append('..')
from methods import pulse_noise
def bandpass(sig, band, fs):
    """Apply a 5th-order Butterworth band-pass filter along axis 0 of *sig*.

    *band* is a (low, high) pair in Hz; *fs* is the sampling rate in Hz.
    """
    nyquist = fs / 2
    normalized_band = np.array(band) / nyquist
    numerator, denominator = butter(5, normalized_band, btype='bandpass')
    return lfilter(numerator, denominator, sig, axis=0)
# --- Experiment configuration -------------------------------------------
sample_freq = 200.0  # raw EEG sampling rate in Hz
epoc_window = 1.3 * sample_freq  # samples per feedback epoch (1.3 s)
# Subject identifiers used to build the per-subject CSV file names.
subjects = ['02', '06', '07', 11, 12, 13, 14, 16, 17, 18, 20, 21, 22, 23, 24, 26]
# NPP (pulse noise) parameters: [amplitude ratio, pulse frequency, proportion]
npp_params=[0.15, 5, 0.1]
#subjects = ['02', '06', '07', 11, 12, 13, 14, 16]  # only half the subjects
data_file = 'EEG_Data/ERN/raw/Data_S{}_Sess0{}.csv'
# Trial labels; column 1 of the labels CSV (header skipped).
y = genfromtxt('EEG_Data/ERN/raw/TrainLabels.csv', delimiter=',', skip_header=1)[:, 1]
# Accumulators for the clean (cl) and poisoned (po) datasets.
X_cl=[]
Y_cl=[]
X_po=[]
Y_po=[]
# First-iteration flags (concatenate cannot start from an empty array).
Ek_cl=0
Ek_po=0
# Pass 1: build the CLEAN dataset (the NPP injection branch is skipped).
for index in tqdm(range(len(subjects))):
    x = []
    e = []
    s = []
    clean=True  # clean pass: `if not clean` below never triggers
    for sess in range(5):  # sessions per subject (could be reduced; originally 5)
        sess = sess + 1
        file_name = data_file.format(subjects[index], sess)
        sig = np.array(pd.read_csv(file_name).values)  # read the session CSV
        EEG = sig[:, 1:-2]  # EEG channels (drop first and last two columns)
        Trigger = sig[:, -1]  # trigger channel
        idxFeedBack = np.where(Trigger == 1)[0]  # sample indices of feedback events
        if not clean:
            # Inject the narrow-period-pulse (NPP) backdoor at each feedback epoch.
            npp = pulse_noise([1, 56, int(epoc_window)], freq=npp_params[1], sample_freq=sample_freq,
                              proportion=npp_params[2])
            amplitude = np.mean(np.std(EEG, axis=0)) * npp_params[0]
            for _, idx in enumerate(idxFeedBack):
                idx = int(idx)
                EEG[idx:int(idx + epoc_window), :] = np.transpose(npp.squeeze() * amplitude,
                                                                  (1, 0)) + EEG[idx:int(idx + epoc_window), :]
        sig_F = bandpass(EEG, [1.0, 40.0], sample_freq)  # 1-40 Hz band-pass
        for _, idx in enumerate(idxFeedBack):
            idx = int(idx)
            s_sig = sig_F[idx:int(idx + epoc_window), :]  # one 1.3 s epoch
            s_sig = resample(s_sig, int(epoc_window * 128 / sample_freq))  # resample to 128 Hz
            x.append(s_sig)
            s.append(idx)
    x = np.array(x)
    x = np.transpose(x, (0, 2, 1))  # -> (epochs, channels, samples)
    s = np.squeeze(np.array(s))
    y = np.squeeze(np.array(y))
    x = utils.standard_normalize(x)
    if Ek_cl == 0:  # work around np.concatenate rejecting an empty first array
        X_cl = x
        Y_cl = y[index * 340:(index + 1) * 340]  # assumes 340 trials per subject -- TODO confirm
        Ek_cl = 1
    else:
        X_cl = np.concatenate((X_cl, x), axis=0)
        Y_cl = np.concatenate((Y_cl, y[index * 340:(index + 1) * 340]), axis=0)

# Fix class imbalance by downsampling both classes to the smaller one.
x1 = X_cl[np.where(Y_cl == 0)]
x2 = X_cl[np.where(Y_cl == 1)]
sample_num = min(len(x1), len(x2))
idx1, idx2 = utils.shuffle_data(len(x1)), utils.shuffle_data(len(x2))
X_cl = np.concatenate([x1[idx1[:sample_num]], x2[idx2[:sample_num]]], axis=0)
Y_cl = np.concatenate([np.zeros(shape=[sample_num]), np.ones(shape=[sample_num])], axis=0)
# Pass 2: build the POISONED dataset (identical pipeline, NPP injected).
for index in tqdm(range(len(subjects))):
    x = []
    e = []
    s = []
    clean=False  # poisoned pass: the injection branch below runs
    for sess in range(5):  # sessions per subject (could be reduced; originally 5)
        sess = sess + 1
        file_name = data_file.format(subjects[index], sess)
        sig = np.array(pd.read_csv(file_name).values)  # read the session CSV
        EEG = sig[:, 1:-2]
        Trigger = sig[:, -1]
        idxFeedBack = np.where(Trigger == 1)[0]
        if not clean:
            # Add the pulse-noise backdoor to every feedback epoch, scaled to
            # npp_params[0] of the mean channel standard deviation.
            npp = pulse_noise([1, 56, int(epoc_window)], freq=npp_params[1], sample_freq=sample_freq,
                              proportion=npp_params[2])
            amplitude = np.mean(np.std(EEG, axis=0)) * npp_params[0]
            for _, idx in enumerate(idxFeedBack):
                idx = int(idx)
                EEG[idx:int(idx + epoc_window), :] = np.transpose(npp.squeeze() * amplitude,
                                                                  (1, 0)) + EEG[idx:int(idx + epoc_window), :]
        sig_F = bandpass(EEG, [1.0, 40.0], sample_freq)
        for _, idx in enumerate(idxFeedBack):
            idx = int(idx)
            s_sig = sig_F[idx:int(idx + epoc_window), :]
            s_sig = resample(s_sig, int(epoc_window * 128 / sample_freq))
            x.append(s_sig)
            s.append(idx)
    x = np.array(x)
    x = np.transpose(x, (0, 2, 1))
    s = np.squeeze(np.array(s))
    y = np.squeeze(np.array(y))
    x = utils.standard_normalize(x)
    if Ek_po == 0:  # work around np.concatenate rejecting an empty first array
        X_po = x
        Y_po = y[index * 340:(index + 1) * 340]
        Ek_po= 1
    else:
        X_po = np.concatenate((X_po, x), axis=0)
        Y_po = np.concatenate((Y_po, y[index * 340:(index + 1) * 340]), axis=0)

# Fix class imbalance, mirroring the clean pass.
x1 = X_po[np.where(Y_po == 0)]
x2 = X_po[np.where(Y_po == 1)]
sample_num = min(len(x1), len(x2))
#idx1, idx2 = utils.shuffle_data(len(x1)), utils.shuffle_data(len(x2))
# NOTE(review): idx1/idx2 are reused from the clean pass above (the re-shuffle
# is commented out), so the poisoned set selects the same balanced sample
# indices as the clean set -- presumably to keep both sets aligned; confirm
# this is intended and that the class sizes match between passes.
X_po = np.concatenate([x1[idx1[:sample_num]], x2[idx2[:sample_num]]], axis=0)
Y_po = np.concatenate([np.zeros(shape=[sample_num]), np.ones(shape=[sample_num])], axis=0)
# Add a singleton "image channel" axis: (epochs, 1, channels, samples).
X_cl=X_cl[:, np.newaxis, :, :]
X_po=X_po[:, np.newaxis, :, :]
leng=len(X_cl)
idx_al=np.arange(leng)
# 86% of indices go to clean training; the remainder is halved into a
# poison partition and a test partition (same indices reused for both the
# clean and poisoned arrays, keeping them trial-aligned).
idx_cl,_, idx_po, _ = utils.split_data([idx_al, idx_al], split=0.86, shuffle=True)
idx_po,_,idx_test_po,_=utils.split_data([idx_po, idx_po], split=0.5, shuffle=True)
x_train=X_cl[idx_cl]
y_train=Y_cl[idx_cl]
x_poison=X_po[idx_po]
y_poison=Y_po[idx_po]
x_test=X_cl[idx_test_po]
y_test=Y_cl[idx_test_po]
x_test_poison=X_po[idx_test_po]  # poisoned copies of the test trials
y_test_poison=Y_po[idx_test_po]
# Carve a validation split off the training data.
x_train, y_train, x_validation, y_validation = utils.split_data([x_train, y_train], split=0.8, shuffle=True)

# Persist every partition into one .mat file named after the NPP parameters.
save_dir = 'EEG_Data/ERN/'
save_file = save_dir + 'data2-{}-{}-{}.mat'.format(npp_params[0], npp_params[1],npp_params[2])
io.savemat(save_file, {'x_train': x_train,'y_train': y_train, 'x_validation':x_validation,'y_validation':y_validation,
                       'x_poison': x_poison,'y_poison':y_poison,'x_test':x_test,'y_test':y_test ,
                       'x_test_poison':x_test_poison,'y_test_poison':y_test_poison})
import os
import shutil

# Stage the training script inside a dedicated project directory.
project_folder = './Sklearn'
os.makedirs(project_folder, exist_ok=True)
shutil.copy('train_iris.py', project_folder)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pandas as pd
import pickle
import random
from scipy import misc
import sklearn.preprocessing
from src.util.BaseProcess import BaseProcess
from conf import FeatureCNNConf
from conf import DataType
from src.util.filter.InfoFilter import InfoFilter
from src.util.filter.RecordFilter import RecordFilter
class FeaturesCNN(BaseProcess):
    """Extract user-behavior features and serialize them as CNN "image" rows.

    Each user's behavior records are padded/interpolated to 60 rows, merged
    with the user's profile info, tiled into a repeated shuffled column
    layout, min-max normalized, and appended to per-user .uid/.label files.
    """

    def __init__(self, user_type, output_data_path):
        BaseProcess.__init__(self, user_type, output_data_path)
        self.process_name = 'feature'
        self.content_type = 'cnn'
        # Filters must be injected via the setters below before process().
        self.info_filter = None
        self.record_filter = None

    def process(self):
        """Run the feature-extraction pipeline.

        :return: self, so calls can be chained.
        :raise RuntimeError: if the info/record filters were never set.
        """
        # FIX: `raise "<string>"` is illegal (string exceptions were removed
        # from Python); raise a real exception object instead.  Also use
        # `is None` rather than `== None`.
        if self.info_filter is None or self.record_filter is None:
            raise RuntimeError("Class %s does not have filter." % self.__class__)
        record_dataframe_dict = self.record_filter.get_data()
        self.trans_image(record_dataframe_dict)
        return self

    def trans_image(self, record_dataframe_dict):
        """Convert each user's records into normalized feature rows on disk.

        :param record_dataframe_dict: mapping of
            "<fromnum>_<month>_<day>_<window>" keys to record DataFrames.
        """
        # Lazy-load the per-user profile dict.  FIX: open the pickle in
        # binary mode and close the handle (the original leaked it).
        with open(self.info_filter.get_output_path(self.user_type + '_dict.pkl'), 'rb') as pkl_file:
            info_dict = pickle.load(pkl_file)
        min_max_scaler = sklearn.preprocessing.MinMaxScaler()
        for key, record_df in record_dataframe_dict.items():  # key = fromnum, month_ind, day_ind, window_ind
            # Pad the record frame to exactly 60 rows.
            index = pd.DataFrame({'index': pd.Series(range(60), index=range(60))})
            ind_record_df = pd.merge(index, record_df, left_index=True, right_index=True, how='left')
            ind_record_df.drop(['index'], axis=1, inplace=True)
            # Fill gaps by interpolation, then back-fill the leading rows.
            ind_record_df.interpolate(inplace=True)
            ind_record_df.fillna(method='bfill', inplace=True)
            # Skip users whose records are entirely empty.
            if ind_record_df.dropna(how='all').empty:
                continue
            ind_record_df.fillna(0, inplace=True)
            # Merge in the user's profile info; skip unknown users.
            from_num = key.strip().split('_')[0]
            # FIX: dict.has_key() no longer exists; membership test instead.
            if from_num in info_dict:
                info = info_dict[from_num]
            else:
                continue
            # Broadcast each profile field across the 60 record rows.
            info_cols = {}
            for (k, val) in dict(info).items():
                info_cols[k] = pd.Series(60 * [val])
            info_df = pd.DataFrame(info_cols)
            ind_record_info_df = pd.merge(ind_record_df, info_df, left_index=True, right_index=True)
            # Build the repeated, per-repetition deterministically shuffled
            # column layout.  NOTE(review): shuffle mutates the shared
            # FeatureCNNConf.COLUMNS_NAME list in place -- confirm intended.
            columns_name_list = []
            columes_list = FeatureCNNConf.COLUMNS_NAME
            for i in range(FeatureCNNConf.REPETITION_COUNTS):
                random.seed(7 * i)
                random.shuffle(columes_list)
                columns_name_list.extend(columes_list)
            data_df = pd.DataFrame([])
            for i, name in enumerate(columns_name_list):
                data_df.insert(0, '%s_%s' % (name, i), ind_record_info_df[name])
            # Column-wise min-max normalization, then render as an image.
            data_arr = min_max_scaler.fit_transform(data_df)
            columes_count = FeatureCNNConf.REPETITION_COUNTS * FeatureCNNConf.FEATURE_COUNTS
            data_pic = misc.imresize(data_arr.reshape(60, columes_count), 1.0)
            # FIX: reshape(1, -11) is invalid (only -1 may denote the unknown
            # dimension); flatten to a single row with -1.
            data_x_str = data_pic.reshape(1, -1).astype(str).tolist()[0]
            data_x_str = ','.join(data_x_str)
            self.mkdirs(self.get_output_path(from_num + '.uid'))
            with open(self.get_output_path(from_num + '.uid'), 'a') as data_x_file:
                data_x_file.write(data_x_str + '\n')
            data_y = DataType[self.user_type].value
            with open(self.get_output_path(from_num + '.label'), 'a') as data_y_file:
                data_y_file.write(str(data_y) + '\n')
        print("Transfer to picture and save finished")

    def set_info_filter(self, info_filter):
        """Inject the InfoFilter supplying user profile data."""
        self.info_filter = info_filter

    def set_record_filter(self, record_filter):
        """Inject the RecordFilter supplying user behavior records."""
        self.record_filter = record_filter
if __name__ == '__main__':
    # Wire up the filters and run CNN feature extraction for normal users.
    info = InfoFilter(DataType.normal.name,
                      '/Users/mayuchen/Documents/Python/Repository/DL/Other/UserBehaviorMining/resource/raw/info/普通用户号码_md5.xlsx',
                      '/Users/mayuchen/Documents/Python/Repository/DL/Other/UserBehaviorMining/resource/data/')
    record = RecordFilter(DataType.normal.name,
                          '/Users/mayuchen/Documents/Python/Repository/DL/Other/UserBehaviorMining/resource/raw/normal_user',
                          '/Users/mayuchen/Documents/Python/Repository/DL/Other/UserBehaviorMining/resource/data/')
    # FIX: FeaturesCNN.__init__ takes (user_type, output_data_path); the
    # original passed the output path twice and raised TypeError.
    users = FeaturesCNN(DataType.normal.name, '/Users/mayuchen/Documents/Python/Repository/DL/Other/UserBehaviorMining/resource/data/')
    users.set_info_filter(info)
    users.set_record_filter(record)
    users.process()
|
# def in_box():
# customer_name = str('Jasmine')
# current_date = str('Novemeber 27, 2020')
#
# def inputes():
# default_survey = '''{customer_name}, your feedback is realy important. Please give a positive feedback.
# Have a great day ahead. Happy shopping. Today's date is {current_date}.'''
#
# def codes():
# name_code = "{customer_name}"
# date_code = "{current_date}"
#
# def change(codes, inputes, in_box):
# if codes in inputes:
# default_survey = default_survey.replace('{customer_name}', customer_name)
# default_survey = default_survey.replace('{current_date}', current_date)
# print(default_survey)
#
# change(codes, inputes, in_box)
# Note :
# str.replace(old, new [, count])
# REPLACE CODES HERE
# Placeholder tokens expected inside survey templates; each is meant to be
# substituted with the matching runtime value below via str.replace().
customer_name_code = "{customer_name}"
date_code = "{current_date}"
product_name_code = "{product_name}"
product_color_code = "{product_color}"
product_type_code = "{product_type}"
product_size_code = "{product_size}"
product_weight_code = "{product_weight}"
product_price_code = "{product_price}"
repromised_date_code = "{repromised_date}"
# coddes ={
# 'customer_name_code' : "{customer_name}",
# 'date_code' : "{current_date}",
# 'product_name_code' : "{product_name}",
# 'product_color_code' : "{product_color}",
# 'product_type_code' : "{product_type}",
# 'product_size_code' : "{product_size}",
# 'product_weight_code' : "{product_weight}",
# 'product_price_code' : "{product_price}",
# 'repromised_date_code' : "{repromised_date}"
#
# }
# REPLACE FROM HERE
# Sample substitution values.  NOTE(review): "Novemeber" and "realy" are
# typos inside runtime strings -- left untouched here; fix deliberately.
# NOTE(review): the variable is `repromise_date` but the placeholder above
# is "{repromised_date}" -- the names don't line up; confirm which is meant.
customer_name = str('Jasmine')
current_date = str('Novemeber 27, 2020')
product_name = 'Razor'
product_type = 'Shaver Machine'
product_price = str('Rs. 300')
product_color = 'Black'
repromise_date = 'December 1, 2020'
# Survey templates containing the placeholders (one-line and two-line).
default_survey = '''{customer_name}, your feedback is realy important. Please give a positive feedback. Have a great day ahead. Happy shopping. Today's date is {current_date}.'''
default_survey_2 = '''{customer_name}, your feedback is realy important. Please give a positive feedback.
Have a great day ahead. Happy shopping. Today's date is {current_date}.'''
# https://stackoverflow.com/questions/30239092/how-to-get-multiline-input-from-user
# if customer_name_code in default_survey:
# new = default_survey.replace('{customer_name}', customer_name)
# new = new.replace('{current_date}', current_date)
# print(new)
# https://www.youtube.com/watch?v=kxPXS9uGpwQ&ab_channel=Harshitvashisth
# Read `count` lines from the user and accumulate them into `inp`.
count = 8
type_box = ''
# FIX: `inp` was never initialized, so `inp + type_box` raised NameError on
# the first iteration.  The `if i <= count` guard was also removed: it was
# always true for i in range(count).
inp = ''
for i in range(count):
    type_box = input('Type here: \n')
    inp = inp + type_box
    print(i)
user_typed_here = type_box  # keeps only the LAST line, as before
print('\n', user_typed_here)
# if customer_name_code in user_typed_here:
# new = user_typed_here.replace('{customer_name}', customer_name)
# new = new.replace('{current_date}', current_date)
# print(new)
# else:
# print(type_box)
# Example
# no_of_lines = 5
# lines = ""
# for i in xrange(no_of_lines):
# lines+=input()+"\n"
#
# print(lines) |
class Solution:
    def isPowerOfFour(self, n: int) -> bool:
        """Return True iff n == 4**k for some integer k >= 0."""
        if n < 1:
            return False
        # Strip every factor of 4; a power of four reduces exactly to 1.
        while n % 4 == 0:
            n //= 4
        return n == 1
|
import statistics

# Descriptive statistics for a small sample; results are computed but
# intentionally not printed (REPL-style demo).
data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 4.5]
sample_mean = statistics.mean(data)
sample_median = statistics.median(data)
sample_variance = statistics.variance(data)
|
import pygame
import sys
import random
import math
from OpenGL.GL import *
from OpenGL.GLU import *
# general OpenGL initialization
def init_opengl(width, height):
    """Configure viewport, perspective projection and basic GL state.

    width/height: window size in pixels (height must be non-zero, since it
    is used as the divisor of the aspect ratio).
    """
    glViewport(0, 0, width, height)
    # 90-degree FOV perspective projection with near=0.1, far=100.
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(90.0, float(width)/height, 0.1, 100.0)
    # Reset the modelview matrix for rendering.
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    glClearColor(0.1, 0.1, 0.15, 0.0)  # dark blue-grey background
    glPointSize(2)  # particles render as 2px points
class Particle:
    """A point particle attracted toward the fixed point (0, 0, -5)."""

    def __init__(self):
        # Random start position in a box in front of the camera
        # (same random.random() call order as before, so seeded runs match).
        self.x = random.random() * 10 - 5
        self.y = random.random() * 10 - 5
        self.z = random.random() * -10
        # Random start velocity, each component in [-1, 1).
        magnitude = 2
        self.vx = (random.random() - 0.5) * magnitude
        self.vy = (random.random() - 0.5) * magnitude
        self.vz = (random.random() - 0.5) * magnitude

    def render(self):
        """Emit the particle as a white GL point (must be inside glBegin/glEnd)."""
        glColor3f(1, 1, 1)
        glVertex3f(self.x, self.y, self.z)

    def update(self):
        """Advance one Euler step toward the gravity source at (0, 0, -5)."""
        dt = 0.1
        gravity_strength = 0.5
        # Acceleration points from the particle toward (0, 0, -5).
        accel = (
            (0 - self.x) * dt * gravity_strength,
            (0 - self.y) * dt * gravity_strength,
            (-5 - self.z) * dt * gravity_strength,
        )
        # Integrate velocity, then position.
        self.vx += accel[0] * dt
        self.vy += accel[1] * dt
        self.vz += accel[2] * dt
        self.x += self.vx * dt
        self.y += self.vy * dt
        self.z += self.vz * dt
def main():
    """Open a pygame/OpenGL window and run the particle simulation loop."""
    width = 800
    height = 600
    size = width, height

    pygame.init()
    screen = pygame.display.set_mode(size, pygame.DOUBLEBUF | pygame.OPENGL)
    init_opengl(width, height)

    # generate some particles
    particles = []
    # FIX: xrange() does not exist on Python 3; range() works on both.
    for i in range(1000):
        particles.append(Particle())

    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT: sys.exit()

        # run the simulation for all the particles
        for particle in particles:
            particle.update()

        # render the particles as a single GL_POINTS batch
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glBegin(GL_POINTS)
        for particle in particles:
            particle.render()
        glEnd()
        pygame.display.flip()

if __name__ == "__main__":
    main()
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.helm.check.kubeconform import chart
from pants.backend.helm.check.kubeconform.chart import (
KubeconformChartFieldSet,
KubeconformCheckChartRequest,
)
from pants.backend.helm.target_types import HelmChartTarget
from pants.backend.helm.target_types import rules as target_types_rules
from pants.backend.helm.testutil import (
HELM_CHART_FILE,
HELM_TEMPLATE_HELPERS_FILE,
HELM_VALUES_FILE,
K8S_CRD_FILE,
K8S_CUSTOM_RESOURCE_FILE,
K8S_SERVICE_TEMPLATE,
)
from pants.core.goals.check import CheckResults
from pants.core.util_rules import config_files, external_tool, source_files, stripped_source_files
from pants.engine import process
from pants.engine.addresses import Address
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with every rule the kubeconform chart check needs."""
    return RuleRunner(
        target_types=[HelmChartTarget],
        rules=[
            *config_files.rules(),
            *chart.rules(),
            *external_tool.rules(),
            *process.rules(),
            *source_files.rules(),
            *stripped_source_files.rules(),
            *target_types_rules(),
            # Allows tests to request CheckResults for a chart-check request.
            QueryRule(CheckResults, (KubeconformCheckChartRequest,)),
        ],
    )
__COMMON_TEST_FILES = {
"src/mychart/Chart.yaml": HELM_CHART_FILE,
"src/mychart/values.yaml": HELM_VALUES_FILE,
"src/mychart/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
"src/mychart/templates/service.yaml": K8S_SERVICE_TEMPLATE,
"src/mychart/templates/pod.yaml": dedent(
"""\
apiVersion: v1
kind: Pod
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
containers:
- name: myapp-container
image: busybox:1.28
initContainers:
- name: init-service
image: busybox:1.29
- name: init-db
image: example.com/containers/busybox:1.28
"""
),
}
def test_skip_check(rule_runner: RuleRunner) -> None:
    """`skip_kubeconform=True` yields a successful, empty check result."""
    files = dict(__COMMON_TEST_FILES)
    files["src/mychart/BUILD"] = "helm_chart(skip_kubeconform=True)"
    rule_runner.write_files(files)

    addr = Address("src/mychart")
    checked = run_check(rule_runner, addr)

    assert checked.exit_code == 0
    assert checked.checker_name == "kubeconform"
    assert len(checked.results) == 1
    result = checked.results[0]
    assert result.partition_description == addr.spec
    assert not result.stdout
def test_valid_chart(rule_runner: RuleRunner) -> None:
    """A well-formed chart validates cleanly and prints the summary line."""
    files = dict(__COMMON_TEST_FILES)
    files["src/mychart/BUILD"] = "helm_chart()"
    rule_runner.write_files(files)

    addr = Address("src/mychart")
    checked = run_check(rule_runner, addr)

    summary = "Summary: 2 resources found in 2 files - Valid: 2, Invalid: 0, Errors: 0, Skipped: 0\n"
    assert checked.exit_code == 0
    assert checked.checker_name == "kubeconform"
    assert len(checked.results) == 1
    result = checked.results[0]
    assert result.partition_description == addr.spec
    assert result.stdout == summary
def test_valid_chart_strict(rule_runner: RuleRunner) -> None:
    """Strict mode still accepts a fully valid chart."""
    files = dict(__COMMON_TEST_FILES)
    files["src/mychart/BUILD"] = "helm_chart(kubeconform_strict=True)"
    rule_runner.write_files(files)

    addr = Address("src/mychart")
    checked = run_check(rule_runner, addr)

    summary = "Summary: 2 resources found in 2 files - Valid: 2, Invalid: 0, Errors: 0, Skipped: 0\n"
    assert checked.exit_code == 0
    assert len(checked.results) == 1
    result = checked.results[0]
    assert result.partition_description == addr.spec
    assert result.stdout == summary
def test_invalid_chart_rejecting_kinds(rule_runner: RuleRunner) -> None:
    """Resource kinds listed in `kubeconform_reject_kinds` make the check fail."""
    rule_runner.write_files(
        {
            **__COMMON_TEST_FILES,
            "src/mychart/BUILD": dedent(
                """\
                helm_chart(kubeconform_reject_kinds=["Pod"])
                """
            ),
        }
    )

    addr = Address("src/mychart")
    checked = run_check(rule_runner, addr)

    # The Pod template counts as an error (not an invalid document).
    expected_result = dedent(
        """\
        mychart/templates/pod.yaml - Pod mychart failed validation: prohibited resource kind Pod
        Summary: 2 resources found in 2 files - Valid: 1, Invalid: 0, Errors: 1, Skipped: 0
        """
    )
    assert checked.exit_code == 1
    assert len(checked.results) == 1
    assert checked.results[0].partition_description == addr.spec
    assert checked.results[0].stdout == expected_result
def test_invalid_chart(rule_runner: RuleRunner) -> None:
    """A schema-invalid manifest (non-integer `replicas`) fails validation."""
    rule_runner.write_files(
        {
            "src/mychart/BUILD": "helm_chart()",
            "src/mychart/Chart.yaml": HELM_CHART_FILE,
            # Deliberately broken: `replicas: asd"` is not a valid integer.
            "src/mychart/templates/replication_controller.yml": dedent(
                """\
                apiVersion: v1
                kind: ReplicationController
                metadata:
                  name: "bob"
                spec:
                  replicas: asd"
                  selector:
                    app: nginx
                  templates:
                    metadata:
                      name: nginx
                      labels:
                        app: nginx
                    spec:
                      containers:
                      - name: nginx
                        image: nginx
                        ports:
                        - containerPort: 80
                """
            ),
        }
    )

    addr = Address("src/mychart")
    checked = run_check(rule_runner, addr)

    assert checked.exit_code == 1
    assert len(checked.results) == 1
    assert checked.results[0].partition_description == addr.spec
    assert (
        "Summary: 1 resource found in 1 file - Valid: 0, Invalid: 1, Errors: 0, Skipped: 0"
        in checked.results[0].stdout
    )
# Chart containing a CRD plus a custom resource that uses it; kubeconform
# has no schema for the custom kind unless told to ignore/skip it.
_CRD_TEST_FILES = {
    "src/mychart/Chart.yaml": HELM_CHART_FILE,
    "src/mychart/crds/myplatform.yaml": K8S_CRD_FILE,
    "src/mychart/templates/mycustom.yml": K8S_CUSTOM_RESOURCE_FILE,
}
def test_fail_using_crd(rule_runner: RuleRunner) -> None:
    """A custom resource fails validation when its CRD schema is unknown."""
    files = dict(_CRD_TEST_FILES)
    files["src/mychart/BUILD"] = "helm_chart()"
    rule_runner.write_files(files)

    checked = run_check(rule_runner, Address("src/mychart"))
    assert checked.exit_code == 1
def test_pass_using_crd_ignoring_schemas(rule_runner: RuleRunner) -> None:
    """`kubeconform_ignore_missing_schemas=True` lets the CRD-backed chart pass."""
    files = {
        **_CRD_TEST_FILES,
        "src/mychart/BUILD": "helm_chart(kubeconform_ignore_missing_schemas=True)",
    }
    rule_runner.write_files(files)
    outcome = run_check(rule_runner, Address("src/mychart"))
    assert outcome.exit_code == 0
def test_pass_using_crd_skipping_kinds(rule_runner: RuleRunner) -> None:
    """Skipping the custom kind entirely avoids the missing-schema failure."""
    build_file = dedent(
        """\
        helm_chart(kubeconform_skip_kinds=["MyPlatform"])
        """
    )
    rule_runner.write_files({**_CRD_TEST_FILES, "src/mychart/BUILD": build_file})
    outcome = run_check(rule_runner, Address("src/mychart"))
    assert outcome.exit_code == 0
def run_check(rule_runner: RuleRunner, address: Address) -> CheckResults:
    """Resolve *address* to a kubeconform field set and run the check rule on it."""
    rule_runner.set_options(["--kubeconform-summary"], env_inherit=PYTHON_BOOTSTRAP_ENV)
    field_set = KubeconformChartFieldSet.create(rule_runner.get_target(address))
    return rule_runner.request(CheckResults, [KubeconformCheckChartRequest([field_set])])
|
# coding=utf-8
# Scrapy project settings for the `myspider` crawler.
BOT_NAME = 'myspider'
SPIDER_MODULES = ['myspider.spiders']
NEWSPIDER_MODULE = 'myspider.spiders'
ROBOTSTXT_OBEY = False  # do NOT obey robots.txt
LOG_LEVEL = 'DEBUG'  # logging level
CONCURRENT_REQUESTS = 20  # number of concurrent requests
DOWNLOAD_DELAY = 0.01  # delay between requests (seconds)
REDIRECT_ENABLED = True  # follow HTTP redirects
#HTTPERROR_ALLOWED_CODES = [302, 405, 303, 400,]  # HTTP error statuses allowed through
COOKIES_ENABLED = False  # if enabled, requests carry cookies; some sites use cookies to detect crawlers
COOKIES_DEBUG = False  # enable cookies debugging
DEPTH_LIMIT = 0  # 0 = unlimited crawl depth
DEPTH_PRIORITY = 0  # depth-based priority adjustment
RETRY_ENABLED= True  # allow retries
DOWNLOADER_MIDDLEWARES = {
    'myspider.middlewares.RandomUAMiddleware': 400,
    #'myspider.middlewares.RandomUAMiddleware2': 401,
    #'myspider.middlewares.RandomProxyMiddleware': 402,
    #'scrapy_proxies.RandomProxy': 100,
    #'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
}
# Only needed when scrapy_proxies.RandomProxy is enabled:
#PROXY_LIST = '/path/to/proxy/list'
#PROXY_MODE = 0
#CUSTOM_PROXY = 'http://host:port'
# These pipelines apply globally; a spider's custom_settings pipeline config overrides them.
ITEM_PIPELINES = {
    'scrapy_redis.pipelines.RedisPipeline': 300,
}
IMAGES_STORE = 'images'
IMAGES_MIN_HEIGHT = 110  # minimum image height
IMAGES_MIN_WIDTH = 110  # minimum image width
IMAGES_EXPIRES = 30  # image expiry (days)
# Thumbnails:
#IMAGES_THUMBS = {
#    'small': (50, 50),
#    'big': (270, 270),
#}
EXTENSIONS = {
    'scrapy.extensions.statsmailer.StatsMailer': 500,
}
#STATSMAILER_RCPTS = ['receiver@domain.com']
# Outgoing mail settings (used by the StatsMailer extension).
MAIL_FROM = 'scrapy@domain.com'
MAIL_HOST = 'smtp.domain.com'
MAIL_PORT = 25
MAIL_USER = 'username'
MAIL_PASS = 'password'
MAIL_TLS = False
MAIL_SSL = False
MONGODB_URI = 'mongodb://localhost:27017'
MONGODB_DATABASE = 'common'
# URL de-duplication rule.
DUPEFILTER_CLASS = 'myspider.lib.custom_filters.CustomURLFilter'
# Pool of user agents for RandomUAMiddleware.
USER_AGENTS = [
    'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',
    'Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
    'Mozilla/5.0 (Linux; Android 7.0; LON-AL00 Build/HUAWEILON-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043221 Safari/537.36 V1_AND_SQ_7.0.0_676_YYB_D QQ/7.0.0.3135 NetType/4G WebP/0.3.0 Pixel/1440',
    'Mozilla/5.0 (Linux; U; Android 5.1.1; zh-CN; R7Plusm Build/LMY47V) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.5.2.942 Mobile Safari/537.36',
    'Mozilla/5.0 (Linux; Android 5.0.2; Redmi Note 2 Build/LRX22G; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043221 Safari/537.36 V1_AND_SQ_7.0.0_676_YYB_D QQ/7.0.0.3135 NetType/WIFI WebP/0.3.0 Pixel/1920',
    'Mozilla/5.0 (Linux; Android 6.0; HUAWEI CRR-UL00 Build/HUAWEICRR-UL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043220 Safari/537.36 MicroMessenger/6.5.7.1041 NetType/4G Language/zh_CN',
    'Mozilla/5.0 (Linux; Android 6.0.1; vivo X9Plus Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043221 Safari/537.36 V1_AND_SQ_7.0.0_676_YYB_D QQ/7.0.0.3135 NetType/4G WebP/0.3.0 Pixel/1080',
    'Mozilla/5.0 (iPhone 92; CPU iPhone OS 10_3_2 like Mac OS X) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.0 MQQBrowser/7.4.1 Mobile/14F89 Safari/8536.25 MttCustomUA/2 QBWebViewType/1 WKType/1',
    'Mozilla/5.0 (Linux; U; Android 5.1; zh-CN; PRO 5 Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.5.2.942 Mobile Safari/537.36',
    'Mozilla/5.0 (Linux; Android 5.1.1; OPPO A53 Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043220 Safari/537.36 MicroMessenger/6.5.8.1060 NetType/4G Language/zh_CN',
    'Mozilla/5.0 (Linux; U; Android 6.0.1; zh-cn; MI MAX Build/MMB29M) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.146 Mobile Safari/537.36 XiaoMi/MiuiBrowser/8.7.8'
]
# The settings below are scrapy-redis related:
#SCHEDULER = "scrapy_redis.scheduler.Scheduler"  # scheduler reimplemented by scrapy_redis
#DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"  # request dedup strategy reimplemented by scrapy_redis
#SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"  # json/msgpack are not supported here under python3
#SCHEDULER_PERSIST = False  # False (default): clear the redis dedup set and scheduling queue when the spider closes
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'  # priority queue
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'  # FIFO queue
#SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'  # stack
#SCHEDULER_IDLE_BEFORE_CLOSE = 10  # idle wait; only effective when the queue class is SpiderQueue or SpiderStack
#REDIS_ITEMS_KEY = '%(spider)s:items'  # redis key the item pipeline serializes items under
#REDIS_ITEMS_SERIALIZER = 'json.dumps'
#REDIS_HOST = 'localhost'
#REDIS_PORT = 6379
#REDIS_URL = 'redis://user:pass@hostname:6379'  # takes precedence over REDIS_HOST/REDIS_PORT
#REDIS_URL = 'redis://localhost:6379'
#REDIS_PARAMS = {}  # extra redis parameters, e.g. timeout/socket
#REDIS_START_URLS_AS_SET = False  # if True use sadd/spop, de-duplicating start urls
#REDIS_START_URLS_KEY = '%(name)s:start_urls'
#REDIS_ENCODING = 'utf-8'
#DEFAULT_REQUEST_HEADERS = {}  # override default request headers
#USER_AGENT = 'scrapy-redis (+https://github.com/rolando/scrapy-redis)'  # default UA
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 22:12:22 2019

@author: HP
"""
import math
import heapq  # NOTE(review): unused, kept because other parts of the project may rely on this module's imports

# Read n (array length), k (maximum step back) and the array A.
n, k = input().split()
n = int(n)
k = int(k)
A = [int(x) for x in input().split()]

# C[i] = minimal cost of reaching element i, where reaching i from any of the
# previous k positions multiplies the best previous cost by A[i].
# BUG FIX: C must be seeded with A[0]; the original started from an empty
# list and indexed C[0] on the first iteration (IndexError), and C[n-1] was
# out of range.
C = [A[0]]
for i in range(1, n):
    best = math.inf
    for j in range(max(0, i - k), i):
        best = min(best, C[j])
    C.append(best * A[i])
print(C[n - 1] % 1000000007)
from command_interface import Command
from receivers import *
class SandwichCommand(Command):
    """
    A concrete / specific Command class, implementing execute()
    which calls a specific or an appropriate action of a method
    from a Receiver class.

    Args:
        sandwich (Sandwich): Receiver instance to be attached to the command
    """
    def __init__(self, sandwich: Sandwich):
        self._sandwich = sandwich

    def execute(self):
        # Delegate the action to the attached receiver.
        self._sandwich.make_sandwich()
class SaladCommand(Command):
    """Concrete Command that delegates execute() to a Salad receiver."""
    def __init__(self, salad: Salad):
        self._salad = salad

    def execute(self):
        self._salad.make_salad()
class TacoCommand(Command):
    """Concrete Command that delegates execute() to a Taco receiver."""
    def __init__(self, taco: Taco):
        self._taco = taco

    def execute(self):
        self._taco.make_taco()
# -*- coding: ms949 -*-
"""KMeans demo: cluster synthetic blobs and plot the result."""
# BUG FIX: `sklearn.datasets.samples_generator` was deprecated and removed in
# scikit-learn 0.24; `make_blobs` now lives directly in `sklearn.datasets`.
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt  # pyplot is the supported plotting interface (pylab is discouraged)
from sklearn.cluster import KMeans

# 300 synthetic points drawn around 4 centres.
X, Y = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

kmeans = KMeans(n_clusters=4)  # n_clusters: number of clusters
kmeans.fit(X)
y_kmeans = kmeans.predict(X)

print(X)
print(y_kmeans)

plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50)
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1],
            c='red', s=200, alpha=0.5)  # cluster centres; c: colour, s: size, alpha: transparency
plt.show()
# Find the single missing value in a sequence that should contain 1..n.
n = int(input())
# The input line holds the n-1 remaining numbers.
arr = list(map(int,input().strip().split()))[:n-1]
sum1 = sum(arr)
# Gauss formula: sum of 1..n.
sum2 = (n*(n+1))//2
# The difference is exactly the missing number.
ans = sum2 - sum1
print(ans)
# -*- encoding: utf-8 -*-
"""
Topic:定义数据库模型
"""
from sqlalchemy.engine.url import URL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column, Integer, String, Text, DateTime
from settings import DATABASE
def db_connect():
    """
    Connect to the database configured in ``settings.DATABASE``.

    :return: a new SQLAlchemy Engine
    """
    return create_engine(URL(**DATABASE))
def create_tables(engine):
    """
    Create the tables for every model class derived from Base.

    :param engine: SQLAlchemy engine to create the tables on
    :return: None
    """
    Base.metadata.create_all(engine)
# Declarative base class that all ORM models below inherit from.
Base = declarative_base()
class ArticleRule(Base):
    """
    Article crawling rule table.
    """
    __tablename__ = 'article_rule'

    id = Column(Integer, primary_key=True)
    # Rule name.
    name = Column(String(30))
    # Name of the table the scraped articles are written to.
    write_to = Column(String(50))
    # Allowed domains list.
    allow_domains = Column(String(255))
    # Start URLs list.
    start_urls = Column(String(255))
    # XPath of the "next page" link.
    next_page = Column(String(255))
    # Regular expression (substring) an article URL must match.
    allow_url = Column(String(255))
    # XPath of the region article links are extracted from.
    extract_from = Column(String(255))
    # XPath of the article title.
    title_xpath = Column(String(255))
    # XPath of the article content.
    content_xpath = Column(String(255))
class Article(Base):
    """
    Article table (rows scraped according to an ArticleRule).
    """
    # NOTE(review): __tablename__ is empty -- presumably assigned at runtime
    # from ArticleRule.write_to before table creation; verify, because
    # create_all() with an empty table name will fail.
    __tablename__ = ''

    id = Column(Integer, primary_key=True)
    title = Column(String(255))
    content = Column(Text)
class IncrementVerify(Base):
    """
    Incremental-crawl verification table.
    """
    __tablename__ = 'increment_verify'

    id = Column(Integer, primary_key=True)
    # Name of the crawler that produced the record.
    crawler_name = Column(String(255))
    # 32-char digest used to detect already-crawled content (presumably MD5 -- TODO confirm).
    crawler_md = Column(String(32))
|
# -*- coding: utf-8 -*-
# @Time    : 2019/5/16 1:12 PM
# @Author  : Shande
# @Email   : seventhedog@163.com
# @File    : __init__.py.py
# @Software: PyCharm
from flask import Blueprint

# Blueprint for the PC admin area of the Tvzhijian app.
pcadmin = Blueprint('pcadmin', __name__)

# Imported at the bottom so the view module can import `pcadmin` from this
# package without a circular-import error (standard Flask blueprint pattern).
import app.Tvzhijian.pcadmin.pc_admin
|
#!/usr/bin/python
class Heap(object):
    """Array-backed binary heap.

    ``typ == 0`` gives a min-heap, any other value a max-heap. All methods
    operate in place on the list passed in.
    """

    def build(self, elements, typ):
        """Heapify *elements* in place (bottom-up, O(n))."""
        # BUG FIX: use integer division -- `len(elements)/2` is a float in
        # Python 3, and the float index later crashes list subscription.
        i = len(elements) // 2 - 1
        while i >= 0:
            self.heapify(elements, i, typ)
            i -= 1

    def heapify(self, elements, index, typ):
        """Sift elements[index] down until the heap property holds below it."""
        m = self.getMIndex(elements, index, typ)
        if m != index:
            elements[index], elements[m] = elements[m], elements[index]
            self.heapify(elements, m, typ)

    def getMIndex(self, elements, index, typ):
        """Return the index of the heap-top-most value among index and its children."""
        left = 2 * index + 1
        right = 2 * index + 2
        length = len(elements)
        m = index
        # Equivalent to the original branch tree: pick whichever existing
        # child "wins" the comparison against the current best.
        if left < length and self.compareOps(elements[left], elements[m], typ):
            m = left
        if right < length and self.compareOps(elements[right], elements[m], typ):
            m = right
        return m

    def compareOps(self, op1, op2, typ):
        """Ordering predicate: '<' for a min-heap (typ == 0), '>' otherwise."""
        if typ == 0:
            return op1 < op2
        return op1 > op2

    def removeTop(self, elements, typ):
        """Remove the heap root and return it (the original discarded the value)."""
        top = elements[0]
        elements[0] = elements[len(elements) - 1]
        elements.pop()
        if elements:  # avoid sifting an empty list
            self.heapify(elements, 0, typ)
        return top
# Min-heap demo: build, then repeatedly remove the smallest element.
min_heap = Heap()
values = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
min_heap.build(values, 0)
print(values)
for _ in range(9):
    min_heap.removeTop(values, 0)
    print(values)

# Max-heap demo: same, removing the largest element each time.
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
max_heap = Heap()
max_heap.build(values, 1)
print(values)
for _ in range(9):
    max_heap.removeTop(values, 1)
    print(values)
from django.views.generic import View
from django.shortcuts import render
from django.contrib.admin.models import LogEntry, ADDITION
from django.contrib.contenttypes.models import ContentType
from blog.models import Post
from events.models import Event
from songs.models import Song
def get_or_none(model, **kwargs):
    """Fetch the single *model* row matching **kwargs, or None when absent."""
    try:
        instance = model.objects.get(**kwargs)
    except model.DoesNotExist:
        instance = None
    return instance
class IndexView(View):
    """Front page: recent posts, current events and recently added songs."""

    template_name = "frontpage/index.html"
    post_count = 2   # number of recent posts shown
    song_count = 10  # number of recent song additions shown

    def get(self, request):
        # Admin log entries record when songs were added; use them to find
        # the most recent additions.
        song_type = ContentType.objects.get(app_label="songs", model="song")
        entries = LogEntry.objects.filter(
            content_type=song_type, action_flag=ADDITION
        ).order_by("-action_time")[:IndexView.song_count]
        # BUG FIX (perf): the original called get_or_none() twice per log
        # entry (once in the filter test and once in the tuple), issuing
        # every lookup twice; it also compared with `!= None`.
        songs = []
        for entry in entries:
            song = get_or_none(Song, pk=entry.object_id)
            if song is not None:
                songs.append((entry.action_time, song))
        return render(
            request,
            self.template_name,
            {
                'posts': Post.objects.all().order_by('-date')[0:IndexView.post_count],
                'events': Event.current.all(),
                'songs': songs,
            }
        )
|
class simple:
    """Tiny demo class that reports its object id when constructed."""

    def __init__(self):
        # BUG FIX: the original format string had no placeholder
        # ("constructor called,".format(id(self))), so the id was silently
        # dropped from the output.
        print("constructor called, {}".format(id(self)))
# Demo: instantiating prints the constructor message (module-level side effect).
a = simple()
|
# Generated by Django 3.0.6 on 2020-05-16 20:49
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the obsolete ``done`` flag from ``todo``."""

    dependencies = [
        ('todos', '0002_auto_20200516_2046'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='todo',
            name='done',
        ),
    ]
|
#!/usr/bin/env python3
import io
import csv
import xlrd
import utils
def download():
    """Download the PMC supplementary XLS table (PMID 22647690) into ../data."""
    utils.download_file('https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3534468/bin/' +
                        'supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2.xls',
                        '../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2.xls')
def convert_to_csv():
    """Convert the first sheet of the downloaded XLS supplement to CSV (first 9 columns)."""
    sheet = xlrd.open_workbook(
        '../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2.xls'
    ).sheet_by_index(0)
    with io.open('../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2.csv', 'w', newline='',
                 encoding='utf-8') as f:
        writer = csv.writer(f)
        rows = iter(sheet.get_rows())
        # Skip the three header rows.
        for _ in range(3):
            next(rows)
        for row in rows:
            writer.writerow([str(cell.value).strip() for cell in row][0:9])
def map_to_drugbank():
    """Resolve the drug names in the converted CSV to DrugBank IDs.

    Reads the CSV produced by convert_to_csv(), maps the three name columns
    via utils.name_to_drugbank_id(), de-duplicates (similar drug, partner)
    pairs, and writes three outputs: mapped pairs, mapped triples, and the
    names that could not be resolved.

    :return: [matched, duplicated, unmatched] pair counts
    """
    matched_pairs = []
    matched_triples = []
    existing_pairs = set()   # (similar_drug_id, partner_id) already emitted
    unmapped_names = set()   # names utils.name_to_drugbank_id() could not resolve
    total = 0
    duplicated = 0
    with io.open('../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2.csv', 'r',
                 encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # skip the header row
        # Input columns:
        # 0 - number
        # 1 - drug A
        # 2 - Effect interaction drugA-drugB
        # 3 - drug B
        # 4 - Similar_drug
        # 5 - Tanimoto coefficient (TC)
        # 6 - Name-interaction
        # 7 - Procedence
        # 8 - Interaction explanation
        for row in reader:
            row = [x.strip() for x in row[0:9]]
            total += 1
            id1 = utils.name_to_drugbank_id(row[1])
            id2 = utils.name_to_drugbank_id(row[3])
            id3 = utils.name_to_drugbank_id(row[4])
            if id1 is None:
                unmapped_names.add(row[1])
            if id2 is None:
                unmapped_names.add(row[3])
            if id3 is None:
                unmapped_names.add(row[4])
            direction = row[7].lower()
            # Output columns:
            # 0 - number
            # 1 - Drugbank A
            # 2 - Drugbank B
            # 3 - Drugbank Similar_drug
            # 4 - drug A
            # 5 - drug B
            # 6 - Similar_drug
            # 7 - Name-interaction
            # 8 - Procedence
            # 9 - Tanimoto coefficient (TC)
            # 10 - Effect interaction drugA-drugB
            # 11 - Interaction explanation
            output = [row[0], id1, id2, id3, row[1], row[3], row[4], row[6], row[7], row[5], row[2], row[8]]
            if id3 is not None:
                # 'Procedence' states which of A/B the similar drug interacts with.
                id_partner = id1 if direction == 'interaction-a' else (id2 if direction == 'interaction-b' else None)
                if id_partner is not None:
                    if (id3, id_partner) in existing_pairs or (id_partner, id3) in existing_pairs:
                        duplicated += 1
                        continue
                    else:
                        existing_pairs.add((id3, id_partner))
                    matched_pairs.append(output)
            if id2 is not None and id1 is not None:
                matched_triples.append(output)
    header = ['number', 'Drugbank A', 'Drugbank B', 'Drugbank Similar_drug', 'drug A', 'drug B', 'Similar_drug',
              'Name-interaction', 'Procedence', 'Tanimoto coefficient (TC)', 'Effect interaction drugA-drugB',
              'Interaction explanation']
    with io.open('../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2_pairs.csv', 'w',
                 newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        writer.writerow(header)
        for row in matched_pairs:
            writer.writerow(row)
    with io.open('../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2_triplets.csv', 'w',
                 newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        writer.writerow(header)
        for row in matched_triples:
            writer.writerow(row)
    with io.open('../data/pmid_22647690/unmapped_names.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f, delimiter=',', quotechar='"')
        for row in unmapped_names:
            writer.writerow([row])
    # Matched, Duplicated, Unmatched
    return [len(matched_pairs), duplicated, total - duplicated - len(matched_pairs)]
def process() -> [int]:
    """Run the XLS-to-CSV conversion, then the DrugBank mapping.

    :return: [matched, duplicated, unmatched] counts from map_to_drugbank()
    """
    # NOTE(review): `-> [int]` is a list literal, not a proper typing
    # annotation; `list[int]` would be the conventional spelling.
    convert_to_csv()
    return map_to_drugbank()
def get_all_interaction_pairs() -> []:
    """Load the pairs CSV written by map_to_drugbank() and return its records.

    Each record is [similar_drug_id, similar_drug_name, partner_id,
    partner_name, tanimoto_coefficient]; the partner side (A or B) is chosen
    by the 'Procedence' column ('interaction-a' / 'interaction-b').
    """
    result = []
    with io.open('../data/pmid_22647690/supp_amiajnl-2012-000935_amiajnl-2012-000935supp_table2_pairs.csv', 'r',
                 encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # skip the header row
        for row in reader:
            procedence = row[8].lower()
            id_partner = row[1] if procedence == 'interaction-a' else (
                row[2] if procedence == 'interaction-b' else None)
            name_partner = row[4] if procedence == 'interaction-a' else (
                row[5] if procedence == 'interaction-b' else None)
            id3 = row[3]
            result.append([id3, row[6], id_partner, name_partner, float(row[9])])
    return result
|
from django.urls import path
from .views import index_view,account_of_user,profile_change,details_update,user_todo,change_details,history,view,completed, about
# URL routes for the `home` application.
urlpatterns = [
    path('', index_view, name = 'Home'),
    path('account/', account_of_user, name = 'Account'),
    path('updateprofile/', profile_change, name = 'Updateprofile'),
    path('updatedetails/', details_update, name='Updatedata'),
    path('add/', user_todo, name = 'Todo'),
    path('changedetails/<int:item_id>/', change_details, name='Change'),
    path('history/', history, name = 'History'),
    path('view/<int:item_id>/', view, name = 'View'),
    path('complete/<int:item_id>/', completed, name = 'Complete'),
    path('about/', about, name = 'About')
]
# Application namespace (routes reverse as `home:<name>`).
app_name = 'home'
Pn = n! – «число перестановок» из n различных знаков (т.е. знаки нельзя повторять)
m^n – число наборов по n из m знаков, если знаки можно повторять
Обобщение: Сколькими способами из n элементов можно выбрать m, учитывая, что
сначала выбираем первую фигуру, потом – вторую и т.д.:
n!/(n-m)! - («Число размещений из n по m»)
Обобщение: Сколькими способами из n элементов можно выбрать m, не учитывая при
выборе порядок этих элементов?
n!/(m!·(n-m)!) - («Число сочетаний из n по m»)
|
'''Compiles the NumPy ufuncs in `c/`.
To use from the command line, run the following script:
`python setup.py build_ext --inplace` and make sure you are in the `model`
directory.
'''
import numpy
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from os.path import join as path_join
from os import walk
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the C ufunc extension.

    Every ``.c`` source found (recursively) under the ``c/`` directory is
    compiled into a single extension module named ``numpyColorgorical``.

    NOTE(review): numpy.distutils is deprecated (unavailable on Python >= 3.12);
    a migration to setuptools/meson should be planned.
    """
    config = Configuration('.', parent_package, top_path)

    # Add all .c files from the `c` directory.
    np_srcs = []
    for dirpath, dirnames, filenames in walk('c'):
        # endswith() is the idiomatic (and slice-safe) suffix test.
        np_srcs.extend(path_join(dirpath, fn) for fn in filenames if fn.endswith('.c'))

    config.add_extension('numpyColorgorical', np_srcs, extra_compile_args=['-std=c99'])
    return config
# Entry point: `python setup.py build_ext --inplace` (run from the `model` dir).
if __name__ == "__main__":
    setup(configuration=configuration)
|
from roboclaw import *
# Movement primitives for a three-motor robot driven by two RoboClaw boards.
# NOTE(review): 128 and 129 appear to be controller addresses (two motors on
# board 128, one on board 129) -- confirm against the wiring.

def counterClockwise(speed):
    # Spin in place counter-clockwise at the given speed.
    M1Forward(speed, 128)
    M2Forward(speed, 128)
    M1Forward(speed, 129)

def clockwise(speed):
    # Spin in place clockwise at the given speed.
    M1Backward(speed, 128)
    M2Backward(speed, 128)
    M1Backward(speed, 129)

def right():
    # Translate right: both board-128 motors backward, board-129 motor
    # forward at double speed (holonomic mix -- TODO confirm geometry).
    M1Backward(35, 128)
    M2Backward(35, 128)
    M1Forward(70, 129)

def left():
    # Translate left (mirror of right()).
    M1Forward(35, 128)
    M2Forward(35, 128)
    M1Backward(70, 129)

def forward():
    # Drive forward at full speed; the small value on 129 is presumably a
    # trim correction -- TODO confirm.
    M1Forward(127, 128)
    M2Backward(127, 128)
    M1Forward(2, 129)

def back():
    # Drive backward at full speed.
    M1Backward(127, 128)
    M2Forward(127, 128)
    M1Forward(0, 129)

def stop():
    # Halt all three motors (speed 0).
    M1Forward(0, 128)
    M2Forward(0, 128)
    M1Forward(0, 129)
def box():
    # Trace a rectangle: forward, right, back, left, pausing between legs.
    # NOTE(review): `time` is not imported here; it must be re-exported by
    # `from roboclaw import *` -- TODO confirm.
    forward()
    time.sleep(1)
    stop()
    time.sleep(.3)
    right()
    time.sleep(1.5)
    stop()
    time.sleep(.3)
    back()
    time.sleep(.8)
    stop()
    time.sleep(.3)
    left()
    time.sleep(1.5)
    stop()
    time.sleep(.3)
    stop()
def crazy():
    # Demo routine: four quick mixed-direction bursts (0.3 s each), then stop.
    # NOTE(review): relies on `time` coming from `from roboclaw import *`.
    M1Forward(120, 128)
    M2Backward(120, 128)
    M1Forward(2, 129)
    time.sleep(.3)
    M1Backward(60, 128)
    M2Backward(60, 128)
    M1Forward(120, 129)
    time.sleep(.3)
    M1Backward(120, 128)
    M2Forward(120, 128)
    M1Forward(0, 129)
    time.sleep(.3)
    M1Forward(60, 128)
    M2Forward(60, 128)
    M1Backward(120, 129)
    time.sleep(.3)
    stop()
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from app.rentacar.models import RentACar
class RentACarAdmin(admin.ModelAdmin):
    """Admin configuration for RentACar rentals."""
    model = RentACar
    # Columns shown in the admin change list.
    list_display = ['id', 'vehicle', 'customer', 'is_back']

admin.site.register(RentACar, RentACarAdmin)
|
__title__ = 'names'
__version__ = '0.0.1'
__licence__ = '????'
# Losely based on code developed by Trey Hunner at https://github.com/treyhunner/names
import os
from random import random
from bisect import bisect
class SingletonMetaClass(type):
    """Metaclass that turns a class into a singleton.

    The first instantiation creates the instance (cached on ``cls.instance``);
    every later call returns the same object.
    """

    def __init__(cls, name, bases, dict):
        super(SingletonMetaClass, cls).__init__(name, bases, dict)
        original_new = cls.__new__

        def my_new(cls, *args, **kwds):
            # BUG FIX: identity test against the sentinel ('is', not '=='):
            # '==' could invoke an arbitrary __eq__ on the cached instance.
            if cls.instance is None:
                cls.instance = original_new(cls, *args, **kwds)
            return cls.instance

        cls.instance = None
        cls.__new__ = staticmethod(my_new)
class NameGenerator(object):
    """Generate random person names from US-census style frequency files.

    Each data file has whitespace-separated rows of
    ``name frequency cumulative_frequency rank``; names are drawn with a
    probability proportional to their frequency via bisection on the
    cumulative column.
    """
    # BUG FIX: the original had `__module__ = SingletonMetaClass`, a Python-2
    # `__metaclass__` typo that corrupted the class's `__module__` attribute
    # (it must be a string) without making the class a singleton. Removed.

    def __init__(self, data_dir=None):
        """
        Prepare a generator. Data is only read once :meth:`load_data` or
        :meth:`get_full_name` is called.

        :param data_dir: The directory containing the data files
        """
        self._last_file_name = 'dist.all.last'
        self._female_file_name = 'dist.female.first'
        self._male_file_name = 'dist.male.first'
        # Caches filled by load_data(); empty until then.
        self._female_names = []
        self._male_names = []
        self._last_names = []
        self.data_dir = None
        if data_dir is not None:
            self.set_data_dir(data_dir)

    def set_data_dir(self, data_dir=None):
        """
        Set the directory/folder that the data files reside in.

        :param data_dir: The directory (defaults to the current working directory)
        """
        self.data_dir = os.getcwd() if data_dir is None else data_dir

    def get_data_dir(self):
        """
        Get the full path to the directory containing the data files.

        :return: path to the data files or None if it is not set
        """
        return self.data_dir

    def load_data(self):
        """
        Read all three data files once and cache them.

        Caching is more time-efficient when creating many random names, at
        the cost of extra memory.
        """
        if self.data_dir is None:
            raise Exception('Name Creator requires the data directory to be set.')
        self._last_names = self._file_reader(os.path.join(self.data_dir, self._last_file_name))
        self._female_names = self._file_reader(os.path.join(self.data_dir, self._female_file_name))
        self._male_names = self._file_reader(os.path.join(self.data_dir, self._male_file_name))

    def _file_reader(self, file_path):
        """
        Read a census-style file and collect the names and their cumulative
        frequencies.

        :param file_path: file with ``name frequency cumulative rank`` rows
        :return: dict with parallel lists under 'names' and 'cumulatives'
        """
        data = {'names': [], 'cumulatives': []}
        with open(file_path) as fp:
            prev_cumulative_frequency = -1.0
            for line in fp:
                name, frequency, cumulative_frequency, rank = line.split()
                data['names'].append(name)
                cumulative_frequency = float(cumulative_frequency)
                # BUG FIX: the original compared int(prev * 1000) with the
                # *float* cumulative * 1000, so equal entries were almost
                # never detected. Round both sides to the same precision.
                if int(prev_cumulative_frequency * 1000) == int(cumulative_frequency * 1000):
                    # Equal up to three decimals: nudge the value so bisect()
                    # can distinguish consecutive entries.
                    cumulative_frequency = prev_cumulative_frequency + 0.0001
                data['cumulatives'].append(cumulative_frequency)
                prev_cumulative_frequency = cumulative_frequency
        return data

    def get_full_name(self, gender=None):
        """
        Generate a random name of the specified gender; the gender is chosen
        randomly when none is given.

        :param gender: One of 'male', 'm', 'female', or 'f' (case-insensitive)
        :return: dict{'last': <last name>, 'first': <first name>}
        """
        if gender is None:
            # BUG FIX: `random` is the *function* imported from the random
            # module, so the original `random.choice(...)` raised
            # AttributeError. Flip a fair coin instead.
            gender = 'female' if random() < 0.5 else 'male'
        if gender.lower() in ('female', 'f'):
            gender = 'F'
        elif gender.lower() in ('male', 'm'):
            gender = 'M'
        else:
            raise Exception('Invalid gender provided: ' + str(gender))

        # BUG FIX: the caches are initialised to [] (never None), so the
        # original `is None` test always took the cached branch, even before
        # load_data() had run. Test for emptiness instead.
        if not (self._last_names and self._female_names and self._male_names):
            last_name = self._get_name_from_file(os.path.join(self.data_dir, self._last_file_name))
            if gender == 'F':
                first_name = self._get_name_from_file(os.path.join(self.data_dir, self._female_file_name))
            else:
                first_name = self._get_name_from_file(os.path.join(self.data_dir, self._male_file_name))
        else:
            last_name = self._get_name_from_data(self._last_names)
            if gender == 'F':
                first_name = self._get_name_from_data(self._female_names)
            else:
                first_name = self._get_name_from_data(self._male_names)
        return {'last': last_name,
                'first': first_name}

    def _get_name_from_file(self, file_name):
        """
        Read a data file and randomly pick one name from its distribution.

        BUG FIX: the original kept the cumulative scores as *strings* and
        bisected them against a float, a TypeError on Python 3. Reuse
        _file_reader() so parsing is done (and fixed) in one place.

        :param file_name: The file that is to be read for data
        :return: the randomly selected name
        """
        return self._get_name_from_data(self._file_reader(file_name))

    def _get_name_from_data(self, data):
        """
        Randomly pick a name from parallel 'names'/'cumulatives' lists,
        weighted by cumulative frequency.

        :param data: dict with keys 'names' and 'cumulatives'
        :return: the randomly selected name
        """
        names = data['names']
        cumulative_scores = data['cumulatives']
        # Draw uniformly in [0, total) and locate the owning bucket.
        # (Renamed from 'max' to avoid shadowing the builtin.)
        total = float(cumulative_scores[-1])
        random_value = random() * total
        return names[bisect(cumulative_scores, random_value)]
|
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from scipy import ndimage, signal
import hylite
from hylite import HyHeader
import hylite.reference.features as ref
from hylite.hyfeature import HyFeature, MultiFeature, MixedFeature
from matplotlib.ticker import AutoMinorLocator
class HyData(object):
"""
A generic class for encapsulating hyperspectral (points and images), and associated metadata such as georeferencing, bands etc.
This class is based around a data numpy array containing the hyperspectral data in a representation such that the last dimension
corresponds to individual bands (e.g. data[pointID, band] or data[px,py,band]). Note that the data array can be empty!
"""
    @classmethod
    def to_grey(cls, data):
        """
        Return a copy of the specified data array as uint8 greyscale, linearly
        rescaled so nanmin maps to 0 and nanmax to 255. Useful for OpenCV
        operations, compression or mapping to RGB.
        """
        return np.uint8(255 * (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data)))

    #####################################
    ## Instance methods
    #####################################
    def __init__(self, data, **kwds):
        """
        Create an image object from a data array.

        *Arguments*:
         - data = a numpy array such that the last dimension corresponds to
                  individual bands (e.g. data[pointID, band] or data[px,py,band]).
                  May be None (e.g. a point cloud with no spectra attached yet).

        *Keywords*:
         - header = associated header file. Default is None (create a new header).
        """
        # Copy reference to data. Note that this can be None!
        self.data = data
        # Cache the dtype (None when there is no data array).
        if not data is None:
            self.dtype = data.dtype
        else:
            self.dtype = None

        # header data
        self.set_header(kwds.get('header', None))

    def __getitem__(self, key):
        """
        Expose the underlying data array when using [ ] operators.
        """
        return self.data.__getitem__(key)

    def __setitem__(self, key, value):
        """
        Expose the underlying data array when using [ ] operators.
        """
        self.data.__setitem__(key, value)

    def copy(self, data=True):
        """
        Make a deep copy of this image instance.

        *Arguments*:
         - data = True if a copy of the data should be made, otherwise only copy header.

        *Returns*
         - a new HyData instance.
        """
        if not data or self.data is None:
            return HyData( None, header=self.header.copy())
        else:
            return HyData( self.data.copy(), header=self.header.copy())
    def set_header(self, header):
        """
        Loads associated header data into self.header.

        Arguments:
         - header = a HyHeader object, or None to create a new empty header.
        """
        # No header given - create an empty one.
        if header is None:
            self.header = HyHeader()
            return

        # Set the header.
        self.header = header

    def push_to_header(self):
        """
        Update the header's samples/bands/lines entries to match this data.
        """
        self.header['samples'] = str(self.samples())
        self.header['bands'] = str(self.band_count())
        self.header['lines'] = str(self.lines())

    #############################################
    ## Expose important parts of the header file
    #############################################
    def has_wavelengths(self):
        """
        True if the header data contains wavelength information.
        """
        return self.header.has_wavelengths()

    def get_wavelengths(self):
        """
        Get the wavelength that corresponds with each band of this image, as stored in the .hdr file.
        """
        return self.header.get_wavelengths()

    def has_band_names(self):
        """
        True if the header data contains band names.
        """
        return self.header.has_band_names()

    def get_band_names(self):
        """
        Return band names as defined in the header file.
        """
        return self.header.get_band_names()

    def has_fwhm(self):
        """
        True if the header data contains band widths (FWHM).
        """
        # (Docstring fixed: previously said "band names" - copy/paste slip.)
        return self.header.has_fwhm()

    def get_fwhm(self):
        """
        Return band widths (FWHM) as defined in the header file.
        """
        return self.header.get_fwhm()

    def set_wavelengths(self, wavelengths):
        """
        Set the wavelengths associated with this hyperspectral data (one per band, or None).
        """
        if not wavelengths is None:
            assert len(wavelengths) == self.band_count(), "Error - wavelengths must be specified for each band."
        self.header.set_wavelengths(wavelengths)

    def set_band_names(self, names ):
        """
        Set the band names associated with this hyperspectral data (one per band, or None).
        """
        if not names is None:
            assert len(names) == self.band_count(), "Error - band names must be specified for each band."
        self.header.set_band_names( names )

    def set_fwhm(self, fwhm):
        """
        Set the band widths (FWHM) associated with this hyperspectral data (one per band, or None).
        """
        # NOTE(review): the assert message says "wavelengths" but this checks fwhm.
        if not fwhm is None:
            assert len(fwhm) == self.band_count(), "Error - wavelengths must be specified for each band."
        self.header.set_fwhm(fwhm)
    def is_image(self):
        """
        Return true if this dataset is an image (i.e. data array has dimension [x,y,b]).
        """
        if self.data is None: # for point clouds data can be none
            return False
        return len(self.data.shape) == 3

    def is_point(self):
        """
        Return true if this dataset is a point cloud or related dataset (i.e. data array has
        dimension [idx,b]). Note that this will return true for spectral libraries and other
        'cloud like' datasets.
        """
        if self.data is None: # for point clouds data can be none
            return True
        return len(self.data.shape) == 2

    def is_classification(self):
        """
        Return true if the header's 'file type' marks this as a classification product.
        """
        return 'classification' in self.header['file type'].lower()

    ###################################
    ## Data dimensions and properties
    ###################################
    def band_count(self):
        """
        Return the number of bands in this dataset (size of the last data axis; 0 when empty).
        """
        if self.data is None: return 0
        return self.data.shape[-1]

    def samples(self):
        """
        Return data.shape[0]: the first spatial axis for images, or the number
        of points for 1D (point-cloud) data. 0 when there is no data.
        (Docstring corrected: the old text claimed point clouds return 1,
        but the code returns shape[0].)
        """
        if self.data is None:
            return 0
        return self.data.shape[0]

    def lines(self):
        """
        Return the number of lines: the second data axis for images, 1 for
        1D (point-cloud) data, 0 when there is no data.
        """
        if self.data is None:
            return 0
        if len(self.data.shape) > 2:
            return self.data.shape[1]
        else:
            return 1

    def is_int(self):
        """
        Return true if this dataset contains data with integer precision.
        """
        return np.issubdtype( self.data.dtype, np.integer )

    def is_float(self):
        """
        Return true if this dataset contains data with floating point precision.
        """
        return np.issubdtype( self.data.dtype, np.floating )
#############################
## masking and band removal
#############################
def export_bands(self, bands):
    """
    Export a specified band range to a new HyData instance.

    *Arguments*:
     - bands = either:
         (1) a tuple containing the (min, max) wavelength to extract. If range is a tuple, -1 can be
             used to specify the first or last band index.
         (2) a list of band indices/wavelengths/names, or a boolean mask, such that
             image.data[..., bands] is exported to the new image.

    *Returns*:
     - a copy of this dataset containing only the requested bands.
    """
    # wrap individual integers or floats in a list
    if isinstance(bands, int) or isinstance(bands, float):
        bands = [bands]

    # build mask that is True for bands to REMOVE
    mask = np.full(self.band_count(), True)
    if isinstance(bands, np.ndarray) and bands.dtype == bool:
        # n.b. fixed: the np.bool alias was removed in numpy >= 1.24; the builtin bool
        # matches numpy boolean dtypes here.
        # mask is a numpy array containing True for bands to keep, so flip it to get bands to remove.
        mask = np.logical_not(bands)
    elif isinstance(bands, tuple) and len(bands) == 2:
        # get indices of bands to keep and flag these as False in mask.
        if bands[0] == -1: bands = (0, bands[1])
        if bands[1] == -1: bands = (bands[0], self.band_count())
        mn = self.get_band_index(bands[0])
        mx = self.get_band_index(bands[1])
        mask[mn:mx] = False
    else:
        # convert wavelengths / band names to indices and flag these bands as False in mask.
        bands = list(bands)
        for i, b in enumerate(bands):
            if isinstance(b, float) or isinstance(b, str):
                bands[i] = self.get_band_index(b)
        mask[bands] = False

    # check that we're leaving at least one band....
    assert (mask == False).any(), "Error - cannot export image with no bands."

    # quick exit if we are exporting all bands
    if (mask == False).all():  # no bands are masked
        return self.copy()

    # copy this dataset and apply the band subset
    subset = self.copy()
    subset.header.drop_bands(mask)  # apply mask to header data
    keep = np.logical_not(mask)
    subset.data = self.data[..., keep].copy()  # copy so the subset owns its memory

    # special case for spectral libraries (which carry per-band upper/lower bounds)
    if getattr(self, 'upper', None) is not None:
        subset.upper = self.upper[..., keep]
    if getattr(self, 'lower', None) is not None:
        subset.lower = self.lower[..., keep]
    return subset
def resample(self, wavelengths):
    """
    Return a new dataset resampled to the specified wavelengths using nearest-neighbour
    selection (the closest existing band is taken for each requested wavelength; no
    averaging is done - see hylite.filter.sample for more advanced resampling).

    Note that the original band wavelengths are preserved rather than overwritten, and
    duplicate nearest bands are not repeated, so the number of output bands MAY NOT equal
    the number of wavelengths provided!

    *Arguments*:
     - wavelengths = the wavelengths (list of floats) to resample to. MUST be in ascending order.
    *Returns*:
     - a resampled HyData instance.
    """
    indices = [self.get_band_index(w) for w in wavelengths]
    return self.export_bands(indices)
def delete_nan_bands(self, inplace=True):
    """
    Remove bands that contain only nans.

    *Arguments*:
     - inplace = True if this operation should be applied to this dataset in situ (default).
    *Returns*:
     - a copy with the nan bands removed if inplace is False; otherwise None (the dataset
       is modified in place).
    """
    if self.data.ndim == 3:  # image data
        keep = np.isfinite(self.data).any(axis=(0, 1))
    else:  # point cloud data
        assert self.data.ndim == 2, "Weird error?"
        keep = np.isfinite(self.data).any(axis=0)
    cpy = self.export_bands(keep)  # drop bands with no finite values at all
    if not inplace:
        return cpy
    self.header = cpy.header
    self.data = cpy.data
def set_as_nan(self, value):
    """
    Permanently replace the specified value with this dataset's nodata value: np.nan for
    floating point data, or the header's 'data ignore value' (default 0) for integer data.
    Useful for handling no-data values.

    *Arguments*:
     - value = the value to replace.
    """
    if self.is_int():
        # integer arrays cannot store NaN, so use the header's ignore value instead
        ignore = int(self.header.get("data ignore value", 0))
        self.data[self.data == value] = ignore
        self.header["data ignore value"] = str(ignore)
    else:
        self.data[self.data == value] = np.nan
def mask_bands(self, mn, mx=None, val=np.nan):
    """
    Masks a specified range of bands, useful for removing water features etc.

    *Arguments*:
     - mn = the start of the band mask (as per get_band_index(...)).
     - mx = the end of the band mask (as per get_band_index(...)). Can be None (default) to
            mask a single band, or -1 to mask up to the last band.
     - val = the value to set masked bands to. Default is np.nan. Set to None to keep the
             values but only flag the bands in the bad band list.
    """
    # resolve the half-open band range [idx0, idx1) to mask
    idx0 = self.get_band_index(mn)
    if mx is None:
        # n.b. fixed: the single-band case previously ignored `val` (always wrote NaN)
        # and never flagged the band in the bbl, contradicting this docstring.
        idx1 = idx0 + 1
    elif mx == -1:
        idx1 = self.band_count()
    else:
        idx1 = self.get_band_index(mx)

    # update bad band list
    bbl = np.full(self.band_count(), True)
    if self.header.has_bbl():
        bbl = self.header.get_bbl()
    bbl[idx0:idx1] = False
    self.header.set_bbl(bbl)

    # overwrite masked values (skipped when val is None - bands are then only flagged)
    if val is not None:
        self.data[..., idx0:idx1] = val
def mask_water_features(self, **kwds):
    """
    Mask typical atmospheric water absorption features. By default this removes bands between:
     - 960 - 990 nm
     - 1320 - 1500 nm
     - 1780 - 2050 nm
     - 2400 - 2502 nm

    *Keywords*:
     - mask = mask custom bands. This should be a list of tuples containing the minimum and
              maximum wavelength/index of each region to mask.
    """
    default = [(960.0, 990.0), (1320.0, 1500.0), (1780.0, 2050.0), (2400.0, 2502.0)]
    bands = kwds.get("mask", default)
    # mask bands (best-effort: ranges outside this sensor's wavelength range are skipped)
    for mn, mx in bands:
        try:
            self.mask_bands(mn, mx)
        except Exception:
            # n.b. fixed: a bare `except:` here also swallowed KeyboardInterrupt/SystemExit.
            pass  # ignore errors associated with out of range etc.
# delete/export
#########################
## band getters/setters
#########################
def get_band(self, b):
    """
    Return a single band from this dataset. Integers are treated as band indices, strings
    as band names, and floats as wavelengths (the closest band is taken).

    *Arguments*:
     - b = the band to get (int index, str name or float wavelength).
    *Returns*:
     - a sliced np.array exposing the band. Note that this is NOT a copy.
    """
    idx = self.get_band_index(b)
    return self.data[..., idx]
def get_band_grey(self, b):
    """
    Return the specified band as a uint8 greyscale image compatible with OpenCV.
    """
    band = self.get_band(b)
    return HyData.to_grey(band)
def get_raveled(self):
    """
    Return the data array as a 2D array of shape (points, bands). NOTE: this is just a
    view of the original data array, so any changes made to it will affect the original
    dataset. Useful for fast transformations!

    *Returns*:
     - pixels = an array such that pixels[n][band] gives the spectrum of the nth pixel.
    """
    nbands = self.data.shape[-1]
    return self.data.reshape(-1, nbands)
def X(self):
    """
    Shorthand for get_raveled(); X is conventionally used for a matrix of spectra.
    """
    return self.get_raveled()
def set_raveled(self, pix, shape=None):
    """
    Fill this dataset from an array of spectra of the format returned by get_raveled(...).
    Note that this does not copy the array, but stores a (reshaped) view of it.

    *Arguments*:
     - pix = an array such that pix[n][band] gives the spectrum of the nth point/pixel.
     - shape = the target data shape. Defaults to the current shape, with the last
               (band) dimension inferred automatically.
    """
    if shape is None:
        shape = list(self.data.shape)
        shape[-1] = -1  # infer the band count from pix
    self.data = pix.reshape(shape)
def get_band_index(self, w, **kwds):
    """
    Resolve a band descriptor to a band index. Integers are treated as (validated) band
    indices and returned directly; strings are matched against band names; floats are
    matched to the closest wavelength.

    *Keywords*:
     - thresh = the threshold (in nanometers) within which a band must fall to be valid.
                Default is hylite.band_select_threshold (which defaults to 10 nm).
    *Returns*:
     - the matching band index.
    """
    thresh = kwds.get("thresh", hylite.band_select_threshold)
    if isinstance(w, int):
        # n.b. w == band_count() is accepted here - callers (e.g. export_bands, mask_bands)
        # pass it as an exclusive slice end.
        n = self.band_count()
        assert -n <= w <= n, "Error - band index %d is out of range (image has %d bands)." % (w, n)
        return n + w if w < 0 else w  # map negative indices to positive ones
    if isinstance(w, str):
        names = self.get_band_names()
        assert w in names, "Error - could not find band with name %s" % w
        return int(names.index(w))
    if isinstance(w, float):
        diff = np.abs(self.get_wavelengths() - w)
        assert np.nanmin(diff) <= thresh, "Error - no bands exist within %d nm of wavelength %f. Try increasing the 'thresh' keyword?" % (thresh, w)
        return int(np.argmin(diff))
    assert False, "Error - %s is an unknown band descriptor type." % type(w)
def contiguous_chunks(self, p=75, min_size=0):
    """
    Extract contiguous chunks of spectra, splitting at (1) completely nan bands or (2) large steps in wavelength.

    *Arguments*:
     - p = the percentile used to define a large change in wavelength. Default is 75. A "gap" is considered to be
           a change in wavelength greater than double this percentile.
     - min_size = the minimum number of bands required to consider a chunk valid. Default is 0 (return all chunks).
    *Returns*:
     - chunks = copies of the original data array that contain continuous spectra. At least one pixel/point
                in each slice of these bands is guaranteed to be finite.
     - wav = arrays containing the wavelengths corresponding to each band of each chunk.
    """
    # find gaps in wavelength and/or completely nan bands and/or data ignore values:
    # a band is retained only if at least one pixel/point is finite AND at least one
    # differs from the header's ignore value.
    finite = np.isfinite(self.data).any(axis=tuple(range(len(self.data.shape) - 1)))  # False = nans
    finite = finite & (self.data != float(self.header.get("data ignore value", 0))).any(
        axis=tuple(range(len(self.data.shape) - 1)))
    assert len(self.get_wavelengths()) == len(
        finite), "Error - hyperspectral dataset has %d bands but %d wavelengths." % (
        len(finite), len(self.get_wavelengths()))
    x = self.get_wavelengths()[finite]  # wavelengths of the retained bands
    dx = np.abs(np.diff(x))  # wavelength step between adjacent retained bands
    maxstep = 2. * np.percentile(dx, p)  # steps larger than this are treated as gaps
    if not (dx >= maxstep).any():  # no gaps - just return contiguous block!
        assert len(x) > min_size, "Error - total band count < min_size."
        msk = [self.get_band_index(b) for b in x]  # map wavelengths back to band indices
        return [self.data[..., msk]], [x]
    else:
        # indices (into x) of the last band before each gap...
        break_start = list(np.argwhere(dx > maxstep)[:, 0])
        # ...and of the first band after each gap, found by scanning the reversed array.
        # NOTE(review): the negate-and-reverse arithmetic here is subtle - verify it yields
        # the intended post-gap indices for spectra with multiple gaps before relying on it.
        break_end = list((-np.argwhere(np.abs(np.diff(x[::-1])) > maxstep)[:, 0])[::-1])
        break_start.append(-1)  # add end of dataset so we don't miss last chunk
        break_end.append(-1)  # add end of dataset so we don't miss last chunk
        assert len(break_start) == len(
            break_end), r"Error - weird shit is happening? [ useful error messages ftw ¯\_(ツ)_/¯ ]"
        idx0 = 0  # start (in x) of the chunk currently being assembled
        chunks = []
        wav = []
        for i in range(len(break_start)):  # build chunks
            W = x[idx0:break_start[i]]  # wavelengths belonging to this chunk
            msk = [self.get_band_index(b) for b in W]
            if W.shape[-1] > min_size:  # drop chunks that are too small
                wav.append(W)
                chunks.append(self.data[..., msk])
            idx0 = break_end[i]  # skip forwards past the gap
        return chunks, wav
##################################
## Smoothing algorithms
###################################
def smooth_median(self, window=3):
    """
    Smooth spectra (in place) with a running median filter along the band axis.

    *Arguments*:
     - window = size of the running window, must be int. Default is 3.
    *Returns*: Nothing - overwrites data with the smoothed result.
    """
    assert isinstance(window, int), "Error - running window size must be integer."
    ndim = len(self.data.shape)
    if ndim == 3:  # image data - filter along bands only
        size = (1, 1, window)
    elif ndim == 2:  # point cloud data
        size = (1, window)
    else:
        assert False, "Error: Run_median does not work on %d-d data." % ndim
    self.data = ndimage.median_filter(self.data, size=size)
def smooth_savgol(self, window=5, poly=2, **kwds):
    """
    Smooth spectra with a Savitzky-Golay filter.

    *Arguments*:
     - window = size of the running window, must be an odd integer. Default is 5.
     - poly = degree of the fitted polynomial, must be int. Default is 2.
    *Keywords*: Keywords are passed to scipy.signal.savgol_filter(...).
    *Returns*: A copy of the input dataset with smoothed spectra.
    """
    assert isinstance(window, int), "Error - running window size must be integer."

    # smooth each contiguous chunk separately (wavelength gaps would corrupt the filter)
    chunks, wav = self.contiguous_chunks(min_size=window)
    kwds['window_length'] = window
    kwds['polyorder'] = poly
    kwds['axis'] = -1
    for arr in chunks:
        finite = np.isfinite(arr).all(axis=-1)  # only filter spectra without nans
        arr[finite, :] = signal.savgol_filter(arr[finite, :], **kwds)

    # assemble the smoothed chunks into a copy of this dataset
    out = self.copy(data=False)
    out.data = np.dstack(chunks) if self.is_image() else np.vstack(chunks)
    out.set_wavelengths(np.hstack(wav))
    return out
###################################
# PLOTTING AND OTHER VISUALISATIONS
###################################
# noinspection PyDefaultArgument
def plot_spectra(self, ax=None, band_range=None, labels=None, indices=[], colours='blue', **kwds):
    """
    Plots a summary of all the spectra in this dataset.

    *Arguments*:
     - ax = an axis to plot to. If None (default), a new axis is created.
     - band_range = tuple containing the (min,max) band index (int) or wavelength (float) to plot.
     - labels = Labels for spectral features such that labels[0] = [feat1,feat2,..] and labels[1] = [name1,name2,...]
                can be passed. Pass None (default) to disable labels.
     - indices = specific data point to plot. Should be a list containing index tuples, or an empty list if no pixels
                 should be plotted (Default). n.b. the mutable default is safe here - it is only rebound, never mutated.
     - colours = a matplotlib colour string or list of colours corresponding to each index spectra. Default is 'blue'.

    *Keywords*
     - quantiles = True if summary quantiles of all pixels should be plotted. Default is True.
     - median = True if the median spectra of all pixels should be plotted. Default is True.
     - other keywords are passed to plt.plot( ... ).

    *Returns*:
     - the figure and axis plotted to.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(18, 6))

    # extract relevant range
    subset = self
    if band_range is not None:
        subset = self.export_bands(band_range)

    # ensure wavelengths are appropriate (fall back to band indices if missing or mismatched)
    if not subset.has_wavelengths() or len(subset.get_wavelengths()) != subset.band_count():
        subset.set_wavelengths( np.arange( subset.band_count() ) )

    # calculate and plot percentiles
    quantiles = kwds.get("quantiles", True)
    median = kwds.get("median", True)
    if "quantiles" in kwds: del kwds['quantiles']  # remove keyword (kwds is forwarded to plt.plot)
    if 'median' in kwds: del kwds['median']  # remove keyword
    C, x = subset.contiguous_chunks()
    for C, x in zip(C, x):  # n.b. the loop variables deliberately shadow the chunk lists
        if quantiles or median:  # calculate percentiles
            percent = np.nanpercentile(C, axis=tuple(range(len(self.data.shape) - 1)),
                                       q=[5, 25, 50, 75, 95])  # calculate percentiles
            q5, q25, q50, q75, q95 = percent
            if median:  # plot median line
                ax.plot(x, q50, color='k', label='median', **kwds)
            if quantiles:  # plot percentile areas (5-95 and 25-75 envelopes)
                for lower, upper in zip([q5, q25], [q95, q75]):
                    _y = np.hstack([lower, upper[::-1]])
                    _x = np.hstack([x, x[::-1]])
                    ax.fill(_x[np.isfinite(_y)], _y[np.isfinite(_y)], color='grey', alpha=0.25)

        # plot specific spectra
        if isinstance(indices, tuple): indices = [indices]  # allow a single index tuple
        for i,idx in enumerate(indices):
            if isinstance(colours, list):
                ax.plot(x, C[idx], color=colours[i], **kwds)
            else:
                ax.plot(x, C[idx], color=colours, **kwds)

    # plot labels
    if not labels is None:  # plot labels?
        # parse string labels into predefined feature themes
        if isinstance(labels, str):
            if 'silicate' in labels.lower():  # plot common minerals theme
                labels = ref.Themes.CLAY
            elif 'carbonates' in labels.lower():  # plot carbonate minerals
                labels = ref.Themes.CARBONATE
            elif 'ree' in labels.lower():  # plot REE features
                # NOTE(review): this sets labels to None, so the loop below would raise
                # TypeError for 'ree' input - confirm intended behaviour.
                labels = None  # todo - add REE features
        i = 0
        for l in labels:
            # plot label (only if the feature lies within the plotted wavelength range)
            if ax.get_xlim()[0] < l.pos < ax.get_xlim()[1]:
                if isinstance(l, MultiFeature) or isinstance(l, MixedFeature):
                    l.quick_plot(ax=ax, method='fill+line', sublabel=i)
                    i += l.count()
                else:
                    l.quick_plot(ax=ax, method='fill+line', label=i)
                    i += 1

    # add grid x-ticks
    ax.xaxis.set_minor_locator(AutoMinorLocator())
    ax.grid(which='major', axis='x', alpha=0.75)
    ax.grid(which='minor', axis='x', alpha=0.2)
    ax.set_xlabel("Wavelength (%s)" % self.header.get('wavelength units', 'nm'))
    #ax.set_ylabel("Reflectance") # n.b. not all images contain reflectance data...
    return ax.get_figure(), ax
###################################
##DATA TRANSFORMS AND COMPRESSION
###################################
def compress(self):
    """
    Quantise the data array to uint16 (scaled onto 0 - 65535) to save memory.

    NaNs are mapped to 0, which is recorded as the header's 'data ignore value'
    (n.b. this makes true zeros indistinguishable from nodata after compression).
    """
    if self.data.dtype == np.uint16:
        return  # already compressed
    assert np.nanmin(self.data) >= 0, "Error - to compress data range must be 0 - 1 but min is %s." % np.nanmin(self.data)
    assert np.nanmax(self.data) <= 1.0, "Error - to compress data range must be 0 - 1 but max is %s." % np.nanmax(self.data)
    scaled = self.data * 65535  # map 0 - 1 onto 0 - 65535
    scaled[np.logical_not(np.isfinite(scaled))] = 0  # nans -> ignore value
    self.data = scaled.astype(np.uint16)
    # store the compression metadata in the header
    self.header["data ignore value"] = str(0)
    self.header['reflectance scale factor'] = str(65535)
def decompress(self):
    """
    Expand compressed (integer) data back to floating point values in 0 - 1.
    """
    if (np.nanmax(self.data) <= 1.0) and (np.nanmin(self.data) >= 0.0):
        return  # dataset is already decompressed
    # read compression metadata from the header
    sf = float(self.header.get("reflectance scale factor", 65535))
    nodata = float(self.header.get("data ignore value", -1))
    # expand to float32 and flag nodata values as nan
    self.data = self.data.astype(np.float32)
    self.data[np.isnan(self.data) | (self.data == nodata)] = np.nan
    # transform data back to the original 0 - 1 range
    self.data = self.data / sf
def normalise(self, minv=None, maxv=None):
    """
    Normalise individual data points to account for variations in illumination and overall
    reflectivity. If minv and maxv are both None, each spectrum is scaled to unit vector
    length; otherwise each spectrum is divided by its average over the bands between minv
    and maxv.

    *Returns*:
     - the normalising factor applied to each data point.
    """
    # convert to float (remember the original dtype so integers can be re-compressed)
    dtype = self.data.dtype
    self.data = self.data.astype(np.float32)

    if minv is None and maxv is None:
        # normalise to unit length, ignoring bands that are all-nan or all-zero
        valid = [b for b in range(self.band_count())
                 if np.isfinite(self.get_band(b)).any() and not (self.get_band(b) == 0).all()]
        nf = np.linalg.norm(self.data[..., valid], axis=-1).astype(np.float32)
        self.data /= nf[..., None]
    else:
        # normalise to the band average between minv and maxv
        i0 = self.get_band_index(minv)
        i1 = self.get_band_index(maxv)
        if i0 > i1:  # allow arguments in either order
            i0, i1 = i1, i0
        nf = np.nanmean(self.data[..., i0:i1], axis=-1).astype(np.float32)
        with np.errstate(all='ignore'):  # ignore div 0 errors etc.
            self.data /= nf[..., None]

    # convert back to the original (compressed) representation if needed
    if np.issubdtype(dtype, np.integer):
        self.compress()
    return nf
def correct_spectral_shift(self, position):
    """
    Correct a spectral sensor shift by offsetting the right-hand part of each spectrum so
    it joins continuously at the given band.

    :param position: Wavelength (float) or band index (int) of the first offset value -
                     e.g. FENIX: band 714 or wavelength 976., respectively.
    :return: None - changes data in place.
    """
    assert isinstance(position, int) or isinstance(position, float), "Error - shift position must be int (band number) or float (wavelength)."
    if isinstance(position, float):
        position = self.get_band_index(position)
    # shift everything from `position` onwards by the step between the two adjacent bands
    step = (self.data[..., position - 1] - self.data[..., position])[..., None]
    self.data[..., position:] += step
# coding: utf-8
# In[1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
"""def make_batch(input_data, noise_data, batch_size):
index = np.arange(0, len(input_data))
np.random.shuffle(index)
index = index[:batch_size]
shuffled_input_data = [input_data[i] for i in index]
shuffled_noise_data = [noise_data[i] for i in index]
return np.asarray(shuffled_input_data), np.asarray(shuffled_noise_data)"""
def make_batch(noise_data, batch_size):
    """Randomly sample batch_size rows (without replacement) from noise_data."""
    index = np.arange(0, len(noise_data))
    np.random.shuffle(index)
    chosen = index[:batch_size]
    return np.asarray([noise_data[i] for i in chosen])
def make_noise_vector(batch_size, noise_size):
    """Draw a (batch_size, noise_size) matrix of standard-normal noise for the generator."""
    return np.random.normal(size=(batch_size, noise_size))
# In[3]:
class GAN(object):
    """A minimal fully-connected GAN trained on MNIST (TensorFlow 1.x graph API)."""

    def __init__(self, sess, batch_size):
        """
        :param sess: an open tf.Session used for training and sampling.
        :param batch_size: number of images per training batch.
        """
        self.sess = sess
        self.batch_size = batch_size
        self.Noise_Input_Data_Size = 128#32
        self.Hidden_Layer1_Size_G = 256
        self.Hidden_Layer1_Size_D = 256
        self.Converted_Image_Size = 784  # flattened 28 x 28 MNIST image
        self.parameter()
        self.model()

    def parameter(self):
        """Create placeholders and the generator/discriminator weight variables."""
        #input parameter
        self.X = tf.placeholder(dtype=tf.float32, shape=[None, self.Converted_Image_Size])
        self.Z = tf.placeholder(dtype=tf.float32, shape=[None, self.Noise_Input_Data_Size])
        #generator parameter
        self.W1_G = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[self.Noise_Input_Data_Size, self.Hidden_Layer1_Size_G], stddev=0.01))
        self.b1_G = tf.Variable(tf.zeros(dtype=tf.float32, shape=[self.Hidden_Layer1_Size_G]))
        self.W2_G = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[self.Hidden_Layer1_Size_G, self.Converted_Image_Size], stddev=0.01))
        #discriminator parameter
        self.W1_D = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[self.Converted_Image_Size, self.Hidden_Layer1_Size_D], stddev=0.01))
        self.b1_D = tf.Variable(tf.zeros(dtype=tf.float32, shape=[self.Hidden_Layer1_Size_D]))
        self.W2_D = tf.Variable(tf.random_normal(dtype=tf.float32, shape=[self.Hidden_Layer1_Size_D, 1], stddev=0.01))

    def generator(self, noise):
        """Map a noise batch to a batch of flattened fake images in (0, 1)."""
        self.L1_G = tf.add(tf.matmul(noise, self.W1_G), self.b1_G)
        self.Y1_G = tf.nn.relu(self.L1_G)
        self.model_G = tf.nn.sigmoid(tf.matmul(self.Y1_G, self.W2_G))
        return self.model_G

    def discriminator(self, input_data):
        """Score a batch of flattened images; output in (0, 1), higher = judged real."""
        # n.b. called twice (fake and real); the weights are shared because the same
        # tf.Variables from parameter() are reused in both calls.
        self.L1_D = tf.add(tf.matmul(input_data, self.W1_D), self.b1_D)
        self.Y1_D = tf.nn.relu(self.L1_D)
        self.model_D = tf.nn.sigmoid(tf.matmul(self.Y1_D, self.W2_D))
        return self.model_D

    def model(self):
        """Wire up the adversarial losses and their optimisers."""
        noise_data = make_noise_vector(self.batch_size, noise_size = self.Noise_Input_Data_Size)  # n.b. unused
        self.G = self.generator(self.Z)
        self.D_fake = self.discriminator(self.G)
        self.D_real = self.discriminator(self.X)
        D_var_list = [self.W1_D, self.b1_D, self.W2_D]
        G_var_list = [self.W1_G, self.b1_G, self.W2_G]
        # NOTE(review): tf.log(sigmoid) without clipping can yield -inf when the
        # discriminator saturates; the logits-based cross-entropy form is more stable.
        self.cost_D = tf.reduce_mean(tf.log(self.D_real) + tf.log(1-self.D_fake))#maximize
        self.Optimize_D = tf.train.AdamOptimizer(learning_rate=0.002).minimize(-self.cost_D, var_list = D_var_list)
        #self.Optimize_D = tf.train.AdamOptimizer(learning_rate=0.001).minimize(-self.cost_D)
        #self.cost_G = tf.reduce_mean(tf.log(self.D_real))#maximize
        self.cost_G = tf.reduce_mean(tf.log(1-self.D_fake))#minimize
        self.Optimize_G = tf.train.AdamOptimizer(learning_rate=0.002).minimize(self.cost_G, var_list = G_var_list)
        #self.Optimize_G = tf.train.AdamOptimizer(learning_rate=0.001).minimize(-self.cost_G)

    def training(self):
        """Run the adversarial training loop, plotting generated samples every 10 epochs."""
        data_size = 55000  # size of the MNIST training split
        total_batch = int(data_size / self.batch_size)
        #SAVE_PATH = "C:/Users/JAEKYU/Documents/Jupyter Notebook/Super_Resolution_/Weight/Weight.ckpt"
        print("Session start")
        self.sess.run(tf.global_variables_initializer())
        for epoch in range(40):
            for i in range(total_batch):
                # build the mini-batches
                batch_X = mnist.train.next_batch(batch_size=self.batch_size)[0]#32,784
                batch_Z = make_batch(make_noise_vector(self.batch_size, self.Noise_Input_Data_Size), self.batch_size)#32,10
                #print("batch creation end")
                #session run
                #print("sesstion run start")
                Opt_G, cost_G = self.sess.run([self.Optimize_G, self.cost_G], feed_dict={self.Z : batch_Z})
                Opt_D, cost_D = self.sess.run([self.Optimize_D, self.cost_D], feed_dict={self.X : batch_X, self.Z : batch_Z})
                #print("sesstion run sucess")
            print("epoch : ", epoch, ", gen_cost : ", cost_G, ", dis_cost : ", cost_D)
            if(epoch%10 == 0):
                # draw a row of generated samples to monitor training progress
                noise_data = make_batch(make_noise_vector(self.batch_size, self.Noise_Input_Data_Size), self.batch_size)
                samples = self.sess.run(self.G, feed_dict={self.Z : noise_data})
                fig, ax = plt.subplots(1, self.batch_size, figsize=(self.batch_size, 1))
                for j in range(self.batch_size):
                    ax[j].set_axis_off()
                    ax[j].imshow(np.reshape(samples[j], (28, 28)))
                fig.show()
                plt.draw()
                plt.show()
# In[4]:
# In[ ]:
|
# Read and display the contents of store.txt.
# n.b. fixed: the file handle was previously opened without `with` and never closed.
with open('store.txt') as f:
    chars = f.read()
print(chars)  # display the file contents
|
"""
Write a python program to guess the number in the user's mind.
randrange function of random module can be used to guess the number in user’s mind.
Note: User should think of a number which is in between 1 and 10 (both inclusive).
+---------------------+------------------------------+----------------+--------------------------+
| Random Range values | Sample number in user’s mind | Number guessed | Expected Output |
+---------------------+------------------------------+----------------+--------------------------+
| 1 to 10 | 5 | 3 | Number is low |
| | +----------------+--------------------------+
| | | 7 | Number is high |
| | +----------------+--------------------------+
| | | 5 | You have got it right!!! |
+---------------------+------------------------------+----------------+--------------------------+
"""
#PF-Tryout
def guess_number(number_in_mind):
    """
    Guess a number between 1 and 10 (inclusive) using random.randrange, print the guess,
    and report whether it is low, high, or correct relative to number_in_mind.
    """
    import random
    guess = random.randrange(1, 11)
    print(guess)
    if guess < number_in_mind:
        print('Number is low')
    elif guess > number_in_mind:
        print('Number is high')
    else:
        print('You have got it right!!!')


guess_number(4)
|
'''balancing an inverted double pendulum'''
from math import *

# physical specs (masses in grams, lengths in millimetres)
mass_pen1=131 # pendulum 1 mass [g]
len1=213 # link 1 length [mm]
mass_pen2=145 # 110g bearing + 35g pendulum 2 mass [g]
len2=11.35 # link 2 length [mm]
# end-effector position, updated as a side effect by get_theta3()
x=0.0 # x position of the end effector
y=0.0 # y position of the end effector
# let theta3 be the angle of the end effector to the y-axis
def get_theta3(theta1, theta2):
    """
    Compute the end effector's angle to the y-axis (degrees).

    Inputs: theta1 = IMU reading for link 1, theta2 = servo angle for link 2.
    Side effect: updates the module-level end-effector position (x, y).
    """
    global x, y
    q1 = theta1 + 90  # link 1 angle from the x-axis
    q2 = theta2 - 90  # link 2 angle relative to link 1
    x = len1 * cos(radians(q1)) + len2 * cos(radians(q1 + q2))
    y = len1 * sin(radians(q1)) + len2 * sin(radians(q1 + q2))
    if x == 0:
        x += 0.001  # avoid division by zero in atan below
    angle = degrees(atan(y / x))
    # convert from angle-to-x-axis into angle-to-y-axis
    if angle < 0:
        angle = -(angle + 90)
    else:
        angle = 90 - angle
    return angle
# calculate torque for the system
def get_torque(theta1, theta3):
    """
    Return the total gravitational torque (in g*mm units) about the base joint,
    summing the contributions of both pendulum masses. Uses the module-level
    end-effector position (x, y) set by get_theta3().
    """
    torque1 = mass_pen1 * len1 * sin(radians(theta1))
    torque2 = mass_pen2 * get_dist(x, y) * sin(radians(theta3))
    return torque1 + torque2
def get_dist(x, y):
    """Return the Euclidean distance of point (x, y) from the origin."""
    return sqrt(x * x + y * y)
# this function generates the theta1 and theta2 pairs that will make the system torque zero with 0.2 error
# the points are used to generate a function of theta2 from theta1 input.
def get_theta2():
    """
    Brute-force search: for each integer theta1 in [-90, 90), sweep theta2 over
    [0, 180) in 0.01-degree steps and print the (theta1, theta2) pairs whose net
    torque is approximately zero (|torque| < 0.2). Results are printed, not returned.
    """
    i = 0  # theta2 sweep variable (degrees)
    j = -90  # theta1 (degrees)
    while j < 90:
        while i < 180:
            torque = get_torque(j, get_theta3(j, i))
            if abs(torque) < 0.2:  # close enough to balanced
                print('theta1:%d theta2: %f torque: %f' % (j, i, torque))
            i += 0.01  # n.b. float accumulation drifts slightly over 18000 steps
        i = 0
        j += 1


get_theta2()
|
import distribute_setup
distribute_setup.use_setuptools()  # bootstrap setuptools via the (legacy) distribute package

from setuptools import setup, find_packages

# Packaging metadata for the Wicked Jukebox database layer.
setup(
    name = "Wicked Jukebox Database",
    version = "1.0",
    license = "BSD 3-Clause",
    packages = find_packages(),
    long_description=open("README.rst").read(),
    install_requires = [
        'sqlalchemy==0.7',  # pinned to the SQLAlchemy 0.7-era API
    ],
)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
import login.urls
import exercise.url
import collection.urls
import resources.urls
import activity.urls
import fortune.urls
import bbs.urls
import jobs.urls
import complaint.url
admin.autodiscover()
# URL routing table. n.b. patterns() and string view references are Django < 1.8 APIs.
urlpatterns = patterns('',
    # url(r'^$', 'subject.views.home', name='home'),
    # url(r'^static/(?P<path>.*)$','django.views.static.serve',),
    # url(r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT}),
    url(r'^admin/', include(admin.site.urls)),       # Django admin site
    url(r'^account/', include(login.urls)),          # account / authentication pages
    url(r'^title/', include(exercise.url)),          # exercises
    url(r'^complaint/', include(complaint.url)),
    url(r'^collection/', include(collection.urls)),
    url(r'^resources/', include(resources.urls)),
    url(r'^activity/', include(activity.urls)),
    url(r'^fortune/', include(fortune.urls)),
    url(r'^bbs/', include(bbs.urls)),
    url(r'^jobs/', include(jobs.urls)),
    url(r'^$', 'login.views.index'),                 # site landing page
)
|
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
from typing_extensions import Protocol
from ._pydantic import BaseModel
# Type variable bound to BaseModel, used to parameterise model-handling helpers.
BaseModelSubclassType = TypeVar("BaseModelSubclassType", bound=BaseModel)
ModelType = Type[BaseModelSubclassType]  # a BaseModel subclass itself (not an instance)
OptionalModelType = Optional[ModelType]
NamingStrategy = Callable[[ModelType], str]  # maps a model class to its schema name
NestedNamingStrategy = Callable[[str, str], str]  # maps (parent name, field name) to a nested name
class MultiDict(Protocol):
    """
    Structural interface for multi-valued string mappings (e.g. URL query parameters
    or form data), where a single key may map to several values.
    """

    def get(self, key: str) -> Optional[str]:
        """Return the first value stored for *key*, or None if absent."""
        pass

    def getlist(self, key: str) -> List[str]:
        """Return all values stored for *key* (an empty list if absent)."""
        pass

    def __iter__(self) -> Iterator[str]:
        """Iterate over the keys."""
        pass
class FunctionDecorator(Protocol):
    """
    Structural type of a decorated view function: the attributes the decorator
    attaches to it. These appear to carry OpenAPI-style metadata (resp, tags,
    security, deprecated) - confirm against the decorator implementation.
    """

    resp: Any  # response model / specification attached to the view
    tags: Sequence[Any]  # grouping tags
    security: Union[None, Dict, List[Any]]  # security requirements
    deprecated: bool  # marks the operation as deprecated
    path_parameter_descriptions: Optional[Mapping[str, str]]  # per-path-parameter docs
    _decorator: Any  # back-reference to the decorating object
|
from django.template import RequestContext
from corcho import settings
def app_settings(request):
    """Context processor exposing the corcho settings module to all templates."""
    context = dict(settings=settings)
    return context
|
from objects import glyphs
class RogueGlyphs(glyphs.Glyphs):
    """Whitelist of glyphs recognised for rogue calculations."""
    # Should put all of them in at some point, just added the ones that matter
    # for the initial set of calculations.
    allowed_glyphs = frozenset([
        'backstab', 'mutilate', 'rupture', 'slice_and_dice', 'vendetta',
        'tricks_of_the_trade',
    ])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 15:00:01 2019
@author: andr
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from tqdm import tqdm
name_pulsar = input('Enter name pulsar: ')

# read the starting .par ephemeris; line 4 (index 3) holds the period parameter
with open(name_pulsar + '_start.par', 'r') as file:
    lines = file.readlines()
start_period = copy(lines[3][:-1])  # period line without its trailing newline

# brute-force extra period digits: two leading digits ("01".."99")...
first_part = ["%.2d" % i for i in range(1, 100)]
for first in tqdm(first_part):
    os.system('rm ' + 'res_iter_p_' + name_pulsar + '.txt')  # reset results for this digit pair
    # ...then up to four more digits (1..9999) appended to the period
    for add in tqdm(range(1, 10000), leave=False):
        lines[3] = start_period + first + str(add) + ' 1' + '\n'  # ' 1' flags the parameter as fitted
        with open(name_pulsar + '.par', 'w') as file:
            for line in lines:
                file.write(line)
        # run tempo on the candidate ephemeris and extract the timing residuals
        os.system('tempo ' + name_pulsar + '.tim > outtempo.log')
        os.system(
            '~/work/tempo/util/print_resid/./print_resid -mre > ' +
            'resid_' + name_pulsar + '.ascii')
        data = np.genfromtxt('resid_' + name_pulsar + '.ascii').T
        # append the candidate digits and the residual standard deviation to the results file
        with open('res_iter_p_' + name_pulsar + '.txt', 'a') as file:
            file.write(start_period[11:] + first + str(add) + ' ')
            file.write(str(np.std(data[1])))
            file.write('\n')
    # plot the residual scatter for this leading digit pair
    data = np.genfromtxt('res_iter_p_' + name_pulsar + '.txt').T
    if os.path.isdir('./deep_period_plot_' + name_pulsar + '/'):
        pass
    else:
        os.system('mkdir ' + './deep_period_plot_' + name_pulsar + '/')
    plt.close()
    plt.plot(data[1])
    plt.savefig(
        './deep_period_plot_' + name_pulsar + '/'
        + first + '.png', format='png', dpi=150)
|
import tkinter as tk
import speech_recognition as sr
import os
from gtts import gTTS
def voice_output(mytext):
    """
    Speak a confirmation of the saved name using Google text-to-speech (gTTS).

    Synthesises "Your name has been saved as <mytext>" into a temporary mp3,
    plays it, then deletes the file. Requires network access for gTTS.
    """
    # Language in which you want to convert
    language = 'en'
    # Passing the text and language to the engine,
    # here we have marked slow=False. Which tells
    # the module that the converted audio should
    # have a high speed
    myobj = gTTS(text="Your name has been saved as" + str(mytext), lang=language, slow=False)
    # Saving the converted audio in a mp3 file named
    # welcome
    d=os.getcwd()
    os.chdir(d)  # n.b. no-op: changes into the directory we are already in
    myobj.save("welcome.mp3")
    # Playing the converted file
    #welcome = r'D:\voce\welcome.mp3'
    #os.system("mpg123" + welcome)
    from playsound import playsound
    playsound("welcome.mp3")
    os.remove("welcome.mp3")  # clean up the temporary audio file
def voice_input():
    """
    Capture the user's name from the default microphone, recognise it via the
    Google speech API (requires network), and insert the result into the
    `name` entry widget.
    """
    r = sr.Recognizer()
    mic = sr.Microphone(device_index=0)
    with mic as source:
        r.adjust_for_ambient_noise(source, duration=1)
        print("What is your name: ")
        audio = r.listen(source, timeout=0)  # NOTE(review): timeout=0 - confirm this waits as intended
        print("Wait till your voice is recognised......\n")
    d=r.recognize_google(audio)
    name.insert(0, d)
# main window dimensions (pixels) and shared background colour
HEIGHT = 2048
WIDTH = 2048
bgc='lightyellow'

root = tk.Tk()
root.title('SIGN UP for TRACK SMART Attendence')

#this to define canvas in GUI
canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH, bg=bgc)
canvas.pack()
#to upload ot click image for face recognistion
def next_screen():
    """Open the image-capture window (webcam or file upload) for face recognition."""
    win = tk.Toplevel(height=HEIGHT, width=WIDTH)
    win.title('UPLOAD IMAGE')
    # Prompt text across the top of the new window.
    prompt = tk.Label(win, text="Upload Your Image for Face Recognistion", font=('times', 36))
    prompt.place(rely=0.2, relwidth=1)
    # Two capture options; commands are not wired up in this block.
    cam_btn = tk.Button(win, text="WEBCAM", font=('times', 36))
    cam_btn.place(relx=0.4, rely=0.4, relwidth=0.2)
    file_btn = tk.Button(win, text="UPLOAD", font=('times', 36))
    file_btn.place(relx=0.4, rely=0.55, relwidth=0.2)
#to confirm name from user
def confirm_name():
    """Echo the entered name on the canvas and ask for confirmation before moving on."""
    echo = tk.Label(canvas, bg=bgc, text="You entered \"" + name.get() + "\"", font=('times', 36))
    echo.place(rely=0.5, relwidth=1)
    question = tk.Label(canvas, bg=bgc, text="Are you sure you want to continue ?", font=('times', 36))
    question.place(rely=0.6, relwidth=1)
    # RETAKE has no command wired up; CONFIRM advances to the upload screen.
    retake_btn = tk.Button(canvas, text="RETAKE", font=('times', 36))
    retake_btn.place(relx=0.1, rely=0.8, relwidth = 0.15)
    confirm_btn = tk.Button(canvas, text="CONFIRM", font=('times', 36), command=next_screen)
    confirm_btn.place(relx=0.75, rely=0.8, relwidth = 0.15)
#here i have added frame to our GUI for name entry
frame = tk.Frame(root, bg=bgc, bd=10)
frame.place(relx=0.5, rely=0.25, relwidth=0.8, relheight=0.1, anchor='n')
#entry field for name
name = tk.Entry(frame, font=('times', 36))
name.place(relwidth=0.6, relheight=1)
#photoimage for icon; subsample shrinks the icon to 1/11 in each dimension
vr_image = tk.PhotoImage(file = "vr_icon.png")
vr_icon = vr_image.subsample(11,11)
#button for voice recognition (fills the entry via voice_input)
vr_button = tk.Button(frame, text="Voice Recognistion", image = vr_icon, font=('times', 36), command=voice_input)
vr_button.place(relx=0.64, relheight=1, relwidth=0.07)
#button for name: shows the confirmation labels and speaks the name back
name_button = tk.Button(frame, text="SUBMIT Name", font=('times', 36), command=lambda:[confirm_name(), voice_output(name.get())])
name_button.place(relx=0.75, relheight=1, relwidth=0.25)
# Hand control to the Tk event loop.
root.mainloop()
|
# Generated by Django 2.2.4 on 2019-12-05 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a boolean `send_out` flag
    # (default False) to the visitors.track_entry model.

    dependencies = [
        ('visitors', '0017_auto_20191202_1244'),
    ]

    operations = [
        migrations.AddField(
            model_name='track_entry',
            name='send_out',
            field=models.BooleanField(default=False),
        ),
    ]
|
'''
创建一个类
类名:首字母大写,驼峰原则,见名知意
类属性:驼峰原则
类行为:方法或函数
'''
'''
类本身不占内存空间,实例化的对象占内存空间
格式:
class 类名(父类列表):
属性
行为
'''
class People(object):
    """A simple person model used to demonstrate class attributes and methods.

    The four class attributes act as shared defaults; every method takes
    ``self`` (the instance) as its first parameter.
    """

    # Default attribute values shared by all instances.
    name = ""
    age = 0
    height = 0
    weight = 0

    def run(self):
        """Announce that the person is running."""
        print("run......")

    def eat(self, food):
        """Announce what the person is eating."""
        print("eat {}".format(food))
|
import sys, io, os, random, logging
from jacks.io_preprocess import subsample_and_preprocess
from jacks.jacks import infer_JACKS_gene, LOG
import numpy as np
def read_essentiality(ess_file='../../data/Hart_training_essentials.txt',
                      noness_file='../../data/Hart_training_nonessential.txt'):
    """Read the Hart training essential/non-essential gene lists.

    Each input is a tab-separated file with one header row; the first
    column holds the gene name. The paths are now parameters (defaulting
    to the original hard-coded locations) so the function is reusable.

    Returns:
        dict: {True: essential gene names, False: non-essential gene names}.

    Fixes: the original used the removed ``file()`` builtin and leaked
    both file handles; ``open()`` in a context manager handles both.
    """
    def _first_column(path):
        # Skip the header row, take the first tab-separated field per line.
        with open(path, 'r') as f:
            return [line.strip().split("\t")[0] for line in f.readlines()[1:]]
    return {False: _first_column(noness_file), True: _first_column(ess_file)}
# NOTE(review): this is a Python 2 script (print statements below).
# Bootstrap-sample cell lines, run JACKS inference on essential genes,
# and write the per-gRNA X1/X2 values for each bootstrap to a text file.
if len(sys.argv) != 6 and len(sys.argv) != 7:
    print 'Usage: sample_jacks_screen_xs.py rawcountfile num_replicates num_celllines outfile num_bootraps <job_idx - opt, else LSB_JOBINDEX>'
else:
    LOG.setLevel(logging.WARNING)
    inputfile = sys.argv[1]
    # NOTE(review): eval() on argv executes arbitrary code; int() would be
    # the safe conversion here -- confirm nothing relies on expressions.
    num_replicates = eval(sys.argv[2])
    num_celllines = eval(sys.argv[3])
    outfile = sys.argv[4]
    num_bootstraps = eval(sys.argv[5])
    # Job index comes from the LSF array environment unless given explicitly.
    if len(sys.argv) == 6: job_idx = os.environ['LSB_JOBINDEX']
    else: job_idx = sys.argv[6]
    #Get list of test cell lines (unique column-header prefixes, excluding controls)
    f = io.open(inputfile)
    hdrs = [x.split('_')[0] for x in f.readline().split('\t')[2:] if 'CTRL' not in x]
    f.close()
    test_celllines = [x for x in set(hdrs)]
    ess_genes = set(read_essentiality()[True])
    # x_values maps each gRNA to a list of (x1, x2) pairs, one per bootstrap.
    x_values = {}
    for bs in range(num_bootstraps):
        selected_celllines = random.sample(test_celllines, num_celllines)
        print selected_celllines
        # Read all data from input file
        data, meta, cell_lines, genes, gene_index, _ = subsample_and_preprocess(inputfile, [('CTRL',-1)] + [(x, num_replicates) for x in selected_celllines])
        ctrldata = data[:,cell_lines.index('CTRL'),:]
        testdata = data[:,[cell_lines.index(x) for x in selected_celllines],:]
        #Run JACKS on essential genes only, record the output x values
        for gene in gene_index:
            if gene not in ess_genes:
                continue
            Ig = gene_index[gene]
            # Perform inference
            y, tau, x1, x2, w1, w2 = infer_JACKS_gene(testdata[Ig,:,0], testdata[Ig,:,1], ctrldata[Ig,0], ctrldata[Ig,1], 50)
            for i,grna in enumerate(meta[Ig,0]):
                if bs == 0:
                    x_values[grna] = []
                x_values[grna].append((x1[i],x2[i]))
    # Write one row per gRNA with X1/X2 columns for every bootstrap.
    if not os.path.exists(os.path.dirname(outfile)): os.makedirs(os.path.dirname(outfile))
    fout = io.open(outfile[:-4] + '_%s.txt' % job_idx, 'w')
    fout.write(u'gRNA\t%s\n' % ('\t'.join(['X1\tX2' for i in range(num_bootstraps)])))
    for grna in x_values:
        # NOTE(review): '%5e' (width 5) below was probably meant to be
        # '%.5e' (5 decimal places) like the X1 field -- confirm.
        fout.write(u'%s\t%s\n' % (grna, '\t'.join(['%.5e\t%5e' % (x1, x2) for (x1,x2) in x_values[grna]])))
    fout.close()
|
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import scipy.sparse as sp
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve
from sklearn.manifold import spectral_embedding
import node2vec
from gensim.models import Word2Vec
from sklearn.linear_model import LogisticRegression
import time
import os
import tensorflow as tf
from gae.optimizer import OptimizerAE, OptimizerVAE
from gae.model import GCNModelAE, GCNModelVAE
from gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges, mask_test_edges_directed
import pickle
from copy import deepcopy
#sigmod激活函数
def sigmoid(x):
    """Numerically stable logistic sigmoid 1 / (1 + exp(-x)).

    Splits on the sign of x so the exp() argument is always <= 0,
    avoiding overflow for large |x|.

    Bug fixed: the original negative branch returned 1 / (1 + exp(x)),
    which equals sigmoid(-x) -- negative inputs were mapped to the wrong
    half of the curve. The stable form for x < 0 is exp(x) / (1 + exp(x)).
    """
    if x >= 0:
        return 1 / (1 + np.exp(-x))
    # Here exp(x) <= 1, so no overflow is possible.
    z = np.exp(x)
    return z / (1 + z)
#绘制训练损失和准确度以及验证AUC值和AP值曲线
def draw_gae_training(dataset, epochs, train_loss, train_acc, val_roc, val_ap):
    """Plot GAE training curves.

    Saves three figures under results/tables/ (loss+accuracy, validation
    ROC-AUC, validation AP), showing each one interactively after saving.
    All four metric arguments are per-epoch sequences of length `epochs`.
    """
    # plot the training loss and accuracy
    _, ax1 = plt.subplots()
    ax2 = ax1.twinx()  # accuracy on a secondary y-axis sharing the epoch x-axis
    ax1.plot(np.arange(0, epochs), train_loss, label="train_loss")
    ax2.plot(np.arange(0, epochs), train_acc, label="train_accuracy", color='r')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('train loss')
    ax2.set_ylabel('train accuracy')
    plt.legend(['train_loss', 'train_accuracy'], loc="center right")
    plt.savefig("results/tables/{}_loss_accuracy.png".format(dataset))
    plt.show()
    # Validation ROC-AUC per epoch.
    plt.plot(np.arange(0, epochs), val_roc, label="val_auc")
    plt.xlabel("Epoch")
    plt.ylabel("Area under Curve")
    plt.legend(loc="center right")
    plt.savefig("results/tables/{}_val_roc.png".format(dataset))
    plt.show()
    # Validation average precision per epoch.
    plt.plot(np.arange(0, epochs), val_ap, label="val_ap")
    # plt.title("Training Loss and Accuracy on sar classifier")
    plt.xlabel("Epoch")
    plt.ylabel("Average Accuracy")
    plt.legend(loc="center right")
    plt.savefig("results/tables/{}_val_ap.png".format(dataset))
    plt.show()
# 输入: positive test/val edges, negative test/val edges, edge score matrix
# 输出: ROC AUC score, ROC Curve (FPR, TPR, Thresholds), AP score
def get_roc_score(edges_pos, edges_neg, score_matrix, apply_sigmoid=False):
    """Score link predictions against held-out positive/negative edges.

    Args:
        edges_pos: iterable of (i, j) node pairs that ARE edges (label 1).
        edges_neg: iterable of (i, j) node pairs that are NOT edges (label 0).
        score_matrix: node-by-node matrix of predicted edge scores.
        apply_sigmoid: squash raw scores through the sigmoid before scoring.

    Returns:
        (roc_auc, average_precision), or (None, None) when either edge set
        is empty.

    Fixes: the empty-edges path previously returned a 3-tuple
    (None, None, None) while every caller unpacks two values, which would
    raise ValueError; it now returns (None, None). Also removed the dead
    `pos`/`neg` label lists (labels are built directly with np.ones/zeros).
    """
    if len(edges_pos) == 0 or len(edges_neg) == 0:
        return (None, None)

    def _score(edge):
        s = score_matrix[edge[0], edge[1]]
        return sigmoid(s) if apply_sigmoid else s

    preds_pos = [_score(edge) for edge in edges_pos]  # true edges, label 1
    preds_neg = [_score(edge) for edge in edges_neg]  # non-edges, label 0

    # Stack predictions and matching labels, then score.
    preds_all = np.hstack([preds_pos, preds_neg])
    labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    return roc_score, ap_score
# 返回(node1, node2)元组列表,用于networkx链路预测评估
def get_ebunch(train_test_split):
    """Return the positive + negative test edges as a list of (node1, node2)
    tuples, for use as the `ebunch` of networkx link-prediction functions."""
    # Only the last two members of the split tuple are needed here.
    _, _, _, _, _, test_edges, test_edges_false = train_test_split
    positives = [tuple(pair) for pair in test_edges.tolist()]
    negatives = [tuple(pair) for pair in test_edges_false.tolist()]
    return positives + negatives
# 输入: NetworkX 训练图, train_test_split (通过mask_test_edges划分)
# 输出: AA方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def adamic_adar_scores(g_train, train_test_split):
    """Link prediction via the Adamic-Adar index.

    Returns a dict with 'test_roc', 'test_ap' and 'runtime' (seconds).
    """
    # The index is defined for undirected graphs only.
    if g_train.is_directed():
        g_train = g_train.to_undirected()
    adj_train = train_test_split[0]
    test_edges, test_edges_false = train_test_split[5], train_test_split[6]
    t0 = time.time()
    # Fill a symmetric score matrix for the evaluated node pairs.
    aa_matrix = np.zeros(adj_train.shape)
    for u, v, p in nx.adamic_adar_index(g_train, ebunch=get_ebunch(train_test_split)):
        # (u, v) = node indices, p = Adamic-Adar index
        aa_matrix[u][v] = p
        aa_matrix[v][u] = p  # keep the matrix symmetric
    aa_matrix = aa_matrix / aa_matrix.max()  # scale scores into [0, 1]
    elapsed = time.time() - t0
    test_roc, test_ap = get_roc_score(test_edges, test_edges_false, aa_matrix)
    return {'test_roc': test_roc, 'test_ap': test_ap, 'runtime': elapsed}
# 输入: NetworkX 训练图, train_test_split (通过mask_test_edges划分)
# 输出: JC方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def jaccard_coefficient_scores(g_train, train_test_split):
    """Link prediction via the Jaccard coefficient.

    Returns a dict with 'test_roc', 'test_ap' and 'runtime' (seconds).
    """
    # The coefficient is defined for undirected graphs only.
    if g_train.is_directed():
        g_train = g_train.to_undirected()
    adj_train = train_test_split[0]
    test_edges, test_edges_false = train_test_split[5], train_test_split[6]
    t0 = time.time()
    # Fill a symmetric score matrix for the evaluated node pairs.
    jc_matrix = np.zeros(adj_train.shape)
    for u, v, p in nx.jaccard_coefficient(g_train, ebunch=get_ebunch(train_test_split)):
        # (u, v) = node indices, p = Jaccard coefficient
        jc_matrix[u][v] = p
        jc_matrix[v][u] = p  # keep the matrix symmetric
    jc_matrix = jc_matrix / jc_matrix.max()  # scale scores into [0, 1]
    elapsed = time.time() - t0
    test_roc, test_ap = get_roc_score(test_edges, test_edges_false, jc_matrix)
    return {'test_roc': test_roc, 'test_ap': test_ap, 'runtime': elapsed}
# 输入: NetworkX 训练图, train_test_split (通过mask_test_edges划分)
# 输出: PA方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def preferential_attachment_scores(g_train, train_test_split):
    """Link prediction via preferential attachment.

    Returns a dict with 'test_roc', 'test_ap' and 'runtime' (seconds).
    """
    # The index is defined for undirected graphs only.
    if g_train.is_directed():
        g_train = g_train.to_undirected()
    adj_train = train_test_split[0]
    test_edges, test_edges_false = train_test_split[5], train_test_split[6]
    t0 = time.time()
    # Fill a symmetric score matrix for the evaluated node pairs.
    pa_matrix = np.zeros(adj_train.shape)
    for u, v, p in nx.preferential_attachment(g_train, ebunch=get_ebunch(train_test_split)):
        # (u, v) = node indices, p = preferential-attachment score
        pa_matrix[u][v] = p
        pa_matrix[v][u] = p  # keep the matrix symmetric
    pa_matrix = pa_matrix / pa_matrix.max()  # scale scores into [0, 1]
    elapsed = time.time() - t0
    test_roc, test_ap = get_roc_score(test_edges, test_edges_false, pa_matrix)
    return {'test_roc': test_roc, 'test_ap': test_ap, 'runtime': elapsed}
# 输入: train_test_split (通过mask_test_edges划分)
# 输出: PA方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def spectral_clustering_scores(train_test_split, random_state=0):
    """Link prediction via spectral embedding.

    Edge scores are dot products of 16-dimensional spectral node
    embeddings (sigmoid-squashed). Returns a dict with test/val
    ROC-AUC and AP plus 'runtime' (seconds).
    """
    (adj_train, _, _, val_edges, val_edges_false,
     test_edges, test_edges_false) = train_test_split
    t0 = time.time()
    # Embed the training adjacency, then score all pairs at once.
    emb = spectral_embedding(adj_train, n_components=16, random_state=random_state)
    score_matrix = np.dot(emb, emb.T)
    elapsed = time.time() - t0
    test_roc, test_ap = get_roc_score(test_edges, test_edges_false, score_matrix, apply_sigmoid=True)
    val_roc, val_ap = get_roc_score(val_edges, val_edges_false, score_matrix, apply_sigmoid=True)
    return {
        'test_roc': test_roc,
        'test_ap': test_ap,
        'val_roc': val_roc,
        'val_ap': val_ap,
        'runtime': elapsed,
    }
# 输入: NetworkX 训练图, train_test_split (通过mask_test_edges划分),Node2Vec超参数
# 输出: Node2Vec方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def node2vec_scores(
    g_train, train_test_split,
    P = 1, # Return hyperparameter
    Q = 1, # In-out hyperparameter
    WINDOW_SIZE = 10, # Context size for optimization
    NUM_WALKS = 10, # Number of walks per source node
    WALK_LENGTH = 80, # Length of each random walk
    DIMENSIONS = 128, # Embedding dimensionality
    DIRECTED = False, # Directed / undirected graph
    WORKERS = 8, # Number of parallel workers
    ITER = 1, # Number of SGD epochs
    edge_score_mode = "edge-emb", # Bootstrapped edge embeddings + logistic regression,
    # or a simple dot product, for computing edge scores
    verbose=1,
    ):
    """Run node2vec link prediction on the training graph.

    Returns a dict with test/val ROC-AUC and AP plus 'runtime' (seconds).
    """
    if g_train.is_directed():
        DIRECTED = True
    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
        test_edges, test_edges_false = train_test_split # Unpack train-test split
    start_time = time.time()
    # Preprocess the graph and generate the random walks.
    if verbose >= 1:
        print('Preprocessing grpah for node2vec...')
    g_n2v = node2vec.Graph(g_train, DIRECTED, P, Q) # create a node2vec graph instance
    g_n2v.preprocess_transition_probs()
    if verbose == 2:
        walks = g_n2v.simulate_walks(NUM_WALKS, WALK_LENGTH, verbose=True)
    else:
        walks = g_n2v.simulate_walks(NUM_WALKS, WALK_LENGTH, verbose=False)
    walks = [list(map(str, walk)) for walk in walks]
    # Train the skip-gram model on the walks.
    model = Word2Vec(walks, size=DIMENSIONS, window=WINDOW_SIZE, min_count=0, sg=1, workers=WORKERS, iter=ITER)
    # Keep the learned embedding mappings.
    emb_mappings = model.wv
    # Build the node embedding matrix (rows = nodes, columns = embedding features).
    emb_list = []
    for node_index in range(0, adj_train.shape[0]):
        node_str = str(node_index)
        node_emb = emb_mappings[node_str]
        emb_list.append(node_emb)
    emb_matrix = np.vstack(emb_list)
    # Bootstrapped edge embeddings (per the node2vec paper):
    # embedding of edge (v1, v2) = Hadamard product of the v1, v2 node embeddings.
    if edge_score_mode == "edge-emb":
        def get_edge_embeddings(edge_list):
            # Map each (node1, node2) pair to its Hadamard-product embedding.
            embs = []
            for edge in edge_list:
                node1 = edge[0]
                node2 = edge[1]
                emb1 = emb_matrix[node1]
                emb2 = emb_matrix[node2]
                edge_emb = np.multiply(emb1, emb2)
                embs.append(edge_emb)
            embs = np.array(embs)
            return embs
        # Train-set edge embeddings.
        pos_train_edge_embs = get_edge_embeddings(train_edges)
        neg_train_edge_embs = get_edge_embeddings(train_edges_false)
        train_edge_embs = np.concatenate([pos_train_edge_embs, neg_train_edge_embs])
        # Train-set edge labels: 1 = real edge, 0 = false edge.
        train_edge_labels = np.concatenate([np.ones(len(train_edges)), np.zeros(len(train_edges_false))])
        # Validation-set edge embeddings and labels.
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            pos_val_edge_embs = get_edge_embeddings(val_edges)
            neg_val_edge_embs = get_edge_embeddings(val_edges_false)
            val_edge_embs = np.concatenate([pos_val_edge_embs, neg_val_edge_embs])
            val_edge_labels = np.concatenate([np.ones(len(val_edges)), np.zeros(len(val_edges_false))])
        # Test-set edge embeddings and labels.
        pos_test_edge_embs = get_edge_embeddings(test_edges)
        neg_test_edge_embs = get_edge_embeddings(test_edges_false)
        test_edge_embs = np.concatenate([pos_test_edge_embs, neg_test_edge_embs])
        # Test-set edge labels: 1 = real edge, 0 = false edge.
        test_edge_labels = np.concatenate([np.ones(len(test_edges)), np.zeros(len(test_edges_false))])
        # Train a logistic-regression classifier on the train-set edge embeddings.
        edge_classifier = LogisticRegression(random_state=0, solver='liblinear')
        edge_classifier.fit(train_edge_embs, train_edge_labels)
        # Predicted edge scores: probability of class 1 (real edge).
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            val_preds = edge_classifier.predict_proba(val_edge_embs)[:, 1]
        test_preds = edge_classifier.predict_proba(test_edge_embs)[:, 1]
        runtime = time.time() - start_time
        # Compute the scores.
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            n2v_val_roc = roc_auc_score(val_edge_labels, val_preds)
            # n2v_val_roc_curve = roc_curve(val_edge_labels, val_preds)
            n2v_val_ap = average_precision_score(val_edge_labels, val_preds)
        else:
            n2v_val_roc = None
            n2v_val_roc_curve = None
            n2v_val_ap = None
        n2v_test_roc = roc_auc_score(test_edge_labels, test_preds)
        # n2v_test_roc_curve = roc_curve(test_edge_labels, test_preds)
        n2v_test_pr_curve = precision_recall_curve(test_edge_labels, test_preds)
        n2v_test_ap = average_precision_score(test_edge_labels, test_preds)
    # Edge scores from a simple dot product of the node embeddings.
    elif edge_score_mode == "dot-product":
        score_matrix = np.dot(emb_matrix, emb_matrix.T)
        runtime = time.time() - start_time
        # Validation-set scores.
        if len(val_edges) > 0:
            n2v_val_roc, n2v_val_ap = get_roc_score(val_edges, val_edges_false, score_matrix, apply_sigmoid=True)
        else:
            n2v_val_roc = None
            n2v_val_roc_curve = None
            n2v_val_ap = None
        # Test-set scores.
        n2v_test_roc, n2v_test_ap = get_roc_score(test_edges, test_edges_false, score_matrix, apply_sigmoid=True)
    else:
        # NOTE(review): with an invalid mode the n2v_* variables below are
        # never assigned, so building n2v_scores raises NameError -- confirm
        # whether raising ValueError here was the intent.
        print("Invalid edge_score_mode! Either use edge-emb or dot-product.")
    # Record the scores.
    n2v_scores = {}
    n2v_scores['test_roc'] = n2v_test_roc
    # n2v_scores['test_roc_curve'] = n2v_test_roc_curve
    # n2v_scores['test_pr_curve'] = n2v_test_pr_curve
    n2v_scores['test_ap'] = n2v_test_ap
    n2v_scores['val_roc'] = n2v_val_roc
    # n2v_scores['val_roc_curve'] = n2v_val_roc_curve
    n2v_scores['val_ap'] = n2v_val_ap
    n2v_scores['runtime'] = runtime
    return n2v_scores
# 输入: 原始稀疏邻接矩阵adj_sparse, train_test_split (通过mask_test_edges划分),特征矩阵,VAGE
# 输出: VGAE方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def gae_scores(
    adj_sparse,
    train_test_split,
    features_matrix=None,
    LEARNING_RATE = 0.01,
    EPOCHS = 250,
    HIDDEN1_DIM = 32,
    HIDDEN2_DIM = 16,
    DROPOUT = 0,
    edge_score_mode="dot-product",
    verbose=1,
    dtype=tf.float32
    ):
    """Train a variational graph autoencoder (VGAE, TF1 graph mode) and
    score link predictions.

    Returns a dict with test/val ROC-AUC and AP, per-epoch validation ROC,
    runtime, and (in edge-emb mode) ROC/PR curves.
    """
    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
        test_edges, test_edges_false = train_test_split # Unpack train-test split
    if verbose >= 1:
        print('GAE preprocessing...')
    # start_time = time.time()
    # Train on CPU (hide the GPU) due to memory constraints.
    os.environ['CUDA_VISIBLE_DEVICES'] = ""
    # Feature conversion: dense matrix --> sparse matrix --> tuple.
    # The feature tuple holds: (coordinate list, value list, matrix shape).
    if features_matrix is None:
        x = sp.lil_matrix(np.identity(adj_sparse.shape[0]))
    else:
        x = sp.lil_matrix(features_matrix)
    features_tuple = sparse_to_tuple(x)
    features_shape = features_tuple[2]
    # Graph properties fed into the model.
    num_nodes = adj_sparse.shape[0] # number of nodes in the adjacency matrix
    num_features = features_shape[1] # number of features (columns of the feature matrix)
    features_nonzero = features_tuple[1].shape[0] # number of non-zero feature entries
    # Keep the original adjacency (without diagonal entries) for later use.
    adj_orig = deepcopy(adj_sparse)
    adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
    adj_orig.eliminate_zeros()
    # Normalize the adjacency matrix.
    adj_norm = preprocess_graph(adj_train)
    # Add self-loops for the reconstruction labels.
    adj_label = adj_train + sp.eye(adj_train.shape[0])
    adj_label = sparse_to_tuple(adj_label)
    # Define the TF placeholders.
    placeholders = {
        'features': tf.sparse_placeholder(tf.float32),
        'adj': tf.sparse_placeholder(tf.float32),
        'adj_orig': tf.sparse_placeholder(tf.float32),
        'dropout': tf.placeholder_with_default(0., shape=())
    }
    # How much to weigh positive examples (true edges) in cost print_function
    # Want to weigh less-frequent classes higher, so as to prevent model output bias
    # pos_weight = (num. negative samples / (num. positive samples)
    pos_weight = float(adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) / adj_sparse.sum()
    # normalize (scale) average weighted cost
    norm = adj_sparse.shape[0] * adj_sparse.shape[0] / float((adj_sparse.shape[0] * adj_sparse.shape[0] - adj_sparse.sum()) * 2)
    if verbose >= 1:
        print('Initializing GAE model...')
    # Build the VAE model and its optimizer.
    model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero,
        HIDDEN1_DIM, HIDDEN2_DIM, dtype=dtype, flatten_output=False)
    opt = OptimizerVAE(preds=model.reconstructions,
        labels=tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False),
        # labels=placeholders['adj_orig'],
        model=model, num_nodes=num_nodes,
        pos_weight=pos_weight,
        norm=norm,
        learning_rate=LEARNING_RATE,
        dtype=tf.float32)
    cost_val = []
    acc_val = []
    val_roc_score = []
    prev_embs = []
    # Initialize the TF session.
    sess = tf.Session()
    if verbose >= 1:
        # Report every trainable variable and the total parameter count.
        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            print("Variable shape: ", shape)
            variable_parameters = 1
            for dim in shape:
                print("Current dimension: ", dim)
                variable_parameters *= dim.value
            print("Variable params: ", variable_parameters)
            total_parameters += variable_parameters
            print('')
        print("TOTAL TRAINABLE PARAMS: ", total_parameters)
        print('Initializing TF variables...')
    sess.run(tf.global_variables_initializer())
    if verbose >= 1:
        print('Starting GAE training!')
    start_time = time.time()
    # Train the model.
    train_loss = []
    train_acc = []
    val_roc = []
    val_ap = []
    for epoch in range(EPOCHS):
        t = time.time()
        # Construct the feed dictionary.
        feed_dict = construct_feed_dict(adj_norm, adj_label, features_tuple, placeholders)
        feed_dict.update({placeholders['dropout']: DROPOUT})
        # Single weight update.
        outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)
        # Average loss / accuracy for this epoch.
        avg_cost = outs[1]
        avg_accuracy = outs[2]
        # Evaluate predictions with dropout disabled.
        feed_dict.update({placeholders['dropout']: 0})
        gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
        prev_embs.append(gae_emb)
        gae_score_matrix = np.dot(gae_emb, gae_emb.T)
        roc_curr, ap_curr = get_roc_score(val_edges, val_edges_false, gae_score_matrix, apply_sigmoid=True)
        val_roc_score.append(roc_curr)
        # Per-epoch progress printout (disabled).
        # if verbose == 2:
        #     print(("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
        #         "train_acc=", "{:.5f}".format(avg_accuracy), "val_roc=", "{:.5f}".format(val_roc_score[-1]),
        #         "val_ap=", "{:.5f}".format(ap_curr),
        #         "time=", "{:.5f}".format(time.time() - t)))
        train_loss.append(avg_cost)
        train_acc.append(avg_accuracy)
        val_roc.append(val_roc_score[-1])
        val_ap.append(ap_curr)
    # Plot training loss/accuracy and validation AUC/AP (disabled).
    #draw_gae_training('hamster', EPOCHS, train_loss, train_acc, val_roc, val_ap)
    runtime = time.time() - start_time
    if verbose == 2:
        print("Optimization Finished!")
    # Final embeddings with dropout disabled.
    feed_dict.update({placeholders['dropout']: 0})
    gae_emb = sess.run(model.z_mean, feed_dict=feed_dict)
    # Dot-product edge scores.
    if edge_score_mode == "dot-product":
        gae_score_matrix = np.dot(gae_emb, gae_emb.T)
        # runtime = time.time() - start_time
        # Compute the final scores.
        gae_val_roc, gae_val_ap = get_roc_score(val_edges, val_edges_false, gae_score_matrix)
        gae_test_roc, gae_test_ap = get_roc_score(test_edges, test_edges_false, gae_score_matrix)
    # Bootstrapped edge embeddings (via the Hadamard product).
    elif edge_score_mode == "edge-emb":
        def get_edge_embeddings(edge_list):
            # Map each (node1, node2) pair to its Hadamard-product embedding.
            embs = []
            for edge in edge_list:
                node1 = edge[0]
                node2 = edge[1]
                emb1 = gae_emb[node1]
                emb2 = gae_emb[node2]
                edge_emb = np.multiply(emb1, emb2)
                embs.append(edge_emb)
            embs = np.array(embs)
            return embs
        # Train-set edge embeddings.
        pos_train_edge_embs = get_edge_embeddings(train_edges)
        neg_train_edge_embs = get_edge_embeddings(train_edges_false)
        train_edge_embs = np.concatenate([pos_train_edge_embs, neg_train_edge_embs])
        # Train-set edge labels: 1 = real edge, 0 = false edge.
        train_edge_labels = np.concatenate([np.ones(len(train_edges)), np.zeros(len(train_edges_false))])
        # Validation-set edge embeddings and labels.
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            pos_val_edge_embs = get_edge_embeddings(val_edges)
            neg_val_edge_embs = get_edge_embeddings(val_edges_false)
            val_edge_embs = np.concatenate([pos_val_edge_embs, neg_val_edge_embs])
            val_edge_labels = np.concatenate([np.ones(len(val_edges)), np.zeros(len(val_edges_false))])
        # Test-set edge embeddings and labels.
        pos_test_edge_embs = get_edge_embeddings(test_edges)
        neg_test_edge_embs = get_edge_embeddings(test_edges_false)
        test_edge_embs = np.concatenate([pos_test_edge_embs, neg_test_edge_embs])
        # Test-set edge labels: 1 = real edge, 0 = false edge.
        test_edge_labels = np.concatenate([np.ones(len(test_edges)), np.zeros(len(test_edges_false))])
        # Train a logistic-regression classifier on the train-set edge embeddings.
        edge_classifier = LogisticRegression(random_state=0, solver='liblinear')
        edge_classifier.fit(train_edge_embs, train_edge_labels)
        # Predicted edge scores: probability of class 1 (real edge).
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            val_preds = edge_classifier.predict_proba(val_edge_embs)[:, 1]
        test_preds = edge_classifier.predict_proba(test_edge_embs)[:, 1]
        #runtime = time.time() - start_time
        # Compute the scores.
        if len(val_edges) > 0 and len(val_edges_false) > 0:
            gae_val_roc = roc_auc_score(val_edge_labels, val_preds)
            gae_val_roc_curve = roc_curve(val_edge_labels, val_preds)
            gae_val_ap = average_precision_score(val_edge_labels, val_preds)
        else:
            gae_val_roc = None
            gae_val_roc_curve = None
            gae_val_ap = None
        gae_test_roc = roc_auc_score(test_edge_labels, test_preds)
        gae_test_roc_curve = roc_curve(test_edge_labels, test_preds)
        gae_test_pr_curve = precision_recall_curve(test_edge_labels, test_preds)
        gae_test_ap = average_precision_score(test_edge_labels, test_preds)
    # Record the scores.
    gae_scores = {}
    gae_scores['test_roc'] = gae_test_roc
    gae_scores['test_ap'] = gae_test_ap
    gae_scores['val_roc'] = gae_val_roc
    gae_scores['val_ap'] = gae_val_ap
    if(edge_score_mode=="edge-emb"):
        gae_scores['test_roc_curve'] = gae_test_roc_curve
        gae_scores['val_roc_curve'] = gae_val_roc_curve
        gae_scores['test_pr_curve'] = gae_test_pr_curve
    gae_scores['val_roc_per_epoch'] = val_roc_score
    gae_scores['runtime'] = runtime
    return gae_scores
# 输入: adj_sparse(邻接矩阵,以稀疏矩阵形式), features_matrix(特征矩阵), test_frac(测试集比例), val_frac(验证集比例), verbose(是否显示详细过程)
# Verbose: 0 - 不打印, 1 - 打印得分, 2 - 打印得分 + GAE 训练过程
# 返回: 每个链路预测方法的结果字典(ROC AUC, ROC Curve, AP, Runtime)
def calculate_all_scores(adj_sparse, features_matrix=None, directed=False, \
    test_frac=.1, val_frac=.05, random_state=0, verbose=1, \
    train_test_split_file=None,
    tf_dtype=tf.float32):
    """Run every link-prediction method on one graph.

    Verbose: 0 - silent, 1 - print scores, 2 - scores + GAE training logs.
    Returns a dict of per-method result dicts (ROC AUC, AP, runtime, ...).
    NOTE(review): the tf_dtype parameter is accepted but never used below
    (tf.float32 is passed literally) -- confirm whether it should be wired in.
    """
    np.random.seed(random_state) # Guarantee consistent train/test split
    tf.set_random_seed(random_state) # Consistent GAE training
    # Dictionary of link-prediction scores, keyed by method name.
    lp_scores = {}
    ### ---------- PREPROCESSING ---------- ###
    train_test_split = None
    try: # Use an existing saved split when the file is found
        with open(train_test_split_file, 'rb') as f:
            train_test_split = pickle.load(f)
            print('Found existing train-test split!')
    except: # Otherwise generate the split
        # NOTE(review): this bare except also swallows unrelated errors
        # (e.g. a corrupt pickle) -- confirm that falling back silently
        # to regeneration is intended.
        print('Generating train-test split...')
        if directed == False:
            train_test_split = mask_test_edges(adj_sparse, test_frac=test_frac, val_frac=val_frac)
        else:
            train_test_split = mask_test_edges_directed(adj_sparse, test_frac=test_frac, val_frac=val_frac)
    adj_train, train_edges, train_edges_false, val_edges, val_edges_false, \
        test_edges, test_edges_false = train_test_split # Unpack the tuple
    # g_train: graph object built from the training adjacency (held-out edges removed).
    if directed == True:
        g_train = nx.DiGraph(adj_train)
    else:
        g_train = nx.Graph(adj_train)
    # Inspect the train/test split.
    if verbose >= 1:
        print("Total nodes:", adj_sparse.shape[0])
        print("Total edges:", int(adj_sparse.nnz/2)) # adj is symmetric, so nnz (num non-zero) = 2*num_edges
        print("Training edges (positive):", len(train_edges))
        print("Training edges (negative):", len(train_edges_false))
        print("Validation edges (positive):", len(val_edges))
        print("Validation edges (negative):", len(val_edges_false))
        print("Test edges (positive):", len(test_edges))
        print("Test edges (negative):", len(test_edges_false))
        print('')
        print("------------------------------------------------------")
    # ---------- LINK-PREDICTION BASELINES ---------- ###
    # # Adamic-Adar
    aa_scores = adamic_adar_scores(g_train, train_test_split)
    lp_scores['aa'] = aa_scores
    if verbose >= 1:
        print('')
        print('Adamic-Adar Test ROC score: ', str(aa_scores['test_roc']))
        print('Adamic-Adar Test AP score: ', str(aa_scores['test_ap']))
    # Jaccard Coefficient
    jc_scores = jaccard_coefficient_scores(g_train, train_test_split)
    lp_scores['jc'] = jc_scores
    if verbose >= 1:
        print('')
        print('Jaccard Coefficient Test ROC score: ', str(jc_scores['test_roc']))
        print('Jaccard Coefficient Test AP score: ', str(jc_scores['test_ap']))
    # Preferential Attachment
    pa_scores = preferential_attachment_scores(g_train, train_test_split)
    lp_scores['pa'] = pa_scores
    if verbose >= 1:
        print('')
        print('Preferential Attachment Test ROC score: ', str(pa_scores['test_roc']))
        print('Preferential Attachment Test AP score: ', str(pa_scores['test_ap']))
    ### ---------- SPECTRAL CLUSTERING ---------- ###
    sc_scores = spectral_clustering_scores(train_test_split)
    lp_scores['sc'] = sc_scores
    if verbose >= 1:
        print('')
        print('Spectral Clustering Validation ROC score: ', str(sc_scores['val_roc']))
        print('Spectral Clustering Validation AP score: ', str(sc_scores['val_ap']))
        print('Spectral Clustering Test ROC score: ', str(sc_scores['test_roc']))
        print('Spectral Clustering Test AP score: ', str(sc_scores['test_ap']))
        print('')
    ## ---------- NODE2VEC ---------- ###
    # node2vec hyperparameters.
    # With p = q = 1, node2vec is equivalent to DeepWalk.
    P = 1 # Return hyperparameter
    Q = 1 # In-out hyperparameter
    WINDOW_SIZE = 10 # Context size for optimization
    NUM_WALKS = 10 # Number of walks per source node
    WALK_LENGTH = 80 # Length of each random walk
    DIMENSIONS = 128 # Embedding dimensionality
    DIRECTED = False # Directed / undirected graph
    WORKERS = 8 # Number of parallel workers
    ITER = 1 # Number of SGD epochs
    # Bootstrapped edge embeddings + logistic regression.
    n2v_edge_emb_scores = node2vec_scores(g_train, train_test_split,
        P, Q, WINDOW_SIZE, NUM_WALKS, WALK_LENGTH, DIMENSIONS, DIRECTED, WORKERS, ITER,
        "edge-emb",
        verbose)
    lp_scores['n2v_edge_emb'] = n2v_edge_emb_scores
    if verbose >= 1:
        print('')
        print('node2vec (Edge Embeddings) Validation ROC score: ', str(n2v_edge_emb_scores['val_roc']))
        print('node2vec (Edge Embeddings) Validation AP score: ', str(n2v_edge_emb_scores['val_ap']))
        print('node2vec (Edge Embeddings) Test ROC score: ', str(n2v_edge_emb_scores['test_roc']))
        print('node2vec (Edge Embeddings) Test AP score: ', str(n2v_edge_emb_scores['test_ap']))
        print('')
    # Dot product of node embeddings for edge scores.
    n2v_dot_prod_scores = node2vec_scores(g_train, train_test_split,
        P, Q, WINDOW_SIZE, NUM_WALKS, WALK_LENGTH, DIMENSIONS, DIRECTED, WORKERS, ITER,
        "dot-product",
        verbose)
    lp_scores['n2v_dot_prod'] = n2v_dot_prod_scores
    if verbose >= 1:
        print('')
        print('node2vec (Dot Product) Validation ROC score: ', str(n2v_dot_prod_scores['val_roc']))
        print('node2vec (Dot Product) Validation AP score: ', str(n2v_dot_prod_scores['val_ap']))
        print('node2vec (Dot Product) Test ROC score: ', str(n2v_dot_prod_scores['test_roc']))
        print('node2vec (Dot Product) Test AP score: ', str(n2v_dot_prod_scores['test_ap']))
        print('')
    ### ---------- (VARIATIONAL) GRAPH AUTOENCODER ---------- ###
    # # GAE hyperparameters.
    LEARNING_RATE = 0.01 # Default: 0.01
    EPOCHS = 250
    HIDDEN1_DIM = 32
    HIDDEN2_DIM = 16
    DROPOUT = 0
    # Dot-product edge scores.
    tf.set_random_seed(random_state) # Consistent GAE training
    gae_results = gae_scores(adj_sparse, train_test_split, features_matrix,
        LEARNING_RATE, EPOCHS, HIDDEN1_DIM, HIDDEN2_DIM, DROPOUT,
        "dot-product",
        verbose,
        dtype=tf.float32)
    lp_scores['gae'] = gae_results
    if verbose >= 1:
        print('')
        print('GAE (Dot Product) Validation ROC score: ', str(gae_results['val_roc']))
        print('GAE (Dot Product) Validation AP score: ', str(gae_results['val_ap']))
        print('GAE (Dot Product) Test ROC score: ', str(gae_results['test_roc']))
        print('GAE (Dot Product) Test AP score: ', str(gae_results['test_ap']))
        print("------------------------------------------------------")
        print("------------------------------------------------------")
        print('')
    # Edge-embedding scores.
    tf.set_random_seed(random_state) # Consistent GAE training
    gae_edge_emb_results = gae_scores(adj_sparse, train_test_split, features_matrix,
        LEARNING_RATE, EPOCHS, HIDDEN1_DIM, HIDDEN2_DIM, DROPOUT,
        "edge-emb",
        verbose)
    lp_scores['gae_edge_emb'] = gae_edge_emb_results
    if verbose >= 1:
        print('')
        print('GAE (Edge Embeddings) Validation ROC score: ', str(gae_edge_emb_results['val_roc']))
        print('GAE (Edge Embeddings) Validation AP score: ', str(gae_edge_emb_results['val_ap']))
        #print('GAE (Edge Embeddings) Validation ROC_CURVE score: ', str(gae_edge_emb_results['val_roc_curve']))
        print('GAE (Edge Embeddings) Test ROC score: ', str(gae_edge_emb_results['test_roc']))
        print('GAE (Edge Embeddings) Test AP score: ', str(gae_edge_emb_results['test_ap']))
        #print('GAE (Edge Embeddings) Test ROC_CURVE score: ', str(gae_edge_emb_results['test_roc_curve']))
    ### ---------- RETURN RESULTS ---------- ###
    return lp_scores
|
import numpy
import matplotlib.pyplot as plt

# 50 random (x, y) points drawn from a standard normal distribution.
x_vals = numpy.random.randn(50)
y_vals = numpy.random.randn(50)

# Scatter plot with grid, axis labels, a title and a figure-level suptitle.
plt.scatter(x_vals, y_vals)
plt.title("Scatter Plot")
plt.suptitle("Scatter Title")
plt.grid(True)
plt.xlabel("xLabel")
plt.ylabel("yLabel")
plt.show()
#Bubble Sort
def bubble_sort(items):
    """Sort items in place (ascending) and return the same list.

    Repeatedly sweeps the list, swapping adjacent out-of-order pairs,
    until a sweep makes no swap. Improvement over the original: each
    sweep bubbles the largest remaining element into its final slot, so
    the scanned range shrinks by one per pass instead of rescanning the
    whole list every time (same worst-case O(n^2), strictly less work).
    """
    end = len(items) - 1
    swapped = True
    while swapped:
        swapped = False
        for i in range(end):
            if items[i] > items[i + 1]:
                items[i], items[i + 1] = items[i + 1], items[i]
                swapped = True
        end -= 1  # the largest element of this pass is now in place
    return items
#Merge Sort
def merge(left, right):
    """Merge two ascending-sorted lists into one ascending-sorted list.

    On ties the right-hand element is taken first (strict `<` comparison),
    matching the original implementation exactly.
    """
    merged = []
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; the other's tail is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def merge_sort(items):
'''Return array of items, sorted in ascending order'''
if len(items) <= 1: # base case
return items
# divide array in half and merge sort recursively
half = len(items) // 2
left = merge_sort(items[:half])
right = merge_sort(items[half:])
return merge(left, right)
#Quick Sort
def quick_sort(items):
'''Return array of items, sorted in ascending order'''
if len(items) == 1 or len(items) == 0:
return items
else:
pivot = items[0]
i = 0
for j in range(len(items)-1):
if items[j+1] < pivot:
items[j+1],items[i+1] = items[i+1], items[j+1]
i += 1
items[0],items[i] = items[i],items[0]
first_part = quick_sort(items[:i])
second_part = quick_sort(items[i+1:])
first_part.append(items[i])
return first_part + second_part
|
from django.db import models
# Create your models here.
class Entry (models.Model):
    """A dated entry recording a feeling, a description and an optional photo
    (appears to be a journal/diary entry -- confirm against the app's views)."""
    # Date the entry refers to; must be supplied by the user (no auto-fill).
    creation_date = models.DateField(('creation_date'), auto_now=False, auto_now_add=False, blank=False)
    # Refreshed automatically on every save (auto_now=True).
    updated_date = models.DateTimeField(('updated_date'), auto_now=True, blank = True)
    class FeelingOptions(models.TextChoices):
        # Two-letter codes stored in the database for each feeling.
        HAPPY = 'HA'
        SAD = 'SA'
        ANGRY = 'AN'
        CONFIDENT = 'CO'
        SICK = 'SI'
        AMAZED = 'AM'
    # Stores one of the FeelingOptions two-letter codes.
    feeling = models.CharField('feeling', max_length=2, choices=FeelingOptions.choices)
    description = models.TextField('description')
    # Optional picture attached to the entry.
    photo = models.ImageField(null=True)
    # Set once when the row is first inserted (auto_now_add=True), never updated.
    time_stamp = models.DateTimeField(('time_stamp'), auto_now=False, auto_now_add=True, blank=False)
class Practice (models.Model):
    """Minimal model pairing a free-text field with a manually supplied date
    (presumably a practice/scratch model -- verify it is used in production)."""
    text_part = models.TextField('text')
    date_part = models.DateField(('date'), auto_now=False, auto_now_add=False)
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTED BY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'Sebastian'
from pycloud.pycloud.mongo import Model, ObjectID
import os
################################################################################################################
# Represents a user.
################################################################################################################
class User(Model):
    """Represents a user stored in the 'users' Mongo collection.

    Attributes (see Meta.external): _id, name, hashed_pwd.
    """
    # Meta class is needed so that minimongo can map this class onto the database.
    class Meta:
        collection = "users"
        external = ['_id', 'name', 'hashed_pwd']
        mapping = {
        }
    ################################################################################################################
    # Constructor.
    ################################################################################################################
    def __init__(self, *args, **kwargs):
        # self._id = None  # Commented out so Mongo auto-populates it.
        self.name = None        # login name of the user
        self.hashed_pwd = None  # password hash (plaintext is never stored)
        super(User, self).__init__(*args, **kwargs)
    ################################################################################################################
    # Locate a user by its username.
    ################################################################################################################
    # noinspection PyBroadException
    @staticmethod
    def by_username(username=None):
        """Return the User whose name matches ``username``, or None on any failure.

        Best-effort lookup: database errors are swallowed and reported as
        "not found" (None), matching the original intent of the broad except.
        """
        try:
            # BUG FIX: the stored field is 'name' (see Meta.external and
            # __init__); querying on 'username' could never match a document.
            user = User.find_one({'name': username})
        except Exception:
            return None
        return user
|
from django.db import models
from django.urls import reverse
class Catalog(models.Model):
    """Top-level catalog section; 'name' doubles as the primary key."""
    name = models.CharField(db_index=True, max_length=200, primary_key=True, verbose_name='Имя')
    # URL-safe identifier used in links and as the display string (__str__).
    slug = models.SlugField(max_length=200, db_index=True)
    seo_descr = models.TextField(blank=True, verbose_name='Описание', max_length=300)
    # Class-level constant passed to get_absolute_url() as the URL "kind" segment.
    code = 'catalog'
    image = models.ImageField(upload_to = 'media/', blank=True, verbose_name='Preview')
    available = models.BooleanField(default=True, verbose_name='Доступность')
    class Meta:
        ordering = ['name']
        verbose_name = 'Каталог'
        verbose_name_plural = 'Каталоги'
    def __str__(self):
        # NOTE: returns the slug, not the human-readable name.
        return self.slug
    def get_absolute_url(self):
        return reverse('mainapp:Choose', args=[self.code, self.slug])
class Subcat(models.Model):
    """Sub-category belonging to a Catalog; 'name' doubles as the primary key."""
    name = models.CharField(db_index=True, max_length=200, primary_key=True, verbose_name='Имя')
    # URL-safe identifier used in links and as the display string (__str__).
    slug = models.SlugField(max_length=200, db_index=True)
    # Class-level constant passed to get_absolute_url() as the URL "kind" segment.
    code = 'categories'
    available = models.BooleanField(default=True, verbose_name='Доступность')
    # Parent catalog; deleting the catalog cascades to its sub-categories.
    category = models.ForeignKey(Catalog, on_delete=models.CASCADE)
    image = models.ImageField(upload_to = 'media/', blank=True, verbose_name='Изображение категории')
    seo_descr = models.TextField(blank=True, verbose_name='Описание', max_length=200)
    class Meta:
        ordering = ['name']
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'
    def __str__(self):
        # NOTE: returns the slug, not the human-readable name.
        return self.slug
    def get_absolute_url(self):
        return reverse('mainapp:Choose', args=[self.code, self.slug])
class Product(models.Model):
    """Product inside a Subcat; uses Django's default auto-increment primary key."""
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, db_index=True)
    # Parent sub-category; deleting it cascades to its products.
    category = models.ForeignKey(Subcat, on_delete=models.CASCADE)
    available = models.BooleanField(default=True, verbose_name='Доступность')
    # Class-level constant passed to get_absolute_url() as the URL "kind" segment.
    code = 'product'
    image = models.ImageField(upload_to = 'media/', blank=True, verbose_name='Изображение товара')
    seo_descr = models.TextField(blank=True, verbose_name='Описание', max_length=200)
    class Meta:
        ordering = ['name']
        verbose_name = 'Продукт'
        verbose_name_plural = 'Продукты'
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # NOTE(review): uses the 'catalog:' URL namespace while Catalog and
        # Subcat use 'mainapp:' -- confirm which namespace is registered.
        return reverse('catalog:Choose', args=[self.code, self.slug])
# Read a product price and print it with a 6% discount applied.
price = float(input('Digite o preço do produto:'))
discount = price * 6 / 100  # 6% of the original price
print('O produto descontado 6% ficará com esse preço {:.2f}'.format(price - discount))
|
import sys
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QVBoxLayout, QLineEdit, QPlainTextEdit, QStyleFactory, QTableWidget,
QAbstractItemView, QTableWidgetItem, QGridLayout, QPushButton, QCheckBox, QComboBox, QHeaderView, QGridLayout,
QSpacerItem, QSizePolicy)
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QThread, QWaitCondition, QMutex, Qt
from PyQt5.QtGui import QPalette, QPixmap, QBrush, QFont
import torch
import pandas as pd
import numpy as np
import time
import model
# import qtmodern.styles
# import random
class MyThread(QThread):
    """Worker thread that replays pre-recorded packet tensors through the
    detection model and emits per-packet results to the GUI thread.

    NOTE(review): the loop holds ``self.mutex`` across the inference and the
    ``msleep`` pause, so GUI-driven toggles only take effect between
    iterations -- confirm this is the intended handshake.
    """
    # Signal declaration: carries the result dict ('type', 'acc', 'time',
    # 'packet', 'check' / 'stop') back to the UI thread.
    change_value = pyqtSignal(object)
    def __init__(self):
        QThread.__init__(self)
        self.cond = QWaitCondition()  # used to pause/resume the replay loop
        self.mutex = QMutex()
        self._status = False          # True while the demo is running
        self._attack = False          # True -> feed abnormal (attack) samples
        self._read_speed = 1000       # delay between packets, in milliseconds
        self.consume = dict()         # payload dict emitted via change_value
        self.consume['stop'] = 'Stop!!'
    def __del__(self):
        # Block until the thread has finished before the object is destroyed.
        self.wait()
    # Start inference and result recording.
    def run(self):
        """Main loop: load the model and the recorded numpy datasets, then
        classify one sample per tick and emit accuracy/timing/packet info."""
        net = model.OneNet(self.packet_num)
        # net.load_state_dict(torch.load('model_weight_%d.pth' % self.packet_num))
        net.load_state_dict(torch.load('99.pth', map_location='cpu'))
        net.to(self.device)
        net.eval()
        # Recurrent state carried across packets; shape per model.STATE_DIM.
        packet_state = torch.zeros(1, model.STATE_DIM).to(self.device)
        inference_count = 0
        accuracy = 0.0
        normal_idx = 0
        abnormal_idx = 0
        # Tensor inputs for the model and raw rows for display, per class.
        te_no_load = np.load('./fuzzy_tensor_normal_numpy.npy')
        te_ab_load = np.load('./fuzzy_tensor_abnormal_numpy.npy')
        no_load = np.load('./fuzzy_normal_numpy.npy')
        ab_load = np.load('./fuzzy_abnormal_numpy.npy')
        while True:
            self.mutex.lock()
            if not self._status:
                # Paused: tell the UI we stopped, then sleep until wakeAll().
                self.consume['type'] = 'end'
                self.change_value.emit(self.consume)
                self.cond.wait(self.mutex)
            # Pick the next sample; label 1 = normal, 0 = attack.
            if not self._attack:
                inputs = torch.from_numpy(te_no_load[normal_idx]).float()
                labels = 1
            else:
                inputs = torch.from_numpy(te_ab_load[abnormal_idx]).float()
                labels = 0
            inputs = inputs.to(self.device)
            with torch.no_grad():
                time_temp = time.time()
                outputs, packet_state = net(inputs, packet_state)
                time_temp = time.time() - time_temp
            # Detach the recurrent state so gradients never accumulate.
            packet_state = torch.autograd.Variable(packet_state, requires_grad=False)
            _, preds = torch.max(outputs, 1)
            inference_count += 1
            # print(preds.item(), labels)
            if preds.item() == labels:
                self.consume['check'] = 'ok'
                accuracy += 1.0
            else:
                self.consume['check'] = 'no'
                accuracy += 0.0
            self.consume['type'] = 'start'
            self.consume['acc'] = accuracy / inference_count * 100.0
            self.consume['time'] = round(time_temp, 6)
            # Advance (and wrap) the dataset cursor; ship the raw row for display.
            if not self._attack:
                self.consume['packet'] = no_load[normal_idx]
                normal_idx += 1
                if normal_idx == len(no_load):
                    normal_idx = 0
            else:
                self.consume['packet'] = ab_load[abnormal_idx]
                abnormal_idx += 1
                if abnormal_idx == len(ab_load):
                    abnormal_idx = 0
            self.change_value.emit(self.consume)
            self.msleep(self._read_speed)  # sleep provided by QThread
            self.mutex.unlock()
    def toggle_status(self):
        # Flip run/pause; wake the sleeping loop when switching to "running".
        self._status = not self._status
        if self._status:
            self.cond.wakeAll()
    def toggle_attack(self):
        # Switch between the normal and attack datasets.
        self._attack = not self._attack
    def parameter(self, packet_num, device):
        # Must be called before start(): model size and torch device to use.
        self.packet_num = packet_num
        self.device = device
    def set_speed(self, value):
        # Packet replay interval, in milliseconds.
        self._read_speed = int(value)
    @property
    def status(self):
        # Read-only view of the run/pause flag.
        return self._status
class MyApp(QWidget):
    """Main window: a results table plus accuracy log on the left, and the
    parameter/control panel (packet count, speed, Start/Attack) on the right."""
    def __init__(self):
        super().__init__()
        self.prev_packet_num = 0  # packet count the current worker thread was built for
        self.setupUI()
    def setupUI(self):
        """Construct all widgets/layouts and show the window."""
        self.setWindowTitle("Detection")
        self.resize(740, 400)
        # Main horizontal layout
        self.main_horizontalLayout = QtWidgets.QHBoxLayout()
        # Left-hand vertical layout
        self.left_verticalLayout = QtWidgets.QVBoxLayout()
        # Area where the packets are displayed
        self.scrollArea = QtWidgets.QScrollArea()
        self.scrollArea.setWidgetResizable(True)
        # self.packet_area = QPlainTextEdit()
        # self.scrollArea.setWidget(self.packet_area)
        # --- table begin ---
        self.table = QTableWidget()
        self.table.setSelectionMode(QAbstractItemView.SingleSelection)
        # Row and column counts must be set before a QTableWidget can be used.
        self.table.setColumnCount(10)
        self.table.setRowCount(0)
        # Column header
        self.table.setHorizontalHeaderLabels(["ID"])
        self.table.horizontalHeaderItem(0).setTextAlignment(Qt.AlignCenter)  # header alignment
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)  # disable in-place editing
        self.table.setShowGrid(False)  # hide grid lines
        self.table.verticalHeader().setVisible(False)  # hide the row header
        # --- table end ---
        self.scrollArea.setWidget(self.table)
        self.left_verticalLayout.addWidget(self.scrollArea)
        # Accuracy display area
        self.accuracy_horizontalLayout = QtWidgets.QHBoxLayout()
        self.accuracy_groupBox = QtWidgets.QGroupBox()
        self.accuracy_groupBox.setTitle("Log")
        self.accuracy_formLayout = QtWidgets.QGridLayout()
        # self.accuracy_formLayout.setRowStretch(0, 1)
        # self.accuracy_formLayout.setRowStretch(2, 1)
        # self.accuracy_formLayout.setRowStretch(4, 1)
        self.now_accuracy = QLabel("?")
        self.accuracy_formLayout.addWidget(QLabel("Accuracy:"), 0, 0)
        self.accuracy_formLayout.addWidget(self.now_accuracy, 0, 1)
        self.now_inference_time = QLabel("?")
        self.accuracy_formLayout.addWidget(QLabel("Inference Time:"), 1, 0)
        self.accuracy_formLayout.addWidget(self.now_inference_time, 1, 1)
        self.accuracy_formLayout.setAlignment(Qt.AlignLeft)
        self.accuracy_groupBox.setLayout(self.accuracy_formLayout)
        self.accuracy_horizontalLayout.addWidget(self.accuracy_groupBox)
        self.left_verticalLayout.addLayout(self.accuracy_horizontalLayout)
        self.left_verticalLayout.setStretchFactor(self.scrollArea, 3)
        self.left_verticalLayout.setStretchFactor(self.accuracy_horizontalLayout, 1)
        # --- left side end ---
        self.main_horizontalLayout.addLayout(self.left_verticalLayout)
        # --- right side begin ---
        self.right_verticalLayout = QtWidgets.QVBoxLayout()
        # Number of packets to read
        self.parameter_groupBox = QtWidgets.QGroupBox()
        self.parameter_groupBox.setTitle("Parameter")
        # Grid inside the group box
        self.parameter_formLayout = QtWidgets.QGridLayout()
        self.packet_num_line = QLineEdit()
        self.parameter_formLayout.addWidget(QLabel("Packet num:"), 0, 0)
        self.parameter_formLayout.addWidget(self.packet_num_line, 0, 1)
        self.parameter_formLayout.addWidget(QLabel("(1 ~ 1)"), 1, 0)
        self.parameter_formLayout.addWidget(QLabel(""), 2, 0)  # grid spacing ...?
        # Combo box for choosing the csv read speed
        self.time_combo = QComboBox()
        self.time_combo.addItems(["0.25s", "0.5s", "1.0s", "0.1s"])
        self.parameter_formLayout.addWidget(QLabel("Packet read speed:"), 3, 0)
        self.parameter_formLayout.addWidget(self.time_combo, 3, 1)
        # Buttons
        self.start_pushButton = QtWidgets.QPushButton("Start")
        self.start_pushButton.setCheckable(True)
        self.start_pushButton.toggled.connect(self.start_toggle)
        self.attack_pushButton = QtWidgets.QPushButton("Attack")
        self.attack_pushButton.setCheckable(True)
        self.attack_pushButton.toggled.connect(self.attack_toggle)
        self.parameter_formLayout.addWidget(QLabel(""), 4, 0)  # grid spacing ...?
        self.parameter_formLayout.addWidget(QLabel(""), 5, 0)  # grid spacing ...?
        # self.parameter_formLayout.setRowStretch(4, 1)
        # self.parameter_formLayout.setRowStretch(2, 1)
        # vspacer = QtGui.QSpacerItem(
        #     QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        # layout.addItem(vspacer, last_row, 0, 1, -1)
        # hspacer = QtGui.QSpacerItem(
        #     QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        # layout.addItem(hspacer, 0, last_column, -1, 1)
        self.parameter_formLayout.addWidget(self.start_pushButton, 6, 0)
        self.parameter_formLayout.addWidget(self.attack_pushButton, 6, 1)
        self.parameter_formLayout.setRowStretch(7, 1)
        # self.parameter_formLayout.setVerticalSpacing(50)
        # self.parameter_formLayout.setContentsMargins(5, 5, 5, 5)  # left, top, right, bottom
        self.parameter_groupBox.setLayout(self.parameter_formLayout)
        self.right_verticalLayout.addWidget(self.parameter_groupBox)
        self.main_horizontalLayout.addLayout(self.right_verticalLayout)
        self.main_horizontalLayout.setStretchFactor(self.left_verticalLayout, 2)
        self.main_horizontalLayout.setStretchFactor(self.right_verticalLayout, 1)
        # --- right side end ---
        self.setLayout(self.main_horizontalLayout)
        self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.show()
    def start_demo(self):
        """Validate the packet-count input, (re)create the worker thread when
        the count changed, and start replay at the selected speed.

        NOTE(review): the worker's run/pause flag is only toggled in
        start_toggle's "stop" branch -- confirm the start/stop handshake.
        """
        # Validate input
        packet_num = self.packet_num_line.text()
        if packet_num == '':
            print("Empty Value Not Allowed")
            self.packet_num_line.setFocus()
            return
        packet_num = int(packet_num)
        if packet_num < 1 or packet_num > 1:
            print("too many packet")
            self.packet_num_line.setFocus()
            return
        else:
            self.packet_num_line.clearFocus()
        # Initialize the table with a status row.
        self.add_spanRow_text('Start!! Please wait')
        # The packet count changed: build a fresh worker thread.
        if self.prev_packet_num != packet_num:
            self.prev_packet_num = packet_num
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            self.predict_thread = MyThread()
            self.predict_thread.parameter(packet_num, device)
            self.predict_thread.change_value.connect(self.update_line_edit)
        self.predict_thread.start()
        # Combo entries look like "0.25s": strip the unit, convert to ms.
        csv_read_speed = float(self.time_combo.currentText()[:-1])
        self.predict_thread.set_speed(csv_read_speed * 1000)
    def update_line_edit(self, consume):
        """Slot for MyThread.change_value: refresh the accuracy labels and
        append one colored packet row (green = correct, red = wrong)."""
        if consume['type'] == 'start':
            self.now_accuracy.setText(str(consume['acc']))
            self.now_inference_time.setText(str(consume['time']))
            if consume['check'] == 'ok':  # prediction was correct
                color = QtGui.QColor(150, 255, 150)  # Red, Green, Blue, Alpha
            else:
                color = QtGui.QColor(255, 150, 150)
            next_row = self.table.rowCount()
            self.table.insertRow(next_row)  # append a row
            col_idx = 0
            for consume_packet in consume['packet']:
                self.table.setItem(next_row, col_idx, QTableWidgetItem(str(consume_packet)))
                self.table.item(next_row, col_idx).setBackground(color)
                col_idx += 1
            self.table.scrollToBottom()
        else:
            # Worker paused: show its stop message as a full-width row.
            self.add_spanRow_text(consume['stop'])
    def add_row_text(self, text):
        """Append a plain single-cell row holding *text*."""
        next_row = self.table.rowCount()
        self.table.insertRow(next_row)  # append a row
        self.table.setItem(next_row, 0, QTableWidgetItem(text))
        self.table.scrollToBottom()
    def add_spanRow_text(self, text):
        """Append a status row whose cell spans all 10 columns."""
        next_row = self.table.rowCount()
        self.table.insertRow(next_row)  # append a row
        self.table.setSpan(next_row, 0, 1, 10)  # create a 1 x 10 span
        self.table.setItem(next_row, 0, QTableWidgetItem(text))
        self.table.scrollToBottom()
    @pyqtSlot(bool)
    def start_toggle(self, state):
        """Start/Stop button handler: swap the label and run/pause the worker."""
        # self.start_pushButton.setStyleSheet("background-color: %s" % ({True: "green", False: "red"}[state]))
        self.start_pushButton.setText({True: "Stop", False: "Start"}[state])
        self.packet_num_line.setEnabled({True: False, False: True}[state])
        if state:
            self.start_demo()
        else:
            # self.packet_area.appendPlainText('Trying to stop..')
            self.add_spanRow_text('Trying to stop..')
            self.predict_thread.toggle_status()
    @pyqtSlot(bool)
    def attack_toggle(self, state):
        """Attack button handler: switch the worker between normal/attack data.

        NOTE(review): pressing Attack before Start raises AttributeError since
        self.predict_thread does not exist yet -- confirm this is acceptable.
        """
        # self.attack_pushButton.setStyleSheet("background-color: %s" % ({True: "green", False: "red"}[state]))
        self.attack_pushButton.setText({True: "Stop", False: "Attack"}[state])
        self.predict_thread.toggle_attack()
if __name__ == "__main__":
    # Create the Qt application, show the main window, and block until it exits.
    app = QApplication(sys.argv)
    # qtmodern.styles.light(app)
    # app.setStyle(QStyleFactory.create('Fusion'))
    ex = MyApp()
    sys.exit(app.exec_())
|
# Generated by Django 2.2.4 on 2019-09-06 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen the 'name' CharField on the listed
    course models (to 40 chars, except bsc_maths and mecha at 24)."""
    dependencies = [
        ('webapp', '0002_auto_20190906_1959'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bsc_chem',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='bsc_it',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='bsc_maths',
            name='name',
            field=models.CharField(max_length=24),
        ),
        migrations.AlterField(
            model_name='bsc_phys',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='civil',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='etc',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='mecha',
            name='name',
            field=models.CharField(max_length=24),
        ),
        migrations.AlterField(
            model_name='msc_chem',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='msc_maths',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='msc_phys',
            name='name',
            field=models.CharField(max_length=40),
        ),
        migrations.AlterField(
            model_name='power',
            name='name',
            field=models.CharField(max_length=40),
        ),
    ]
|
#!/usr/bin/env python
''' encoding: utf-8
Copyright 2011 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Test that building images w/ the various parameters and templates using aeolus-cli tool
Created by koca (mkoci@redhat.com)
Date: 09/12/2011
Issue: https://tcms.engineering.redhat.com/case/122786/?from_plan=4953
return values:
0 - OK: everything OK
1 - Fail: setupTest wasn't OK
2 - Fail: bodyTest wasn't OK
3 - Fail: cleanTest wasn't OK
4 - Fail: any other error (reserved value)
'''
#necessary libraries
import os
import sys
import subprocess
import oauth2 as oauth
import httplib2
import json
import re
import time
import shutil
from syck import *
# Load the run settings (log paths, targets, architectures, ...) from YAML.
configuration = load(file("configuration.yaml", 'r').read())
#constants
SUCCESS=0
FAILED=1
# Script exit codes for the individual test phases.
RET_SETUPTEST=1
RET_BODYTEST=2
RET_CLEANTEST=3
RET_UNEXPECTED_ERROR=4
ROOTID=0      # euid of root, for the permission check in setupTest()
TIMEOUT=180   # build timeout, in minutes
MINUTE=60     # seconds per polling interval
#setup
LogFileIF=configuration["LogFileIF"]    # Image Factory log path
LogFileIWH=configuration["LogFileIWH"]  # Image Warehouse log path
# Define a list to collect all tests, and one for their results.
alltests = list()
results = list()
# OAuth credentials used against the imagefactory REST API.
consumer = oauth.Consumer(key='key', secret='secret')
#method
sig_method = oauth.SignatureMethod_HMAC_SHA1()
'''mandatory information'''
params = {'oauth_version':"0.4.4",
          'oauth_nonce':oauth.generate_nonce(),
          'oauth_timestamp':oauth.generate_timestamp(),
          'oauth_signature_method':sig_method.name,
          'oauth_consumer_key':consumer.key}
url_https="https://localhost:8075/imagefactory/builders/"
temporaryfile = "deleteme_build_image"   # scratch file holding the generated template
tmplogfileIF = "deletemeBuildImage.log"  # scratch copy of the imagefactory output
# Extra TDL snippets appended to the template: packages, files, and empty.
templatesetupvar = ["""<packages>
<package name='httpd'/>
<package name='php'/>
</packages>
""", """<files>
<file name='/var/www/html/index.html' type='raw'>
Aeolus Cloud Test page on Build Created
</file>
</files>""", """"""]
architectures=configuration['architectures']
#["i386", "x86_64"]
installtypes=configuration["installtypes"]
#["url", "iso"]
targetimages=configuration["targetimages"]
#["ec2", "rhevm", "mock", "vsphere"]
VSPHEREbugFile=configuration["VSPHEREbugFile"]
VSPHEREconfigureFile=configuration["VSPHEREconfigureFile"]
VSPHEREBackupFile=configuration["VSPHEREBackupFile"]
RHEVMbugFile=configuration["RHEVMbugFile"]
RHEVMconfigureFile=configuration["RHEVMconfigureFile"]
RHEVMBackupFile=configuration["RHEVMBackupFile"]
# Distributions to build in imagefactory and aeolus-image:
# {distro: {version: [repo base URL, iso path prefix, iso suffix]}}
distros={"RHEL-6":{"2":["http://download.englab.brq.redhat.com/released/RHEL-6/6.2/Server/", "/iso/RHEL6.2-20111117.0-Server-", "-DVD1.iso"],
                   "1":["http://download.englab.brq.redhat.com/released/RHEL-6/6.1/Server/", "/iso/RHEL6.1-20110510.1-Server-", "-DVD1.iso"]},
         "Fedora":{"15":["http://download.englab.brq.redhat.com/released/F-15/GOLD/Fedora/", "/iso/Fedora-15-", "-DVD.iso"],
                   "16":["http://download.englab.brq.redhat.com/released/F-16/GOLD/Fedora/", "/iso/Fedora-16-", "-DVD.iso"]}}
# Define an object to record test results
class TestResult(object):
def __init__(self, *args, **kwargs):
if len(args) == 7:
(self.distro, self.version, self.arch, self.installtype, self.isourlstr, self.targetim, self.templatesetup) = args
for k,v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
'''String representation of object'''
return "test-{0}-{1}-{2}-{3}-{5}-{4}-additional template:{6}".format(*self.test_args())
@property
def name(self):
'''Convenience property for test name'''
return self.__repr__()
def test_args(self):
return (self.distro, self.version, self.arch, self.installtype, self.isourlstr, self.targetim, self.templatesetup)
#main function to execute the test
def execute(self):
if self.expect_pass:
if self.methodCLI:
return (self.name, self.__runTestAeolusCLI(self.test_args()), "#aeolus-cli")
else:
return (self.name, self.__runTestImageFactory(self.test_args()), "#imagefactory")
else:
if self.methodCLI:
return (self.name, self.handle_exception(self.test_args()), "#aeolus-cli")
else:
return (self.name, self.__runTestImageFactory(self.test_args()), "#imagefactory")
def __getTemplate(self, *args):
(distro, version, arch, installtype, isourlstr, targetim, templatesetup) = args
if installtype == "url":
repositorystr = isourlstr
else:
repositorystr = isourlstr + "/../../os/"
print "Testing %s-%s-%s-%s-%s-%s..." % (distro, version, arch, installtype, targetim, isourlstr),
tdlxml = """
<template>
<name>tester</name>
<os>
<name>%s</name>
<version>%s</version>
<arch>%s</arch>
<install type='%s'>
<%s>%s</%s>
</install>
<rootpw>redhat</rootpw>
</os>
<repositories>
<repository name='koca-repository'>
<url>%s</url>
</repository>
</repositories>
%s
</template>
""" % (distro, version, arch, installtype, installtype, isourlstr, installtype, repositorystr, templatesetup)
return tdlxml
def __runTestImageFactory(self, args):
global temporaryfile
global tmplogfileIF
(distro, version, arch, installtype, isourlstr, targetim, templatesetup) = args
#lets clean the logs so there is no obsolete records in it.
print "Clearing log file for Image Factory"
os.system("> " + LogFileIF)
print "Clearing log file for Image Warehouse"
os.system("> " + LogFileIWH)
tdlxml = self.__getTemplate(distro, version, arch, installtype, isourlstr, targetim, templatesetup)
os.system("echo \""+tdlxml+"\" > "+temporaryfile)
print "See the testing template"
print "======================================================"
outputtmp = os.popen("cat "+temporaryfile).read()
print outputtmp
CrazyCommand = "imagefactory --debug --target %s --template " % targetim + temporaryfile + " |& tee " + tmplogfileIF
try:
print CrazyCommand
retcode = os.popen(CrazyCommand).read()
print "output is :"
print retcode
except subprocess.CalledProcessError, e:
print >>sys.stderr, "Execution failed:", e
return False
print "Checking if there is any error in the log of image factory"
if os.system("grep -i \"FAILED\\|Error\" " + tmplogfileIF) == SUCCESS:
print "Found FAILED or error message in log file:"
outputtmp = os.popen("grep -i \"FAILED\\|Error\" " + tmplogfileIF).read()
print outputtmp
print "See the output from log file " + LogFileIF + ":"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIF).read()
print outputtmp
print "See the output from log file " + LogFileIWH + ":"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIWH).read()
print outputtmp
print "Test FAILED =============================================================="
return False
if os.system("grep -i \"COMPLETE\" " + tmplogfileIF) != SUCCESS:
print "Build is not completed for some reason! It looks it stuck in the NEW status."
print "Perhaps you can find something in the log file " + tmplogfileIF + ":"
print "======================================================"
outputtmp = os.popen("cat " + tmplogfileIF).read()
print outputtmp
print "See the output from log file " + LogFileIF + " too:"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIF).read()
print outputtmp
print "Test FAILED =============================================================="
return False
return True
def __runTestAeolusCLI(self, args):
global temporaryfile
(distro, version, arch, installtype, isourlstr, targetim, templatesetup) = args
#lets clean the logs so there is no obsolete records in it.
print "Clearing log file for Image Factory"
os.system("> " + LogFileIF)
print "Clearing log file for Image Warehouse"
os.system("> " + LogFileIWH)
tdlxml = self.__getTemplate(distro, version, arch, installtype, isourlstr, targetim, templatesetup)
os.system("echo \""+tdlxml+"\" > "+temporaryfile)
print "See the testing template"
print "======================================================"
outputtmp = os.popen("cat "+temporaryfile).read()
print outputtmp
CrazyCommand = "aeolus-image build --target %s --template " % targetim + temporaryfile
target_image = ""
try:
print CrazyCommand
retcode = os.popen(CrazyCommand).read()
print "output is :"
print retcode
#get target image BEGIN
tempvar = re.search(r'.*\n.*\n([a-zA-Z0-9\-]*).*',retcode,re.I)
if tempvar == None:
print "An unknown error occurred. I'm not able to get target image ID. Check the log file out:"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIF).read()
print outputtmp
print "Test FAILED =============================================================="
return False
else:
target_image = tempvar.group(1)
#get target image END
except subprocess.CalledProcessError, e:
print >>sys.stderr, "Execution failed:", e
return False
#setup counter to do not wait longer then 1 hour
print "Wait until build process is done"
Counter=0
print "Let\'s check this image: " + target_image
data = json.loads(self.__helpTest(target_image))
print "Data Status: " + data['status']
#while os.system("aeolus-cli status --targetimage " + timage + "|grep -i building") == SUCCESS:
while data['status'] == "BUILDING":
Counter=Counter+1
#wait a minute
time.sleep(MINUTE)
data = json.loads(self.__helpTest(target_image))
print "Data Status: " + data['status']
#after an hour break the
if Counter > TIMEOUT:
print "Error: timeout over "+str(TIMEOUT)+" minutes !"
print "Test FAILED =============================================================="
return False
print "Checking if there is any error in the log of image factory"
if os.system("grep -i \"FAILED\\|Error\" " + LogFileIF) == SUCCESS:
print "Found FAILED or error message in log file:"
outputtmp = os.popen("grep -i \"FAILED\\|Error\" " + LogFileIF).read()
print outputtmp
print "See the output from log file " + LogFileIF + ":"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIF).read()
print outputtmp
print "See the output from log file " + LogFileIWH + ":"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIWH).read()
print outputtmp
print "Test FAILED =============================================================="
return False
#check if status is either complete or building
print "Let\'s check this image: " + target_image
data = json.loads(self.__helpTest(target_image))
print "Data Status for image "+target_image+": " + data['status']
if data['status'] == "FAILED":
print "Build "+target_image+" is not completed for some reason! It looks it stuck in the NEW status."
print "Perhaps you can find something in the log file " + LogFileIF + ":"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIF).read()
print outputtmp
print "See the output from log file " + LogFileIWH + " too:"
print "======================================================"
outputtmp = os.popen("cat " + LogFileIWH).read()
print outputtmp
print "Test FAILED =============================================================="
return False
Counter=0
while data['status'] in ("BUILDING", "New"):
Counter=Counter+1
#wait a minute
time.sleep(MINUTE)
data = json.loads(self.__helpTest(target_image))
print "Data Status: " + data['status']
if data['status'] == "New":
#let's speed up a little bit process as New state looks like stuck
Counter = Counter+12
#after an hour break the
if Counter > TIMEOUT:
print "Error: timeout over "+str(TIMEOUT)+" minutes !"
print "Test FAILED =============================================================="
return False
return True
def handle_exception(self, args):
try:
self.getTemplateRunTest(args)
except:
print "(Un)expected error:", sys.exc_info()[0]
raise
#this functions suppose to be as a help function to do not write one code multiple times
def __helpTest(self, imageTest):
url = url_https + imageTest
req = oauth.Request(method='GET', url=url, parameters=params)
sig = sig_method.sign(req, consumer, None)
req['oauth_signature'] = sig
r, c = httplib2.Http().request(url, 'GET', None, headers=req.to_header())
response = 'Response headers: %s\nContent: %s' % (r,c)
print response
return c
def expectSuccess(*args):
    '''Queue one passing-expected test for each build method.

    Creates two TestResult objects from the provided build arguments -- one
    that builds via the imagefactory command and one via the aeolus-cli
    command -- and appends both to the global 'alltests' list.
    '''
    global alltests
    for use_cli in (False, True):
        alltests.append(TestResult(*args, expect_pass=True, methodCLI=use_cli))
def expectFail(*args):
    '''Queue one failing-expected test (aeolus-cli method only) by appending
    a TestResult built from the provided arguments to the global 'alltests'
    list.'''
    global alltests
    failing_case = TestResult(*args, expect_pass=False, methodCLI=True)
    alltests.append(failing_case)
def setupTest():
    """Prepare the host for the build tests: require root, run aeolus-cleanup,
    install the rhevm/vsphere config files, run aeolus-configure and clear
    the logs. Returns True on success, False on any setup failure."""
    print "=============================================="
    print "Setup of the sanity test based on 122786 test case from Image Factory test plan"
    print "See test plan: https://tcms.engineering.redhat.com/case/122786/?from_plan=4953"
    print "Checking if you have enough permission..."
    if os.geteuid() != ROOTID:
        print "You must have root permissions to run this script, I'm sorry buddy"
        return False #exit the test
    #run the cleanup configuration
    print "Cleanup configuration...."
    # Note: a cleanup failure is only reported, not fatal.
    if os.system("aeolus-cleanup") != SUCCESS:
        print "Some error raised in aeolus-cleanup !"
    #first backup old rhevm file
    print "Backup old rhevm configuration file"
    if os.path.isfile(RHEVMconfigureFile):
        shutil.copyfile(RHEVMconfigureFile, RHEVMBackupFile)
    #then copy the conf. file
    print "Copy rhevm configuration file to /etc/aeolus-configure/nodes/rhevm_configure"
    if os.path.isfile(RHEVMbugFile):
        shutil.copyfile(RHEVMbugFile, RHEVMconfigureFile)
    else:
        print RHEVMbugFile + " didn't find!"
        return False
    #first backup old vsphere file
    print "Backup old vsphere configuration file"
    if os.path.isfile(VSPHEREconfigureFile):
        shutil.copyfile(VSPHEREconfigureFile, VSPHEREBackupFile)
    #then copy the conf. file
    print "Copy rhevm configuration file to /etc/aeolus-configure/nodes/vsphere_configure"
    if os.path.isfile(VSPHEREbugFile):
        shutil.copyfile(VSPHEREbugFile, VSPHEREconfigureFile)
    else:
        print VSPHEREbugFile + " didn't find!"
        return False
    print "running aeolus-configure -p ec2,vsphere,rhevm,mock"
    if os.system("aeolus-configure -p ec2,vsphere,rhevm,mock") != SUCCESS:
        print "Some error raised in aeolus-configure !"
        return False
    # Start from empty logs so later grep checks see only this run's output.
    print "Clearing log file for Image Factory"
    os.system("> " + LogFileIF)
    print "Clearing log file for Image Warehouse"
    os.system("> " + LogFileIWH)
    return True
#body of the test
def bodyTest():
    """Build and execute the full test matrix.

    Expands the cartesian product of template setups, target images,
    architectures, install types and distros into TestResult objects (via
    expectSuccess), executes every queued test, prints a PASS/FAIL summary
    and returns True only when all tests passed.
    """
    global templatesetupvar
    print "=============================================="
    print "test being started"
    for templatesetup in templatesetupvar:
        for targetimage in targetimages:
            for arch in architectures:
                for installtype in installtypes:
                    for distro_p, distro in distros.iteritems():
                        for os_distro_p, os_distro in distro.iteritems():
                            # "url" installs point at the distro's os/ tree;
                            # otherwise assemble the full iso URL from parts.
                            if installtype == "url":
                                isourlstrvar = "%s%s/os/" % (os_distro[0], arch)
                            else:
                                isourlstrvar = "%s%s%s%s%s" % (os_distro[0], arch, os_distro[1], arch, os_distro[2])
                            expectSuccess(distro_p, os_distro_p, arch, installtype, isourlstrvar , targetimage, templatesetup)
    for onetest in alltests:
        results.append(onetest.execute())
    print "==================================================================================================================================="
    returnvalue = True
    for result in results:
        # NOTE(review): result appears to be (name, passed, method) -- confirm
        # against TestResult.execute().
        if result[1] == False:
            returnvalue = False
            print "FAILED ...."+result[2]+": "+result[0]
        else:
            print "Passed ...."+result[2]+": "+result[0]
    print "==================================================================================================================================="
    return returnvalue
#cleanup after test
def cleanTest():
    """Remove temporary files and restore the backed-up configuration files.

    Always returns True (failures of the individual cleanup steps are not
    detected).
    """
    global temporaryfile
    global tmplogfileIF
    print "============================================== Cleaning the mess after test =============================================="
    print "Removing temporary files"
    if os.path.isfile(temporaryfile):
        os.remove(temporaryfile)
    if os.path.isfile(tmplogfileIF):
        os.remove(tmplogfileIF)
    if os.path.isfile(RHEVMBackupFile):
        #copy file back rhevm
        shutil.copyfile(RHEVMBackupFile, RHEVMconfigureFile)
    if os.path.isfile(VSPHEREBackupFile):
        #copy file back VSPHERE
        shutil.copyfile(VSPHEREBackupFile, VSPHEREconfigureFile)
    return True
#future TODO: maybe delete all iso's and images beneath directories /var/lib/imagefactory/images/ and /var/lib/oz/isos/
#TODO: need to create correct cleanup
#execute the tests and return value (can be saved as a draft for future tests)
# Driver: run setup -> body -> cleanup; the exit code identifies the first
# stage that failed (SUCCESS / RET_CLEANTEST / RET_BODYTEST / RET_SETUPTEST).
if setupTest():
    if bodyTest():
        if cleanTest():
            print "=============================================="
            print "Test PASSED entirely !"
            sys.exit(SUCCESS)
        else:
            print "=============================================="
            print "Although Test was successful, cleaning after test wasn't successful !"
            sys.exit(RET_CLEANTEST)
    else:
        print "=============================================="
        print "Test Failed !"
        # Best-effort cleanup; the exit code still reports the body failure.
        if not cleanTest():
            print "Even cleaning after body test wasn't sort of successful !"
        sys.exit(RET_BODYTEST)
else:
    print "=============================================="
    print "Test setup wasn't successful ! Test didn't even proceed !"
    cleanTest()
    sys.exit(RET_SETUPTEST)
|
from PIL import Image
from skimage import color
from skimage.feature import hog
import collections
import datetime
import numpy as np
import pytest
from itertools import product
from pelops.features.hog import HOGFeatureProducer
def hog_features(img):
    """Return the HOG descriptor of *img* using the fixed test parameters
    (8 orientations, 14x14-pixel cells, 16x16-cell blocks)."""
    grayscale = color.rgb2gray(np.array(img))
    return hog(
        grayscale,
        orientations=8,
        pixels_per_cell=(14, 14),
        cells_per_block=(16, 16),
    )
def hist_features(img):
    """Compute per-channel 256-bin intensity histograms for a PIL image.

    Args:
        img: a PIL image (any object whose ``split()`` returns per-channel
            data works); an alpha channel beyond the first three channels
            is dropped.

    Returns:
        A flat numpy array of MAX_CHANNELS * BINS counts; channel i's
        histogram occupies the slice [i*BINS, (i+1)*BINS).
    """
    MAX_CHANNELS = 3
    BINS = 256
    channels = img.split()
    # Remove alpha channels.
    # Bug fix: this previously sliced the undefined name `channel`,
    # raising NameError for any image with more than three channels.
    if len(channels) > MAX_CHANNELS:
        channels = channels[:MAX_CHANNELS]
    # Calculate features
    hist_features = np.zeros(MAX_CHANNELS * BINS)
    for i, channel in enumerate(channels):
        channel_array = np.array(channel)
        values, _ = np.histogram(channel_array.flat, bins=BINS)
        start = i * BINS
        end = (i + 1) * BINS
        hist_features[start:end] = values
    return hist_features
@pytest.fixture(scope="module")
def img_data():
    """Build the test images plus their expected HOG and histogram features.

    Returns a dict keyed by data id; each entry holds the raw numpy
    "array", the resized RGB PIL "image", and the precomputed
    "hog_features" / "hist_features" for that image.
    """
    data = {
        "DATA_1": {},
        "DATA_3": {},
        "DATA_4": {},
    }
    # Raw data
    data["DATA_1"]["array"] = np.array([
        [[ 0, 0, 0],
         [255, 255, 255],
         [ 0, 0, 0]],
    ], dtype=np.uint8)
    data["DATA_3"]["array"] = np.array([
        [[ 0, 0, 0],
         [255, 255, 255],
         [ 0, 0, 0]],
        [[255, 255, 255],
         [ 0, 0, 0],
         [255, 255, 255]],
        [[ 0, 0, 0],
         [255, 255, 255],
         [ 0, 0, 0]],
    ], dtype=np.uint8)
    data["DATA_4"]["array"] = np.array([
        [[ 0, 0, 0],
         [255, 255, 255],
         [ 0, 0, 0]],
        [[255, 255, 255],
         [ 0, 0, 0],
         [255, 255, 255]],
        [[ 0, 0, 0],
         [255, 255, 255],
         [ 0, 0, 0]],
        [[ 0, 0, 0],
         [ 0, 0, 0],
         [ 0, 0, 0]],
    ], dtype=np.uint8)
    # PIL images
    for data_id in data:
        arr = data[data_id]["array"]
        img = Image.fromarray(arr)
        img = img.convert("RGB")
        img = img.resize((224, 224), Image.BICUBIC)
        data[data_id]["image"] = img
    # Calculate HOG features.  (Fix: the local was named `hog`, shadowing the
    # imported skimage.feature.hog.)
    for data_id in data:
        hog_feats = hog_features(data[data_id]["image"])
        data[data_id]["hog_features"] = hog_feats
    # Calculate Histogram features
    for data_id in data:
        hist_feats = hist_features(data[data_id]["image"])
        data[data_id]["hist_features"] = hist_feats
    return data
@pytest.fixture
def chip_producer(img_data):
    """Build a chip_producer dict mapping each filepath to a Chip tuple."""
    Chip = collections.namedtuple(
        "Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"]
    )
    producer = {"chips": {}}
    for index, data_id in enumerate(img_data):
        raw_array = img_data[data_id]["array"]
        # The data_id doubles as the filepath: the file is never opened, so
        # the value only has to be unique.
        timestamp = datetime.datetime(2016, 10, 1, 0, 1, 2,
                                      microsecond=100 + index)
        # Fields: filepath, car_id, cam_id, time, img_data, misc
        producer["chips"][data_id] = Chip(data_id, index, 1, timestamp,
                                          raw_array, {})
    return producer
@pytest.fixture
def feature_producer(chip_producer):
    """Provide a HOGFeatureProducer wired to the chip_producer fixture."""
    return HOGFeatureProducer(chip_producer)
def test_features(feature_producer, chip_producer, img_data):
    """Each chip's feature vector is the HOG descriptor followed by the
    color-histogram vector precomputed in img_data.

    Fix: removed the unused local ``fp = feature_producer``.
    """
    for _, chip in chip_producer["chips"].items():
        # The chip's filepath is the img_data key (see chip_producer).
        data = img_data[chip.filepath]
        hog_features = data["hog_features"]
        hist_features = data["hist_features"]
        features = feature_producer.produce_features(chip)
        # Check the length first for a clearer failure, then exact content.
        assert len(features) == len(hog_features) + len(hist_features)
        total_features = np.concatenate((hog_features, hist_features))
        assert np.array_equal(features, total_features)
def test_inputs(chip_producer):
    """The producer's sizing parameters determine the feature length:
    cells^2 * orientations HOG values plus 3 * histogram_bins counts."""
    pix_sizes = (32, 64, 128, 256, 512)
    cell_counts = (1, 2, 4, 16)
    orientation_counts = (2, 4, 8, 16)
    histogram_bins = (32, 64, 128, 256)
    combos = product(pix_sizes, cell_counts, orientation_counts, histogram_bins)
    for pix, cell, orientation, histogram_bin in combos:
        producer = HOGFeatureProducer(
            chip_producer,
            image_size=(pix, pix),
            cells=(cell, cell),
            orientations=orientation,
            histogram_bins_per_channel=histogram_bin,
        )
        expected_len = ((cell**2) * orientation) + (3 * histogram_bin)
        for _, chip in chip_producer["chips"].items():
            assert len(producer.produce_features(chip)) == expected_len
|
def longestCommonPrefix(strs):
    """
    Return the longest prefix shared by every string in *strs*.

    Bug fix: the original loop returned as soon as the FIRST string's first
    character matched itself (always true), so the other strings were never
    compared and the recursion could only stop by raising IndexError once a
    string ran out of characters.  Now every string is checked, and empty
    input / exhausted strings terminate the recursion cleanly.
    """
    # Base cases: no strings, or some string exhausted -> no common prefix.
    if not strs or any(not s for s in strs):
        return ""
    first_character = strs[0][0]
    for string in strs:
        if string[0] != first_character:
            return ""
    # Every string starts with first_character; recurse on the remainders.
    return first_character + longestCommonPrefix([x[1:] for x in strs])
# Demo calls; the trailing comments give the true longest common prefix of
# each input list (the original comment for the third case wrongly said "").
print(longestCommonPrefix(["twelve", "radio", "nine"])) # "" (no shared first letter)
print(longestCommonPrefix(["phone", "apple", "google"])) # "" (no shared first letter)
print(longestCommonPrefix(["chanel", "channel", "chandler"])) # "chan"
print(longestCommonPrefix(["flower", "fly", "flew"])) # "fl"
|
import os
import sys
import subprocess

# Usage: <script> <ip> <username> <password> <remote_dir> <account_id>
# Copies an account's extracted retraining data to a remote host via scp.
ip = sys.argv[1]
username = sys.argv[2]
passwd = sys.argv[3]
remote_dir = sys.argv[4]
acct_id = sys.argv[5]

local_csv = '/root/accounts/account_' + acct_id + '/data_extracted/retraining_data.csv'
# Fix: invoke the command with an argument list instead of an os.system()
# shell string, so spaces or shell metacharacters in the password or paths
# can no longer break the command or inject into the shell.
# NOTE(review): sshpass still exposes the password in the process list;
# key-based auth would be safer.
subprocess.call([
    'sshpass', '-p', passwd,
    'scp', '-o', 'StrictHostKeyChecking=no',
    local_csv,
    username + '@' + ip + ':' + remote_dir,
])
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.python.subsystems.debugpy import DebugPy
@pytest.fixture(autouse=True)
def debugpy_dont_wait_for_client(monkeypatch):
    """Patch DebugPy.get_args so the debug server never blocks waiting for a
    client to attach during tests."""
    unpatched_get_args = DebugPy.get_args

    def get_args_without_wait(*args, **kwargs):
        # Drop exactly one "--wait-for-client" flag from the original args
        # (list.remove keeps the original's raise-if-absent behaviour).
        filtered = list(unpatched_get_args(*args, **kwargs))
        filtered.remove("--wait-for-client")
        return tuple(filtered)

    monkeypatch.setattr(DebugPy, "get_args", get_args_without_wait)
|
def noonerize(numbers):
    """Swap the leading digits of the two numbers in *numbers* and return
    the absolute difference of the results; return 'invalid array' when any
    element is not an int."""
    for value in numbers:
        if not isinstance(value, int):
            return 'invalid array'
    first, second = (str(number) for number in numbers)
    swapped_a = int(second[0] + first[1:])
    swapped_b = int(first[0] + second[1:])
    return abs(swapped_a - swapped_b)
|
import torch
import torch.nn as nn
from distance.chamfer_distance import ChamferDistanceFunction
from distance.emd_module import emdFunction
class ChamferDistance(nn.Module):
    """Symmetric Chamfer distance between two point clouds: the mean of the
    per-point sqrt distances in both directions, averaged."""
    def __init__(self):
        super(ChamferDistance, self).__init__()
    def forward(self, pcs1, pcs2):
        """
        Args:
            pcs1: tensor with size of (B, N, 3)
            pcs2: tensor with size of (B, M, 3)

        Returns:
            Scalar tensor: average of the two directional mean distances.
        """
        # NOTE(review): the sqrt below implies ChamferDistanceFunction
        # returns squared nearest-neighbour distances -- confirm against
        # distance.chamfer_distance.
        dist1, dist2 = ChamferDistanceFunction.apply(pcs1, pcs2)  # (B, N), (B, M)
        dist1 = torch.mean(torch.sqrt(dist1))
        dist2 = torch.mean(torch.sqrt(dist2))
        return (dist1 + dist2) / 2
class EarthMoverDistance(nn.Module):
    """Earth Mover's Distance between two point clouds, computed with the
    iterative emdFunction solver."""

    def __init__(self, eps=0.005, max_iter=3000):
        """eps and max_iter are forwarded unchanged to emdFunction.apply."""
        super(EarthMoverDistance, self).__init__()
        self.eps = eps
        self.max_iter = max_iter

    def forward(self, pcs1, pcs2):
        # emdFunction also returns an assignment, which is discarded here.
        dists, _ = emdFunction.apply(pcs1, pcs2, self.eps, self.max_iter)
        return torch.sqrt(dists).mean()
if __name__ == '__main__':
    # Smoke test: evaluate both losses on random clouds with a fixed seed
    # so the printed values are reproducible.
    from utils import setup_seed
    setup_seed(20)
    pcs1 = torch.rand(10, 1024, 3)
    pcs2 = torch.rand(10, 1024, 3)
    cd_loss = ChamferDistance()
    print(cd_loss(pcs1, pcs2))
    emd_loss = EarthMoverDistance()
    print(emd_loss(pcs1, pcs2))
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from six import StringIO
from unittest import TestCase, main
from datetime import datetime
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
import qiita_db as qdb
class TestUtil(TestCase):
    """Tests some utility functions on the metadata_template module"""
    # NOTE(review): this suite uses legacy APIs (DataFrame.ix,
    # pandas.util.testing, assertItemsEqual) that only exist on old
    # pandas / Python 2 -- it will not run unmodified on Python 3.

    def setUp(self):
        # Three samples spanning int, float and string column types.
        metadata_dict = {
            'Sample1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
            'Sample2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
            'Sample3': {'int_col': 3, 'float_col': 3, 'str_col': 'string30'},
        }
        self.metadata_map = pd.DataFrame.from_dict(metadata_dict,
                                                   orient='index')
        self.headers = ['float_col', 'str_col', 'int_col']

    def test_type_lookup(self):
        """Correctly returns the SQL datatype of the passed dtype"""
        self.assertEqual(qdb.metadata_template.util.type_lookup(
            self.metadata_map['float_col'].dtype), 'float8')
        self.assertEqual(qdb.metadata_template.util.type_lookup(
            self.metadata_map['int_col'].dtype), 'integer')
        self.assertEqual(qdb.metadata_template.util.type_lookup(
            self.metadata_map['str_col'].dtype), 'varchar')

    def test_get_datatypes(self):
        """Correctly returns the data types of each column"""
        obs = qdb.metadata_template.util.get_datatypes(
            self.metadata_map.ix[:, self.headers])
        exp = ['float8', 'varchar', 'integer']
        self.assertEqual(obs, exp)

    def test_cast_to_python(self):
        """Correctly returns the value casted"""
        # numpy bool -> native bool
        b = np.bool_(True)
        obs = qdb.metadata_template.util.cast_to_python(b)
        self.assertTrue(obs)
        self.assertFalse(isinstance(obs, np.bool_))
        self.assertTrue(isinstance(obs, bool))
        # numpy datetime64 -> native datetime
        exp = datetime(2015, 9, 1, 10, 00)
        dt = np.datetime64(exp)
        obs = qdb.metadata_template.util.cast_to_python(dt)
        self.assertEqual(obs, exp)
        self.assertFalse(isinstance(obs, np.datetime64))
        self.assertTrue(isinstance(obs, datetime))

    def test_as_python_types(self):
        """Correctly returns the columns as python types"""
        obs = qdb.metadata_template.util.as_python_types(
            self.metadata_map, self.headers)
        exp = [[2.1, 3.1, 3],
               ['str1', '200', 'string30'],
               [1, 2, 3]]
        self.assertEqual(obs, exp)

    def test_prefix_sample_names_with_id(self):
        # prefix_sample_names_with_id mutates self.metadata_map in place.
        exp_metadata_dict = {
            '1.Sample1': {'int_col': 1, 'float_col': 2.1, 'str_col': 'str1'},
            '1.Sample2': {'int_col': 2, 'float_col': 3.1, 'str_col': '200'},
            '1.Sample3': {'int_col': 3, 'float_col': 3, 'str_col': 'string30'},
        }
        exp_df = pd.DataFrame.from_dict(exp_metadata_dict, orient='index')
        qdb.metadata_template.util.prefix_sample_names_with_id(
            self.metadata_map, 1)
        self.metadata_map.sort_index(inplace=True)
        exp_df.sort_index(inplace=True)
        assert_frame_equal(self.metadata_map, exp_df)

    def test_load_template_to_dataframe(self):
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_qiime_map(self):
        # QIIME mapping files use '#SampleID' as the index column.
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(QIIME_TUTORIAL_MAP_SUBSET), index='#SampleID')
        exp = pd.DataFrame.from_dict(QIIME_TUTORIAL_MAP_DICT_FORM)
        exp.index.name = 'SampleID'
        obs.sort_index(axis=0, inplace=True)
        obs.sort_index(axis=1, inplace=True)
        exp.sort_index(axis=0, inplace=True)
        exp.sort_index(axis=1, inplace=True)
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_duplicate_cols(self):
        with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
            qdb.metadata_template.util.load_template_to_dataframe(
                StringIO(EXP_SAMPLE_TEMPLATE_DUPE_COLS))

    def test_load_template_to_dataframe_scrubbing(self):
        # Whitespace around sample names is stripped on load.
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_SPACES))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_empty_columns(self):
        # An all-empty column is dropped with a QiitaDBWarning.
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.util.load_template_to_dataframe,
            StringIO(EXP_ST_SPACES_EMPTY_COLUMN))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_empty_rows(self):
        # Rows consisting only of tabs are ignored.
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_SPACES_EMPTY_ROW))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_no_sample_name_cast(self):
        # Numeric-looking sample names must stay strings ('002.000' etc.).
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES))
        exp = pd.DataFrame.from_dict(
            SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES_DICT_FORM)
        exp.index.name = 'sample_name'
        obs.sort_index(inplace=True)
        exp.sort_index(inplace=True)
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_empty_sample_names(self):
        # Rows with an empty sample name are dropped.
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAMES))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAMES_SOME_SPACES))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_empty_column(self):
        obs = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.util.load_template_to_dataframe,
            StringIO(SAMPLE_TEMPLATE_EMPTY_COLUMN))
        exp = pd.DataFrame.from_dict(ST_EMPTY_COLUMN_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_column_with_nas(self):
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(SAMPLE_TEMPLATE_COLUMN_WITH_NAS))
        exp = pd.DataFrame.from_dict(ST_COLUMN_WITH_NAS_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_exception(self):
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            qdb.metadata_template.util.load_template_to_dataframe(
                StringIO(SAMPLE_TEMPLATE_NO_SAMPLE_NAME))

    def test_load_template_to_dataframe_whitespace(self):
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_WHITESPACE))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_lowercase(self):
        # Headers are lowercased on load, except str_CoLumn keeps its case.
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_MULTICASE))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_DICT_FORM)
        exp.index.name = 'sample_name'
        exp.rename(columns={"str_column": "str_CoLumn"}, inplace=True)
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_non_utf8(self):
        # \x96 is not valid UTF-8, so the load must fail.
        bad = EXP_SAMPLE_TEMPLATE.replace('Test Sample 2', 'Test Sample\x962')
        with self.assertRaises(qdb.exceptions.QiitaDBError):
            qdb.metadata_template.util.load_template_to_dataframe(
                StringIO(bad))

    def test_load_template_to_dataframe_typechecking(self):
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_LAT_ALL_INT))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_LAT_ALL_INT_DICT)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_LAT_MIXED_FLOAT_INT))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_MIXED_FLOAT_INT_DICT)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_load_template_to_dataframe_with_nulls(self):
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            StringIO(EXP_SAMPLE_TEMPLATE_NULLS))
        exp = pd.DataFrame.from_dict(SAMPLE_TEMPLATE_NULLS_DICT)
        exp.index.name = 'sample_name'
        assert_frame_equal(obs, exp)

    def test_get_invalid_sample_names(self):
        all_valid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
                     'v10l3t5', '4r3', '81u3']
        obs = qdb.metadata_template.util.get_invalid_sample_names(all_valid)
        self.assertEqual(obs, [])
        all_valid = ['sample.1', 'sample.2', 'SAMPLE.1', 'BOOOM']
        obs = qdb.metadata_template.util.get_invalid_sample_names(all_valid)
        self.assertEqual(obs, [])

    def test_get_invalid_sample_names_str(self):
        one_invalid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
                       'I am the chosen one', 'v10l3t5', '4r3', '81u3']
        obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
        self.assertItemsEqual(obs, ['I am the chosen one'])
        one_invalid = ['2.sample.1', 'foo.bar.baz', 'roses', 'are', 'red',
                       ':L{=<', ':L}=<', '4r3', '81u3']
        obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
        self.assertItemsEqual(obs, [':L{=<', ':L}=<'])

    def test_get_get_invalid_sample_names_mixed(self):
        one_invalid = ['.', '1', '2']
        obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
        self.assertItemsEqual(obs, [])
        one_invalid = [' ', ' ', ' ']
        obs = qdb.metadata_template.util.get_invalid_sample_names(one_invalid)
        self.assertItemsEqual(obs, [' ', ' ', ' '])

    def test_invalid_lat_long(self):
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            obs = qdb.metadata_template.util.load_template_to_dataframe(
                StringIO(SAMPLE_TEMPLATE_INVALID_LATITUDE_COLUMNS))
            # prevent flake8 from complaining
            str(obs)
        with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
            obs = qdb.metadata_template.util.load_template_to_dataframe(
                StringIO(SAMPLE_TEMPLATE_INVALID_LONGITUDE_COLUMNS))
            # prevent flake8 from complaining
            str(obs)

    def test_looks_like_qiime_mapping_file(self):
        obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
            StringIO(EXP_SAMPLE_TEMPLATE))
        self.assertFalse(obs)
        obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
            StringIO(QIIME_TUTORIAL_MAP_SUBSET))
        self.assertTrue(obs)
        # An empty file is not a QIIME mapping file.
        obs = qdb.metadata_template.util.looks_like_qiime_mapping_file(
            StringIO())
        self.assertFalse(obs)

    def test_parse_mapping_file(self):
        # Tests ported over from QIIME
        s1 = ['#sample\ta\tb', '#comment line to skip',
              'x \t y \t z ', ' ', '#more skip', 'i\tj\tk']
        exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],
               ['sample', 'a', 'b'],
               ['comment line to skip', 'more skip'])
        obs = qdb.metadata_template.util._parse_mapping_file(s1)
        self.assertEqual(obs, exp)
        # check that we strip double quotes by default
        s2 = ['#sample\ta\tb', '#comment line to skip',
              '"x "\t" y "\t z ', ' ', '"#more skip"', 'i\t"j"\tk']
        obs = qdb.metadata_template.util._parse_mapping_file(s2)
        self.assertEqual(obs, exp)
QIIME_TUTORIAL_MAP_SUBSET = (
"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\t"
"Description\n"
"PC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\t"
"Control_mouse_I.D._354\n"
"PC.607\tAACTGTGCGTAC\tYATGCTGCCTCCCGTAGGAGT\tFast\t20071112\t"
"Fasting_mouse_I.D._607\n"
)
EXP_SAMPLE_TEMPLATE = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42.42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4.2\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_MULTICASE = (
"sAmPle_Name\tcollection_timestamp\tDescription\thas_extracted_data\t"
"has_physical_specimen\thost_Subject_id\tint_column\tlatitude\tLongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_CoLumn\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42.42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4.2\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_LAT_ALL_INT = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_LAT_MIXED_FLOAT_INT = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\tNotIdentified"
"\t1\t42\t41.41\tlocation1\treceived\ttype1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\tTest Sample 2\tTrue\tTrue\tNotIdentified"
"\t2\t4\t1.1\tlocation1\treceived\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\tTrue\tNotIdentified"
"\t3\t4.8\t4.41\tlocation1\treceived\ttype1\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_DUPE_COLS = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\tstr_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\tValue for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tValue for sample 2\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\tValue for sample 3\n")
EXP_SAMPLE_TEMPLATE_SPACES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
EXP_SAMPLE_TEMPLATE_WHITESPACE = (
"sample_name \tcollection_timestamp\t description \thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t 2014-05-29 12:24:51 \t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\t Value for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\t Test Sample 3 \tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
EXP_SAMPLE_TEMPLATE_SPACES_EMPTY_ROW = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t\t\t\t\t\t\t\t\n"
"\t\t\t\t\t\t\t\t\t\t\t\t\n")
EXP_ST_SPACES_EMPTY_COLUMN = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\t\n"
"2.Sample1 \t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\t\n"
"2.Sample2 \t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\t\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\t\n")
EXP_SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"002.000\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"1.11111\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tValue for sample 2\n"
"0.12121\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n")
SAMPLE_TEMPLATE_NO_SAMPLE_NAMES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t\t\t\t\t\t\t\n"
)
SAMPLE_TEMPLATE_NO_SAMPLE_NAMES_SOME_SPACES = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tint_column\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t1\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"Value for sample 1\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t2\t4.2\t1.1\tlocation1\t"
"received\ttype1\tValue for sample 2\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t3\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"Value for sample 3\n"
"\t\t\t\t\t \t\t\t\t\t \t\t\n"
)
SAMPLE_TEMPLATE_EMPTY_COLUMN = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\t\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"\n")
SAMPLE_TEMPLATE_COLUMN_WITH_NAS = (
"sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"NA\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tNA\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"NA\n")
SAMPLE_TEMPLATE_NO_SAMPLE_NAME = (
":L}={\tcollection_timestamp\tdescription\thas_extracted_data\t"
"has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
"physical_location\trequired_sample_info_status\tsample_type\t"
"str_column\n"
"2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
"NotIdentified\t42.42\t41.41\tlocation1\treceived\ttype1\t"
"NA\n"
"2.Sample2\t2014-05-29 12:24:51\t"
"Test Sample 2\tTrue\tTrue\tNotIdentified\t4.2\t1.1\tlocation1\treceived\t"
"type1\tNA\n"
"2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
"True\tNotIdentified\t4.8\t4.41\tlocation1\treceived\ttype1\t"
"NA\n")
# Template with malformed latitude values ("42", "XXXXX4.8") for validation
# tests.
# NOTE(review): the "\1" escapes in rows 2 and 3 below produce the control
# character \x01, not a tab followed by "1" — almost certainly a typo for
# "\t1", which leaves those rows one field short of the header.  Confirm
# against the tests that consume this fixture before changing the bytes.
SAMPLE_TEMPLATE_INVALID_LATITUDE_COLUMNS = (
    "sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
    "has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
    "physical_location\trequired_sample_info_status\tsample_type\t"
    "str_column\n"
    "2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
    "1\t42\t41.41\tlocation1\treceived\ttype1\t"
    "Value for sample 1\n"
    "2.Sample2\t2014-05-29 12:24:51\t"
    "Test Sample 2\tTrue\tTrue\1\t4.2\t1.1\tlocation1\treceived\t"
    "type1\tValue for sample 2\n"
    "2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
    "True\1\tXXXXX4.8\t4.41\tlocation1\treceived\ttype1\t"
    "Value for sample 3\n")
# Template with malformed longitude values ("XXX", "4.XXXXX41").
# NOTE(review): the same suspicious "\1" escapes appear here as well.
SAMPLE_TEMPLATE_INVALID_LONGITUDE_COLUMNS = (
    "sample_name\tcollection_timestamp\tdescription\thas_extracted_data\t"
    "has_physical_specimen\thost_subject_id\tlatitude\tlongitude\t"
    "physical_location\trequired_sample_info_status\tsample_type\t"
    "str_column\n"
    "2.Sample1\t2014-05-29 12:24:51\tTest Sample 1\tTrue\tTrue\t"
    "1\t11.42\t41.41\tlocation1\treceived\ttype1\t"
    "Value for sample 1\n"
    "2.Sample2\t2014-05-29 12:24:51\t"
    "Test Sample 2\tTrue\tTrue\1\t4.2\tXXX\tlocation1\treceived\t"
    "type1\tValue for sample 2\n"
    "2.Sample3\t2014-05-29 12:24:51\tTest Sample 3\tTrue\t"
    "True\1\t4.8\t4.XXXXX41\tlocation1\treceived\ttype1\t"
    "Value for sample 3\n")
# Expected TSV serialization of a template with two boolean columns, one of
# which contains null entries.
EXP_SAMPLE_TEMPLATE_NULLS = (
    "sample_name\tmy_bool_col\tmy_bool_col_w_nulls\n"
    "sample.1\tTrue\tFalse\n"
    "sample.2\tFalse\tUnknown\n"
    "sample.3\tTrue\tTrue\n"
    "sample.4\tFalse\t\n"
    "sample.5\tTrue\tTrue\n"
    "sample.6\tFalse\tTrue\n")
# Dict-of-dicts (column -> {sample -> value}) counterpart; nulls appear as
# None here (serialized variously as "Unknown"/empty above).
SAMPLE_TEMPLATE_NULLS_DICT = {
    'my_bool_col': {"sample.1": True,
                    "sample.2": False,
                    "sample.3": True,
                    "sample.4": False,
                    "sample.5": True,
                    "sample.6": False},
    'my_bool_col_w_nulls': {"sample.1": False,
                            "sample.2": None,
                            "sample.3": True,
                            "sample.4": None,
                            "sample.5": True,
                            "sample.6": True}
}
# Dict-of-dicts form (column -> {sample_id -> value}) of the standard
# three-sample template.  Float literals carry their full binary-float
# repr (e.g. 42.420000000000002) so equality checks are exact.
SAMPLE_TEMPLATE_DICT_FORM = {
    'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
                             '2.Sample2': '2014-05-29 12:24:51',
                             '2.Sample3': '2014-05-29 12:24:51'},
    'description': {'2.Sample1': 'Test Sample 1',
                    '2.Sample2': 'Test Sample 2',
                    '2.Sample3': 'Test Sample 3'},
    'has_extracted_data': {'2.Sample1': True,
                           '2.Sample2': True,
                           '2.Sample3': True},
    'has_physical_specimen': {'2.Sample1': True,
                              '2.Sample2': True,
                              '2.Sample3': True},
    'host_subject_id': {'2.Sample1': 'NotIdentified',
                        '2.Sample2': 'NotIdentified',
                        '2.Sample3': 'NotIdentified'},
    'latitude': {'2.Sample1': 42.420000000000002,
                 '2.Sample2': 4.2000000000000002,
                 '2.Sample3': 4.7999999999999998},
    'longitude': {'2.Sample1': 41.409999999999997,
                  '2.Sample2': 1.1000000000000001,
                  '2.Sample3': 4.4100000000000001},
    'physical_location': {'2.Sample1': 'location1',
                          '2.Sample2': 'location1',
                          '2.Sample3': 'location1'},
    'required_sample_info_status': {'2.Sample1': 'received',
                                    '2.Sample2': 'received',
                                    '2.Sample3': 'received'},
    'sample_type': {'2.Sample1': 'type1',
                    '2.Sample2': 'type1',
                    '2.Sample3': 'type1'},
    'str_column': {'2.Sample1': 'Value for sample 1',
                   '2.Sample2': 'Value for sample 2',
                   '2.Sample3': 'Value for sample 3'},
    'int_column': {'2.Sample1': 1,
                   '2.Sample2': 2,
                   '2.Sample3': 3}
}
# Variant where every latitude is an int (exercises integer-typed columns).
SAMPLE_TEMPLATE_LAT_ALL_INT_DICT = {
    'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
                             '2.Sample2': '2014-05-29 12:24:51',
                             '2.Sample3': '2014-05-29 12:24:51'},
    'description': {'2.Sample1': 'Test Sample 1',
                    '2.Sample2': 'Test Sample 2',
                    '2.Sample3': 'Test Sample 3'},
    'has_extracted_data': {'2.Sample1': True,
                           '2.Sample2': True,
                           '2.Sample3': True},
    'has_physical_specimen': {'2.Sample1': True,
                              '2.Sample2': True,
                              '2.Sample3': True},
    'host_subject_id': {'2.Sample1': 'NotIdentified',
                        '2.Sample2': 'NotIdentified',
                        '2.Sample3': 'NotIdentified'},
    'latitude': {'2.Sample1': 42,
                 '2.Sample2': 4,
                 '2.Sample3': 4},
    'longitude': {'2.Sample1': 41.409999999999997,
                  '2.Sample2': 1.1000000000000001,
                  '2.Sample3': 4.4100000000000001},
    'physical_location': {'2.Sample1': 'location1',
                          '2.Sample2': 'location1',
                          '2.Sample3': 'location1'},
    'required_sample_info_status': {'2.Sample1': 'received',
                                    '2.Sample2': 'received',
                                    '2.Sample3': 'received'},
    'sample_type': {'2.Sample1': 'type1',
                    '2.Sample2': 'type1',
                    '2.Sample3': 'type1'},
    'str_column': {'2.Sample1': 'Value for sample 1',
                   '2.Sample2': 'Value for sample 2',
                   '2.Sample3': 'Value for sample 3'},
    'int_column': {'2.Sample1': 1,
                   '2.Sample2': 2,
                   '2.Sample3': 3}
}
# Variant mixing whole-number floats (42.0, 4.0) with a fractional one (4.8)
# in the latitude column.
SAMPLE_TEMPLATE_MIXED_FLOAT_INT_DICT = {
    'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
                             '2.Sample2': '2014-05-29 12:24:51',
                             '2.Sample3': '2014-05-29 12:24:51'},
    'description': {'2.Sample1': 'Test Sample 1',
                    '2.Sample2': 'Test Sample 2',
                    '2.Sample3': 'Test Sample 3'},
    'has_extracted_data': {'2.Sample1': True,
                           '2.Sample2': True,
                           '2.Sample3': True},
    'has_physical_specimen': {'2.Sample1': True,
                              '2.Sample2': True,
                              '2.Sample3': True},
    'host_subject_id': {'2.Sample1': 'NotIdentified',
                        '2.Sample2': 'NotIdentified',
                        '2.Sample3': 'NotIdentified'},
    'latitude': {'2.Sample1': 42.0,
                 '2.Sample2': 4.0,
                 '2.Sample3': 4.8},
    'longitude': {'2.Sample1': 41.409999999999997,
                  '2.Sample2': 1.1000000000000001,
                  '2.Sample3': 4.4100000000000001},
    'physical_location': {'2.Sample1': 'location1',
                          '2.Sample2': 'location1',
                          '2.Sample3': 'location1'},
    'required_sample_info_status': {'2.Sample1': 'received',
                                    '2.Sample2': 'received',
                                    '2.Sample3': 'received'},
    'sample_type': {'2.Sample1': 'type1',
                    '2.Sample2': 'type1',
                    '2.Sample3': 'type1'},
    'str_column': {'2.Sample1': 'Value for sample 1',
                   '2.Sample2': 'Value for sample 2',
                   '2.Sample3': 'Value for sample 3'},
    'int_column': {'2.Sample1': 1,
                   '2.Sample2': 2,
                   '2.Sample3': 3}
}
# Variant whose sample IDs are numeric-looking strings ('002.000', '1.11111',
# '0.12121'); no int_column.
SAMPLE_TEMPLATE_NUMBER_SAMPLE_NAMES_DICT_FORM = {
    'collection_timestamp': {'002.000': '2014-05-29 12:24:51',
                             '1.11111': '2014-05-29 12:24:51',
                             '0.12121': '2014-05-29 12:24:51'},
    'description': {'002.000': 'Test Sample 1',
                    '1.11111': 'Test Sample 2',
                    '0.12121': 'Test Sample 3'},
    'has_extracted_data': {'002.000': True,
                           '1.11111': True,
                           '0.12121': True},
    'has_physical_specimen': {'002.000': True,
                              '1.11111': True,
                              '0.12121': True},
    'host_subject_id': {'002.000': 'NotIdentified',
                        '1.11111': 'NotIdentified',
                        '0.12121': 'NotIdentified'},
    'latitude': {'002.000': 42.420000000000002,
                 '1.11111': 4.2000000000000002,
                 '0.12121': 4.7999999999999998},
    'longitude': {'002.000': 41.409999999999997,
                  '1.11111': 1.1000000000000001,
                  '0.12121': 4.4100000000000001},
    'physical_location': {'002.000': 'location1',
                          '1.11111': 'location1',
                          '0.12121': 'location1'},
    'required_sample_info_status': {'002.000': 'received',
                                    '1.11111': 'received',
                                    '0.12121': 'received'},
    'sample_type': {'002.000': 'type1',
                    '1.11111': 'type1',
                    '0.12121': 'type1'},
    'str_column': {'002.000': 'Value for sample 1',
                   '1.11111': 'Value for sample 2',
                   '0.12121': 'Value for sample 3'}}
# Expected dict form after the (empty) str_column has been dropped — the
# standard template minus str_column/int_column.
ST_EMPTY_COLUMN_DICT_FORM = \
    {'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
                              '2.Sample2': '2014-05-29 12:24:51',
                              '2.Sample3': '2014-05-29 12:24:51'},
     'description': {'2.Sample1': 'Test Sample 1',
                     '2.Sample2': 'Test Sample 2',
                     '2.Sample3': 'Test Sample 3'},
     'has_extracted_data': {'2.Sample1': True,
                            '2.Sample2': True,
                            '2.Sample3': True},
     'has_physical_specimen': {'2.Sample1': True,
                               '2.Sample2': True,
                               '2.Sample3': True},
     'host_subject_id': {'2.Sample1': 'NotIdentified',
                         '2.Sample2': 'NotIdentified',
                         '2.Sample3': 'NotIdentified'},
     'latitude': {'2.Sample1': 42.420000000000002,
                  '2.Sample2': 4.2000000000000002,
                  '2.Sample3': 4.7999999999999998},
     'longitude': {'2.Sample1': 41.409999999999997,
                   '2.Sample2': 1.1000000000000001,
                   '2.Sample3': 4.4100000000000001},
     'physical_location': {'2.Sample1': 'location1',
                           '2.Sample2': 'location1',
                           '2.Sample3': 'location1'},
     'required_sample_info_status': {'2.Sample1': 'received',
                                     '2.Sample2': 'received',
                                     '2.Sample3': 'received'},
     'sample_type': {'2.Sample1': 'type1',
                     '2.Sample2': 'type1',
                     '2.Sample3': 'type1'}}
# Dict form of the template whose str_column cells are all the literal "NA".
ST_COLUMN_WITH_NAS_DICT_FORM = \
    {'collection_timestamp': {'2.Sample1': '2014-05-29 12:24:51',
                              '2.Sample2': '2014-05-29 12:24:51',
                              '2.Sample3': '2014-05-29 12:24:51'},
     'description': {'2.Sample1': 'Test Sample 1',
                     '2.Sample2': 'Test Sample 2',
                     '2.Sample3': 'Test Sample 3'},
     'has_extracted_data': {'2.Sample1': True,
                            '2.Sample2': True,
                            '2.Sample3': True},
     'has_physical_specimen': {'2.Sample1': True,
                               '2.Sample2': True,
                               '2.Sample3': True},
     'host_subject_id': {'2.Sample1': 'NotIdentified',
                         '2.Sample2': 'NotIdentified',
                         '2.Sample3': 'NotIdentified'},
     'latitude': {'2.Sample1': 42.420000000000002,
                  '2.Sample2': 4.2000000000000002,
                  '2.Sample3': 4.7999999999999998},
     'longitude': {'2.Sample1': 41.409999999999997,
                   '2.Sample2': 1.1000000000000001,
                   '2.Sample3': 4.4100000000000001},
     'physical_location': {'2.Sample1': 'location1',
                           '2.Sample2': 'location1',
                           '2.Sample3': 'location1'},
     'required_sample_info_status': {'2.Sample1': 'received',
                                     '2.Sample2': 'received',
                                     '2.Sample3': 'received'},
     'sample_type': {'2.Sample1': 'type1',
                     '2.Sample2': 'type1',
                     '2.Sample3': 'type1'},
     'str_column': {'2.Sample1': 'NA', '2.Sample2': 'NA', '2.Sample3': 'NA'}}
# Two-sample mapping-file fixture in the shape of the QIIME tutorial map.
QIIME_TUTORIAL_MAP_DICT_FORM = {
    'BarcodeSequence': {'PC.354': 'AGCACGAGCCTA',
                        'PC.607': 'AACTGTGCGTAC'},
    'LinkerPrimerSequence': {'PC.354': 'YATGCTGCCTCCCGTAGGAGT',
                             'PC.607': 'YATGCTGCCTCCCGTAGGAGT'},
    'Treatment': {'PC.354': 'Control',
                  'PC.607': 'Fast'},
    'DOB': {'PC.354': 20061218,
            'PC.607': 20071112},
    'Description': {'PC.354': 'Control_mouse_I.D._354',
                    'PC.607': 'Fasting_mouse_I.D._607'}
}
# Expected TSV serialization of a three-sample prep template.
EXP_PREP_TEMPLATE = (
    'sample_name\tbarcodesequence\tcenter_name\tcenter_project_name\t'
    'ebi_submission_accession\temp_status\texperiment_design_description\t'
    'library_construction_protocol\tlinkerprimersequence\tplatform\t'
    'run_prefix\tstr_column\n'
    '1.SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
    'GTGCCAGCMGCCGCGGTAA\tILLUMINA\ts_G1_L002_sequences\tValue for sample 3\n'
    '1.SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
    'GTGCCAGCMGCCGCGGTAA\tILLUMINA\ts_G1_L001_sequences\tValue for sample 1\n'
    '1.SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\tNone\tEMP\tBBBB\tAAAA\t'
    'GTGCCAGCMGCCGCGGTAA\tILLUMINA\ts_G1_L001_sequences\tValue for sample 2\n')
if __name__ == '__main__':
    # Entry point — main() is defined earlier in this file (not in this chunk).
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PIL import Image
import numpy as np
import os
import sys
import cv2
import time
import torch
import torchvision.transforms as transforms
import torchvision
from modeling.deeplab import DeepLab
sys.path.append('/opt/ros/melodic/lib/python2.7/dist-packages')
import roslib
import rospy
from cv_bridge import CvBridge, CvBridgeError
from cv_bridge.boost.cv_bridge_boost import getCvType
from sensor_msgs.msg import Image as ImageMsg
sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')
class DeeplabRos:
    """ROS node that runs DeepLab semantic segmentation on camera frames.

    Subscribes to a raw camera topic, runs inference on each frame and
    publishes a color-coded (BGR) segmentation mask.
    """

    def __init__(self):
        # GPU assignment
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        # Load checkpoint (path is relative to the process working directory)
        self.checkpoint = torch.load(os.path.join("./src/deeplab_ros/data/model_best.pth.tar"))

        # Build the model and restore the trained weights
        self.model = DeepLab(num_classes=4,
                             backbone='mobilenet',
                             output_stride=16,
                             sync_bn=True,
                             freeze_bn=False)
        self.model.load_state_dict(self.checkpoint['state_dict'])
        self.model = self.model.to(self.device)
        # Switch to inference mode once here instead of on every frame
        # (the original called model.eval() in each callback).
        self.model.eval()

        # ImageNet normalization constants; build the transform pipeline
        # once instead of reconstructing it per callback.
        self.tfms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # ROS init
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/cam2/pylon_camera_node/image_raw", ImageMsg,
                                          self.callback, queue_size=1, buff_size=2**24)
        self.image_pub = rospy.Publisher("segmentation_image", ImageMsg, queue_size=1)

    def callback(self, data):
        """Run one inference pass on an incoming image and publish the mask."""
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        start_time = time.time()
        # Scope gradient suppression to this pass; the original flipped the
        # process-wide autograd switch (torch.set_grad_enabled(False)).
        with torch.no_grad():
            inputs = self.tfms(cv_image).to(self.device)
            output = self.model(inputs.unsqueeze(0)).squeeze().cpu().numpy()
        pred = np.argmax(output, axis=0)
        pred_img = self.label_to_color_image(pred)
        msg = self.bridge.cv2_to_imgmsg(pred_img, "bgr8")
        inference_time = time.time() - start_time
        print("inference time: ", inference_time)
        self.image_pub.publish(msg)

    def label_to_color_image(self, pred, class_num=4):
        """Map an (H, W) array of class ids to an (H, W, 3) uint8 image.

        Channel order follows label_colors, which is listed in BGR to match
        the "bgr8" encoding used when publishing.
        """
        label_colors = np.array([(0, 0, 0), (0, 0, 128), (0, 128, 0), (128, 0, 0)])  # bgr
        # Unlabeled, Building, Lane-marking, Fence
        r = np.zeros_like(pred).astype(np.uint8)
        g = np.zeros_like(pred).astype(np.uint8)
        b = np.zeros_like(pred).astype(np.uint8)
        for i in range(0, class_num):
            idx = pred == i
            r[idx] = label_colors[i, 0]
            g[idx] = label_colors[i, 1]
            b[idx] = label_colors[i, 2]
        rgb = np.stack([r, g, b], axis=2)
        return rgb
def main():
    """Initialize the ROS node and hand control to the ROS event loop."""
    rospy.init_node('inference', anonymous=True)
    # Keep a reference so the subscriber/publisher objects stay alive.
    node = DeeplabRos()
    # rospy.spin() blocks until shutdown, so the original
    # `while not rospy.is_shutdown(): rospy.spin(); rate.sleep()` loop only
    # ever ran spin() once and the Rate object was dead code.
    rospy.spin()


if __name__ == '__main__':
    main()
|
# coding: utf-8
from NaoCreator.setting import Setting
# Construct the settings singleton BEFORE importing the modules below —
# presumably they read the configured Setting at import time; confirm.
Setting(nao_connected=True, debug=True, ip="192.168.0.1")
from NaoCreator.Tool.stop import normal_stop
from NaoQuest.wait_for import wait_for
from PlayerManager.player_manager import Player

# Enable face recognition and subscribe to detection events.
Setting.naoFaceDetectionRecognition.enableRecognition(True)
Setting.naoFaceDetectionRecognition.subscribe(Setting.MEMORY_FACE, Setting.FACE_DETECTION_RECOGNITION_PERIOD, 0.0)
# Visual/mechanical setup: green LEDs, stiffen the head, start face tracking.
Setting.naoLed.on("AllLedsGreen")
Setting.naoMotion.setStiffnesses("Head", 1.0)
Setting.naoFaceTracker.startTracker()

# Block until the player "tristan" is detected, then shut down cleanly.
p = Player("tristan")
wait_for(p)
normal_stop()
|
"""
Django settings for vestblog project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
import socket

BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# sys.path.append(os.path.join('..', 'vest', BASE_DIR))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; it should be
# loaded from the environment or an un-versioned file instead.
SECRET_KEY = '*t!#gk4uqkf!8-&5ceo%l-o5g)v-1j%kl61fpve+o)lp&v3mw+'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is never assigned in this module — presumably it is
# supplied by the settings_remote/settings_local star-import at the bottom;
# confirm.
ALLOWED_HOSTS = ['vestlite.ru', 'www.vestlite.ru']

# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # vendor
    'grappelli.dashboard',
    'grappelli',
    'django.contrib.admin',
    'filebrowser',
    'django_jinja',
    'django_ace',
    # project
    'common',
    'frontend',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'vestblog.middleware.MiddlewareLocale',
    'vestblog.middleware.MiddlewareSessionExist',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'vestblog.middleware.MiddlewareSimplePage',
    # 'vestblog.middleware.MiddlewareCategory',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
    'vestblog.context_processors.site_info'
)

ROOT_URLCONF = 'vestblog.urls'
WSGI_APPLICATION = 'vestblog.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
#

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = u'ru'
LANGUAGES = (
    (u'ru', u'Russian'),
    (u'en', u'English'),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_remote')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.FileSystemFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

TEMPLATE_LOADERS = (
    'common.loaders.load_template_source',
    'django_jinja.loaders.FileSystemLoader',
    'django_jinja.loaders.AppLoader',
    # 'django.template.loaders.filesystem.Loader',
    # 'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

# Jinja2 templates use the .j2 extension.
DEFAULT_JINJA2_TEMPLATE_EXTENSION = '.j2'
DEFAULT_TEMPLATE_EXT = '.j2'

GRAPPELLI_INDEX_DASHBOARD = {
    'vestblog.admin_super.admin_super': 'vestblog.dashboard_super.CustomIndexDashboard',
    # 'vestblog.admin_client.admin_client': 'vestblog.dashboard_client.CustomIndexDashboard',
}

# Hostnames that count as development machines (see the switch below).
HOSTS = ['ld1', 'ld2', 'ld-mac.loc', '192']
LOCAL = False

JINJA2_ENVIRONMENT_OPTIONS = {
    'trim_blocks': True,
    'autoescape': False,
    'cache_size': 0  # TODO: set cache only for static
}

SITE_INFO = {
    'SITESUBTITLE': 'My blog'
}
SITE_ID = 1

# Environment switch: hosts listed in HOSTS load the local settings,
# everything else loads the remote (production) overrides.
if not socket.gethostname() in HOSTS:
    from settings_remote import *
else:
    from settings_local import *
|
"""
4. Median of Two Sorted Arrays
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
"""
Explanation
This is a classical coding interview question. Hard and worth thinking.
We can convert the problem to the problem of finding kth element after merging Array A and B, where k is (Array A’s length + Array B’ Length) / 2.
If any of the two arrays is empty, then the kth element is the non-empty array’s kth element.
If k == 1, the kth element is the first element of A or B.
For all other cases, we compare the (k / 2) th number in A and the (k / 2) th number in B.
If the array has no more than k /2 elements, we set key = MAX_VALUE.
If keyA < keyB, we get rid of first k /2 elements.
We keep searching in the remainder for the (k – k /2) th element.
"""
# http://www.goodtecher.com/leetcode-4-median-two-sorted-arrays/
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted arrays in O(log(m+n)).

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        total = len(nums1) + len(nums2)
        if total % 2 == 1:
            # Odd total: median is the middle (k-th smallest) element.
            # `//` keeps k an int — the original used `/` (a float in
            # Python 3, which breaks indexing) and the even branch also
            # referenced an undefined `num2` and called findkth without
            # `self.`.
            return float(self.findkth(nums1, 0, nums2, 0, total // 2 + 1))
        else:
            return (self.findkth(nums1, 0, nums2, 0, total // 2) +
                    self.findkth(nums1, 0, nums2, 0, total // 2 + 1)) / 2.0

    def findkth(self, nums1, start1, nums2, start2, k):
        """Return the k-th (1-based) smallest element of the merged arrays,
        considering only nums1[start1:] and nums2[start2:]."""
        if start1 >= len(nums1):
            return nums2[start2 + k - 1]
        if start2 >= len(nums2):
            return nums1[start1 + k - 1]
        if k == 1:
            return min(nums1[start1], nums2[start2])
        half = k // 2
        index1 = start1 + half - 1
        index2 = start2 + half - 1
        # A missing (k/2)-th element counts as +infinity so the other
        # array's first half is the one discarded (replaces sys.maxsize,
        # which was used without importing sys).
        key1 = nums1[index1] if index1 < len(nums1) else float('inf')
        key2 = nums2[index2] if index2 < len(nums2) else float('inf')
        if key1 < key2:
            # The first `half` elements of nums1 cannot contain the answer.
            return self.findkth(nums1, start1 + half, nums2, start2, k - half)
        return self.findkth(nums1, start1, nums2, start2 + half, k - half)
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import folium

# Import CSV file into DataFrame
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
print(world_rankings.head())

# change column data type (errors='coerce' turns non-numeric cells into NaN)
world_rankings['Percentage_Female'] = pd.to_numeric(world_rankings['Percentage_Female'] , errors='coerce')

# Checking of missing data
world_rankings.isnull().sum()

# Dropping duplicates
# NOTE(review): neither of the next two results is assigned back and
# inplace is not set, so world_rankings is left unchanged — confirm intent.
world_rankings.drop_duplicates(subset=['University'])
world_rankings.sort_values('Rank_Char', ascending=False)

# Top rows (iloc end is exclusive, so 0:21 actually takes 21 rows).
top_20 = world_rankings.iloc[0:21]
print(top_20.head())

# creating dictionary
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
df = {'University_Name': ['Trinity College Dublin', 'Royal College of Surgeons in Ireland (RCSI)','University College Dublin',
                          'National University of Ireland, Galway', 'Maynooth University', 'University College Cork', 'University of Limerick',
                          'Dublin City University', 'Technological University Dublin'], 'Global_Ranking': [114, 142,158,188,199,208,288,302,401]}
irish_unis = pd.DataFrame(df)
print(irish_unis)

sns.set_theme(style="darkgrid")
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
irish_unis = pd.DataFrame(df)
fig, ax = plt.subplots()
sns.scatterplot(data=irish_unis, x="Global_Ranking", y="University_Name", marker='o')
ax.set_title('How Irish universities feature in global rankings')
ax.set(xlabel="Global ranking score", ylabel='Irish University Name')
# NOTE(review): this assigns a string over the Axes method instead of
# calling ax.set_yticklabels([...]) — it has no effect on the plot.
ax.set_yticklabels = 'Amount of International Students'
plt.show()

# functions
def ireland_universities(message):
    """Print an insight line about Irish universities.

    The parameter was originally named ``str``, shadowing the builtin;
    all call sites pass it positionally, so the rename is safe.  The
    stray trailing ``return;`` was dropped (the function returns None).
    """
    print(message)
# finding where Ireland uni's feature (substring match on the Country column)
ireland_features = world_rankings[world_rankings["Country"].str.contains("Ireland")]
print(ireland_features)
ireland_universities('Ireland has multiple universities on a global ranking scale')
def top_ten_global_unis(message):
    """Print an insight line about the top-ten global universities.

    The parameter was originally named ``str``, shadowing the builtin;
    callers pass it positionally, so the rename is safe.
    NOTE(review): an identical function is re-defined later in this
    script — one of the two definitions should be removed.
    """
    print(message)
# creating a list
irish_unis_list = ['Trinity College Dublin', 'Royal College of Surgeons in Ireland (RCSI)','University College Dublin',
                   'National University of Ireland, Galway', 'Maynooth University', 'University College Cork', 'University of Limerick',
                   'Dublin City University', 'Technological University Dublin']
print(irish_unis_list[0])
print(irish_unis_list[2])
print(len(irish_unis_list))

# for loop to use in project: count Irish rows in the rankings
count = 0
for ireland in world_rankings['Country']:
    if (ireland == 'Ireland'):
        count += 1
print(count, 'instances of an Irish university found in top 200 rankings')

# finding where Ireland uni's feature
ireland_features = world_rankings[world_rankings["Country"].str.contains("Ireland")]
# NOTE(review): iterating a DataFrame yields its column NAMES, not rows —
# use .iterrows()/.itertuples() if rows were intended.
for i in ireland_features:
    print(i)

# iterrows: print the first two rows with their index
import pandas as pd
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
for index, row in world_rankings.head(n=2).iterrows():
    print(index, row)

# numpy
import numpy as np
import pandas as pd
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
arr = np.array([world_rankings['Country']])
print(arr.dtype)
print(arr.shape)

# first visualisation: universities per country in the top 20
import seaborn as sns
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
df = pd.DataFrame(world_rankings.loc[0:19])
fig, ax = plt.subplots()
sns.countplot(x='Country', data=df).set_title('Top 20 Global University Overview')
ax.set(xlabel="Top 20 Countries", ylabel='Number of universities per country')
plt.show()
fig.savefig('Insight1.jpg')
# insight - America has the highest amount of top ranking universities in the Top 20 global rankings

# second visualisation: international-student share of the top 10
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
sns.set_theme(style="whitegrid")
top_10 = world_rankings.loc[0:9]
fig, ax = plt.subplots()
g = sns.barplot(y='University', x='International_Students_Percent', data=top_10, orient='h')
ax.set(xlabel="% of international students", ylabel = "Top 10 Global Universities")
g.set_title('Top 10 International Student Overview')
plt.show()
fig.savefig('Insight2.jpg')
# insight - analysis is that Imperial college london has highest amount of international students in top 10 unis
def top_ten_global_unis(message):
    """Print an insight line about the top-ten global universities.

    NOTE(review): this re-defines the identically-named function declared
    earlier in the script (and silently replaces it); one of the two
    definitions should be removed.  The parameter was originally named
    ``str``, shadowing the builtin.
    """
    print(message)
top_ten_global_unis('Imperial college london has highest amount of international students in top 10 universities')
#third visualisation
sns.set_theme(style="darkgrid")
top_20 = world_rankings.loc[0:19]
top_20_sorted = top_20.sort_values('Students_Percentage_Female',ascending=False)
g = sns.catplot(data=top_20_sorted,kind="bar",x="Students_Percentage_Female",
y="Number_students",hue="Country",ci="sd", palette="dark",
alpha=.6,height=6)
g.despine(left=True)
g.set_axis_labels("% Female Students","Total Number Students (00s)")
g.fig.subplots_adjust(top=0.9)
g.fig.suptitle("Top 20 Universities Female Makeup")
plt.savefig('Insight3.jpg')
plt.show()
#insight - analysis is that Canada has highest percentage of female students in a university top 20 unis globally
#fourth visualisation
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_theme(style="darkgrid")
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
ireland_features = world_rankings[world_rankings["Country"].str.contains("Ireland")]
fig, ax = plt.subplots()
sns.scatterplot(data=ireland_features, x="Score_Rank", y="University", marker='o')
ax.set_title('How Irish universities feature in global rankings')
ax.set(xlabel="Global ranking score", ylabel='Irish University Name')
ax.set_yticklabels = 'Amount of International Students'
fig.savefig('Insight4.jpg')
plt.show()
ireland_universities('Ireland has 5 universities in top 200 global rankings')
#insight - Ireland has 5 universities in top 200 global rankings, and we can see who are the top three universities within Ireland
#fifth visualisation
import seaborn as sns
import matplotlib.pyplot as plt
world_rankings = pd.read_csv('World_University_Ranks_2020.csv')
df = pd.DataFrame(world_rankings.loc[0:19])
fig, ax = plt.subplots()
g = sns.barplot(x="Citations", y="Country",
hue='Country',
data=df)
ax.set_title('Citation chart for top countries')
ax.set(xlabel="Citation rate", ylabel='Total per country')
plt.legend(loc='upper right')
plt.show()
fig.savefig('Insight5.jpg')
# insight - united states has the most citations in top 20 unis
#Bokeh plot
from bokeh.models import ColumnDataSource, RadioGroup
from bokeh.plotting import figure, output_file, show
output_file("hbar_stack.html")
source = ColumnDataSource(data=dict(
top_10_uni_rank= [1,2,3,4,5,6,7,8,9,10],
top_10_female_students= [46,34,47,43,39,45,49,50,46,38],
top_10_male_students= [54,66,53,57,61,55,51,50,54,62],
))
p = figure(plot_width=400, plot_height=400, title="Student gender breakdown of top 10 universities",
x_axis_label='% of student gender breakdown', y_axis_label='Top 10 Universities')
p.hbar_stack(['top_10_female_students', 'top_10_male_students'], y='top_10_uni_rank',
height=0.8, color=("red", "blue"), source=source, legend_label=('Female', 'Male'))
p.legend.orientation = "horizontal"
p.legend.location = "bottom_right"
show(p)
#Merging dataframes
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
irish_unis_dict = {'Trinity College Dublin': 114, 'Royal College of Surgeons in Ireland (RCSI)':142,'University College Dublin':158,
'National University of Ireland, Galway':188, 'Maynooth University':199, 'University College Cork':208, 'University of Limerick':288,
'Dublin City University':302, 'Technological University Dublin':401}
df = pd.DataFrame(list(irish_unis_dict.items()),columns = ['uni_name','global_score_rank'])
print(df.head())
geometry = {'Trinity College Dublin':(53.3438, -6.2546), 'Royal College of Surgeons in Ireland (RCSI)':(53.3390, -6.2620),'University College Dublin':(53.3067, -6.2210),
'National University of Ireland, Galway':(53.2792, -9.0617), 'Maynooth University':(53.3845, -6.6011), 'University College Cork':(51.8935, -8.4921), 'University of Limerick':(52.673479,-8.564095),
'Dublin City University':(53.3861, -6.2564), 'Technological University Dublin':(53.3515, -6.2693)}
df2 = pd.DataFrame(list(geometry.items()),columns = ['uni_name','geometry'])
print(df2.head())
new_df = pd.merge(df, df2, on='uni_name')
print(new_df.head())
#visualising geographical data
import pandas as pd
irl_uni_only_data = pd.DataFrame({'irish_uni_name' :['Trinity College Dublin', 'Royal College of Surgeons in Ireland (RCSI)','University College Dublin',
'National University of Ireland, Galway', 'Maynooth University', 'University College Cork', 'University of Limerick',
'Dublin City University', 'Technological University Dublin'],
'score_rank' :[114, 142,158,188,199,208,288,302,401],
'lat':[53.3438, 53.3390,53.3067,53.2792, 53.3845, 51.8935, 52.673479,53.3861, 53.3515] ,
'long':[-6.2546, -6.2620,-6.2210, -9.0617,-6.6011, -8.4921, -8.564095, -6.2564,-6.2693]})
import folium
# folium.Map expects location=[lat, lon]; the original passed
# [-6.2546, 53.3438] (lon, lat), which centers the map in the Indian Ocean —
# that is why the map opened in the wrong place even though the markers
# below were placed correctly.
trinity_col = folium.Map(location=[53.3438, -6.2546], zoom_start=21)
trinity_col.save("mymap.html")
# Drop one marker per Irish university at its (lat, long) position.
for i in range(0, len(irl_uni_only_data)):
    folium.Marker(
        location=[irl_uni_only_data.iloc[i]['lat'], irl_uni_only_data.iloc[i]['long']],
        popup=irl_uni_only_data.iloc[i]['irish_uni_name'],
    ).add_to(trinity_col)
trinity_col.save("mymap2.html")
# please note if starting location is incorrect the markers are correctly placed across Ireland
# additional code looking at geojson file on counties in Ireland
irl_map = gpd.read_file('counties.geojson')
irl_map.plot()
# NOTE(review): a bare attribute access — only useful in a REPL/notebook.
irl_map.crs
irl_map.geometry = irl_map.geometry.to_crs(epsg=3857) |
import numpy as np
def np2pcd(x, y, z, filename, rgb=False):
    """Write three equal-length coordinate arrays as an ASCII PCD v0.7 file.

    Parameters
    ----------
    x, y, z : numpy arrays of equal size — point coordinates.
    filename : str — output path.
    rgb : bool — when truthy, append a constant rgb field to every point.
        NOTE(review): the value written is the plain float 0.05; PCD
        consumers typically expect a packed-float RGB value — confirm
        against whatever reads these files.
    """
    rgb_value = 0.05
    # "with" guarantees the handle is closed even if a write fails
    # (the original leaked it on error).  Truthiness replaces the original
    # `if rgb==False / elif rgb==True` pair, which emitted NO field header
    # at all for truthy non-bool values such as rgb=1.
    with open(filename, 'w') as f:
        f.write("# .PCD v0.7 - Point Cloud Data file format\n")
        f.write("VERSION 0.7\n")
        if rgb:
            f.write("FIELDS x y z rgb\n")
            f.write("SIZE 4 4 4 4\n")
            f.write("TYPE F F F F\n")
            f.write("COUNT 1 1 1 1\n")
        else:
            f.write("FIELDS x y z\n")
            f.write("SIZE 4 4 4\n")
            f.write("TYPE F F F\n")
            f.write("COUNT 1 1 1\n")
        # Unorganized cloud: WIDTH = point count, HEIGHT = 1.
        f.write("WIDTH %d\n" % (x.size))
        f.write("HEIGHT 1\n")
        f.write("VIEWPOINT 0 0 0 1 0 0 0\n")
        f.write("POINTS %d\n" % (x.size))
        f.write("DATA ascii\n")
        for i in range(np.size(x)):
            if rgb:
                f.write("%.4f %.4f %.4f %.4f\n" % (x[i], y[i], z[i], rgb_value))
            else:
                f.write("%.4f %.4f %.4f\n" % (x[i], y[i], z[i]))
|
import numpy as np
import matplotlib.pyplot as plt
import random

# Per-turn growth-rate coefficients for each trophic level.
leafyFactor = .15
herbFactor = .5
predFactor = .7

# NOTE(review): these 10x10 grids are never used by the simulation below —
# presumably left over from a planned spatial version; confirm before removal.
leafyMat = np.full((10,10), 100)
herbMat = np.full((10,10), 50)

turn = 0
#max leafyPop = 100
# Initial populations (the main loop caps leafyPop at 100).
leafyPop = 80
herbPop = 5
predPop = .5
def leafyGrowth():
    """Logistic regrowth of the leafy population, jittered by +/-10%.

    Uses the module-level leafyPop and leafyFactor; growth shrinks as the
    population approaches its cap of 100.
    """
    saturation = leafyPop / 100
    headroom = 1 - saturation
    base = leafyPop * leafyFactor * headroom
    return base * random.uniform(.9, 1.1)
def leafyConsumption():
    """Leafy biomass eaten this turn: one third of the herbivore population."""
    return herbPop / 3
def herbGrowth(herbPop):
    """Herbivore growth for the given population, scaled by food availability.

    The parameter shadows the module global on purpose (callers pass a lagged
    value); food availability comes from the module-level leafyPop.
    """
    food = leafyPop / 100
    return herbPop * herbFactor * food * random.uniform(.9, 1.1)
def herbStarvation():
    """Herbivore starvation losses; worse when leafy food is scarce."""
    scarcity = 1 - leafyPop / 100
    return scarcity * herbPop * random.uniform(.9, 1.1) * scarcity / 2
def herbPredation():
    """Herbivores lost to predation this turn (one per predator, +/-10%)."""
    return predPop * random.uniform(.9, 1.1)
def predGrowth(predPop):
    """Predator growth, limited by predator density relative to prey.

    The parameter shadows the module global on purpose (callers pass a
    lagged value); prey count comes from the module-level herbPop.
    """
    density = predPop / herbPop
    return predPop * predFactor * (1 - density) * random.uniform(.9, 1.1)
def predStarvation():
    """Predator starvation, rising with the predator-to-prey ratio."""
    return predPop * (predPop / herbPop) * random.uniform(.9, 1.1)
# ---- main simulation loop: 1000 turns, then plot the three populations ----
turn = 0
leafyList = [80, 80]
herbList = [5, 5]
predList = [.5, .5]
while turn < 1000:
    turn += 1
    print("Turn: " + str(turn))

    # Evaluate each randomized delta exactly once.  The original called
    # every function twice — once to apply and once to print — so the
    # printed numbers were different random draws than the ones actually
    # applied to the populations.
    growth = leafyGrowth()
    leafyPop = leafyPop + growth
    print("Leafy growth: " + str(growth))
    eaten = leafyConsumption()
    leafyPop = leafyPop - eaten
    print("Leafy cons: " + str(eaten))
    # Clamp leafy population to [1, 100] (reset to 1, not 0, on collapse).
    if (leafyPop > 100):
        leafyPop = 100
    if (leafyPop < 0):
        leafyPop = 1

    # Herbivores grow from a two-turn lagged population value.
    h_growth = herbGrowth(herbList[-2])
    herbPop = herbPop + h_growth
    print()
    print("Herb growth: " + str(h_growth))
    h_starv = herbStarvation()
    herbPop = herbPop - h_starv
    print("Herb starv: " + str(h_starv))
    h_pred = herbPredation()
    herbPop = herbPop - h_pred
    print("Herb pred: " + str(h_pred))
    if (herbPop < 0):
        herbPop = .002

    # Predators likewise grow from a two-turn lagged value.
    p_growth = predGrowth(predList[-2])
    predPop = predPop + p_growth
    print()
    print("Pred growth: " + str(p_growth))
    p_starv = predStarvation()
    predPop = predPop - p_starv
    print("Pred starv: " + str(p_starv))
    if (predPop < 0):
        print("blern")
        predPop = .001

    print()
    print()
    print("Leaf: " + str(leafyPop))
    print("Herb: " + str(herbPop))
    print("Pred: " + str(predPop))
    for _ in range(6):
        print()
    leafyList.append(leafyPop)
    herbList.append(herbPop)
    predList.append(predPop)

plt.plot(leafyList, label = "Leafy")
plt.plot(herbList, label = "Herb")
plt.plot(predList, label = "Pred")
plt.legend()
plt.show()
|
#!/usr/bin/env python
# tau.yelo.at - views
# -*- coding: utf-8 -*-
import psutil
from flask import render_template
from . import app
@app.route("/")
def index():
    """Render the dashboard with current memory, CPU and disk statistics."""
    # SI unit divisors (bytes per MB / GB).
    mb = 1000000
    gb = 1000000000
    mem = psutil.virtual_memory()
    disk = psutil.disk_usage('/')
    info = {
        'mem_total': mem.total / mb,
        'mem_used': mem.active / mb,
        'cpu_percent_used': psutil.cpu_percent(),
        'cpu_count': psutil.cpu_count(),
        'disk_total': disk.total / gb,
        'disk_used': disk.used / gb,
    }
    return render_template('index.html', info=info)
|
#!/usr/bin/env python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
#import pylab as pl
import numpy as np
import os
import struct
import argparse
import glob
import sys
import math
import Diffusion2D.unit_vec as uv
class Iter_Data:
    """Plain attribute holder for per-file iteration data.

    Attributes S and v_0 are assigned by the caller (program_idata).
    """
    pass
class patch_data:
    """Per-file collection of patch arrays read from a binary dump.

    X, Y, Z hold one coordinate array per patch and V holds the
    corresponding value array; all four lists grow in lockstep.
    """

    def __init__(self):
        self.X, self.Y, self.Z, self.V = [], [], [], []
def nice_axes(a, b):
    """Round the range (a, b) outward to 'nice' bounds.

    The rounding granularity is the order of magnitude of the span
    b - a.  Returns (A, B) with A <= a and B >= b.  On failure (e.g.
    b <= a makes log10 blow up) the inputs are printed and the
    exception re-raised.
    """
    try:
        p = math.floor(math.log10(b - a))
        A = nice_lower(a, p)
        B = nice_upper(b, p)
    except:
        # Python 2 print statements: dump the offending inputs for debugging.
        print "a b"
        print a,b
        raise
    return A, B
def nice_upper(a, p):
    """Round a up to the next multiple of 10**p."""
    scale = 10 ** p
    scaled = a / scale
    return math.ceil(scaled) * scale
def nice_lower(a, p):
    """Round a down to the previous multiple of 10**p."""
    try:
        a = a / 10**p
    except:
        # Python 2 print statement: dump inputs before re-raising.
        print "a",a,"p",p
        raise
    a = math.floor(a)
    a = a * 10**p
    return a
def read_array(f):
    """Read one serialized numpy array from binary stream f.

    Wire format: rank N (one 'L' read from 8 bytes), then N dimension
    sizes ('L' each), then prod(dims) float64 values.  Returns the
    values reshaped to those dimensions.

    NOTE(review): native 'L' is 8 bytes only on LP64 platforms — confirm
    the writer used the same struct layout.
    """
    N = struct.unpack('L', f.read(8))
    N = N[0]
    try:
        shape = struct.unpack('L'*N, f.read(8*N))
    except struct.error as er:
        # Python 2 print: report the rank that made the read fail.
        print "N =",N
        raise
    shape = np.array(shape)
    size = np.prod(shape)
    #print "N: ", N
    #print "shape:",shape
    #print "size: ",size
    v = struct.unpack('d'*size, f.read(8*size))
    return np.reshape(v, shape)
def do_file(filename):
    """Read one binary patch dump file into a patch_data.

    Each record consists of three int32 axis indices followed by three
    coordinate arrays (stored in the order given by those indices) and
    one value array.  Reads records until EOF.

    Returns (f_min, f_max, pd): the global value range over all records
    and the populated patch_data.
    """
    f = open(filename, 'rb')
    pd = patch_data()
    pd.name = filename
    # Running value range; start from sentinels outside any float64 data.
    f_max = -1E37
    f_min = 1E37
    while True:
        x = [None]*3
        xdir = [None]*3
        try:
            # First axis index of the next record; a clean EOF ends the loop.
            xdir[0] = struct.unpack('i', f.read(4))[0]
        except struct.error as er:
            print "eof"
            break
        except:
            print sys.exc_info()[0]
            raise
        try:
            xdir[1] = struct.unpack('i', f.read(4))[0]
            xdir[2] = struct.unpack('i', f.read(4))[0]
            #x[0] = read_array(f)
            #x[1] = read_array(f)
            #x[2] = read_array(f)
            # Arrays appear in xdir order; slot them into x/y/z positions.
            x[xdir[0]] = read_array(f)
            x[xdir[1]] = read_array(f)
            x[xdir[2]] = read_array(f)
            v = read_array(f)
            #v = np.transpose(v)
            pd.X.append(x[0])
            pd.Y.append(x[1])
            pd.Z.append(x[2])
            pd.V.append(v)
            # Track the global value range across all records.
            f_max = max(np.max(v), f_max)
            f_min = min(np.min(v), f_min)
        except:
            print sys.exc_info()[0]
            raise
    f.close()
    print "min max"
    print "{0:16f}{1:16f}".format(f_min,f_max)
    return f_min, f_max, pd
def plot_file(pd, a, b):
    """2-D filled-contour plot of every patch in pd over value range [a, b].

    BUG FIX: the original referenced ``pl`` (the ``pylab`` alias whose
    import is commented out at the top of the file), which raised a
    NameError; it now uses the imported ``matplotlib.pyplot as plt``.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel(pd.name)
    # 11 evenly spaced contour levels spanning the shared value range.
    clrrng = np.linspace(a, b, 11)
    print('success')
    for x, y, v in zip(pd.X, pd.Y, pd.V):
        con = ax.contourf(x, y, v, clrrng)
    plt.colorbar(con, ax=ax)
def plot3(patch_datas, vmin, vmax):
    """3-D surface plot of all patches, sharing the value range [vmin, vmax]."""
    fig = plt.figure()
    # FIX: Figure.gca(projection='3d') was deprecated and later removed in
    # matplotlib; add_subplot is the supported way to request a 3-D axes.
    ax = fig.add_subplot(projection='3d')
    for pd in patch_datas:
        plot3_patch(ax, pd, vmin, vmax)
def plot3_patch(ax,pd,vmin,vmax):
    """Draw each (x, y, z, v) patch of pd as a surface on the 3-D axes ax,
    colored by v normalized into [0, 1] over the shared range [vmin, vmax]."""
    for x,y,z,v in zip(pd.X,pd.Y,pd.Z,pd.V):
        N = v-vmin # clamp min to 0
        N = N/(vmax-vmin) # normalize 0..1
        # facecolors maps the normalized values through the jet colormap;
        # shading is disabled so color encodes v directly.
        surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=cm.jet(N),
            linewidth=0, antialiased=False, shade=False)
############
def program_3d():
    """Read every file named in the global ``args.files`` and show one
    combined 3-D surface plot.

    The min/max value is tracked across all files so every patch shares
    a single color/value range.
    """
    val_max = -100000
    val_min = 100000
    patch_datas = []
    for f in args.files:
        fmin, fmax, pd = do_file(f)
        patch_datas.append(pd)
        val_max = max(fmax, val_max)
        val_min = min(fmin, val_min)
    # nice_axes is kept for its validation side effect (it raises on a
    # degenerate range); its rounded bounds are not used by plot3.
    a, b = nice_axes(val_min, val_max)
    # FIX: removed a dead "for pd in patch_datas: pass" loop that did nothing.
    plot3(patch_datas, val_min, val_max)
    plt.show()
def program_idata(args):
    """Plot iteration data (S and v_0 arrays) from each input file.

    For every file, draws three subplots: S vs index, v_0 vs index, and
    v_0 vs S.
    """
    data = {}
    for filename in args.files:
        d = Iter_Data()
        # FIX: close each input file; the original leaked the handles.
        with open(filename, 'rb') as f:
            d.S = read_array(f)
            d.v_0 = read_array(f)
        data[filename] = d
    # One figure per file.
    for k, d in data.items():
        fig = plt.figure()
        ax = fig.add_subplot(221)
        ax.plot(d.S, '-o')
        ax.set_xlabel('i')
        ax.set_ylabel('S')
        ax = fig.add_subplot(222)
        ax.plot(d.v_0, '-o')
        ax.set_xlabel('i')
        ax.set_ylabel('v')
        ax = fig.add_subplot(223)
        ax.plot(d.S, d.v_0, '-o')
        ax.set_xlabel('S')
        ax.set_ylabel('v')
    plt.show()
############
# CLI entry: positional input files, plus -r to switch from the 3-D
# surface program to the iteration-data program.
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='*')
parser.add_argument('-r', action='store_true')
args = parser.parse_args()
if len(args.files) == 0:
    # Python 2 print statement.
    print "no input files"
    sys.exit(1)
#print args.files
if(args.r):
    program_idata(args)
else:
    # program_3d reads the module-level ``args`` directly.
    program_3d()
|
from panda3d.core import NodePath, CardMaker, Vec4, Quat, Vec3, SamplerState, OmniBoundingVolume, BillboardEffect
from panda3d.core import CollisionBox, CollisionNode, CollisionTraverser, CollisionHandlerQueue, BitMask32, Point3
from panda3d.core import LPlane, LineSegs, AntialiasAttrib
from bsp.leveleditor import LEGlobals
from bsp.leveleditor import LEUtils
from bsp.leveleditor.viewport.ViewportType import VIEWPORT_3D_MASK
from bsp.leveleditor.math.Ray import Ray
from bsp.leveleditor.actions.EditObjectProperties import EditObjectProperties
from bsp.leveleditor.actions.ActionGroup import ActionGroup
from bsp.leveleditor.selection.SelectionType import SelectionModeTransform
from bsp.leveleditor.menu.KeyBind import KeyBind
from .BaseTransformTool import BaseTransformTool, Rollover, Ready, Down, Global, \
Local, TransformWidget, TransformWidgetAxis
from .BoxTool import BoxAction, ResizeHandle
import math
from PyQt5 import QtWidgets, QtCore
class MoveWidgetAxis(TransformWidgetAxis):
    """One arrow (head model + line shaft) of the move gizmo, for a
    single axis."""

    def __init__(self, widget, axis):
        TransformWidgetAxis.__init__(self, widget, axis)

        # Arrow head model, pushed out along the axis and scaled down.
        self.head = base.loader.loadModel("models/editor/arrow_head.bam")
        self.head.reparentTo(self)
        self.head.setY(0.6)
        self.head.setScale(0.7)

        # Arrow shaft: a single antialiased white line from the origin
        # out to where the head begins.
        shaft = LineSegs()
        shaft.setColor(1, 1, 1, 1)
        shaft.setThickness(2.0)
        shaft.moveTo(0, 0, 0)
        shaft.drawTo(0, 0.6, 0)
        self.base = self.attachNewNode(shaft.create())
        self.base.setAntialias(AntialiasAttrib.MLine)

    def cleanup(self):
        # Release our scene-graph nodes before the base-class cleanup.
        self.head.removeNode()
        self.head = None
        self.base.removeNode()
        self.base = None
        TransformWidgetAxis.cleanup(self)

    def getClickBox(self):
        # Thin box hugging the arrow, used for mouse picking.
        return [Vec3(-0.06, 0.0, -0.06), Vec3(0.06, 0.8, 0.06)]
class MoveWidget(TransformWidget):
    """Transform widget whose per-axis handles are move arrows."""

    def createAxis(self, axis):
        # Factory hook: each axis handle is a MoveWidgetAxis.
        return MoveWidgetAxis(self, axis)
class MoveTool(BaseTransformTool):
    """Editor tool that translates the current selection with a 3-axis
    arrow gizmo."""

    Name = "Move"
    ToolTip = "Move Tool"
    KeyBind = KeyBind.MoveTool
    Icon = "resources/icons/editor-move.png"

    def __init__(self, mgr):
        BaseTransformTool.__init__(self, mgr)
        self.transformType = SelectionModeTransform.Translate

    def createWidget(self):
        self.widget = MoveWidget(self)

    def filterHandle(self, handle):
        # Box handles only apply while nothing is selected.
        return not base.selectionMgr.hasSelectedObjects()

    def getUpdatedProperties(self, obj, inst):
        return {"origin": inst.getPos(obj.np.getParent())}

    def getActionName(self):
        return "Move"

    def onMouseMoveTransforming3D(self, vp):
        # 3D is a little more complicated: define a plane parallel to the
        # selected axis, intersect the camera->mouse ray against it, and
        # treat the intersection point as the movement value.
        gizmoPoint = self.getPointOnGizmo()
        absolute = base.snapToGrid(self.preTransformStart + gizmoPoint - self.transformStart)
        self.setGizmoOrigin(absolute)
        self.moveBox(absolute - self.boxOriginOffset)
|
# -*- coding: utf-8 -*-
import re
from itertools import groupby, izip_longest
from operator import itemgetter
from collections import Counter
def find_middle_x(x1, x2):
    """Midpoint of two high-precision decimal strings.

    Each input is split into a float-safe head (10 fractional digits)
    and a tail of extra digits that float cannot represent.  The heads
    are averaged as floats; the tails are combined separately.  Returns
    '<head with 9 fractional digits>:<combined tail value>'.

    BUG FIXES vs the original:
      * the tail of x2 was sliced from x1 (copy/paste error);
      * tail scale factors were 10**(-1)*len(...) — a multiple of 0.1 —
        instead of 10**(-len(...));
      * prec_f was initialised to '' and then incremented with a float,
        which raised TypeError whenever cut_prec was non-empty.
    Python-2 debug prints were removed.
    """
    cut_x1_i = x1.index('.') + 11
    cut_x2_i = x2.index('.') + 11
    cut_x1, prec_x1 = float(x1[:cut_x1_i]), x1[cut_x1_i:]
    # FIX: tail must come from x2, not x1.
    cut_x2, prec_x2 = float(x2[:cut_x2_i]), x2[cut_x2_i:]
    fl_midl = (cut_x1 + cut_x2) / 2
    s_midl = str(fl_midl)
    # NOTE(review): the midpoint keeps only 9 fractional digits while the
    # inputs keep 10 — confirm the asymmetry is intentional.
    cut_midl_i = s_midl.index('.') + 10
    cut_midl = s_midl[:cut_midl_i]
    cut_prec = s_midl[cut_midl_i:]
    # FIX: correct scale 10**(-len) and numeric (not string) accumulator.
    if all([prec_x1, prec_x2]):
        prec_f = int(prec_x1) * 10**(-len(prec_x1)) + int(prec_x2) * 10**(-len(prec_x2))
    elif prec_x1:
        prec_f = int(prec_x1) * 10**(-len(prec_x1))
    elif prec_x2:
        prec_f = int(prec_x2) * 10**(-len(prec_x2))
    else:
        prec_f = 0.0
    if cut_prec:
        prec_f += int(cut_prec) * 10**(-len(cut_prec))
    return cut_midl + ":" + str(prec_f)
# print find_middle_x('123.0000000003123', '123.0000000004123')
def get_row_dict(line):
    """Parse one whitespace-separated segment row into named fields.

    The first two tokens of the line are skipped; the rest are mapped
    positionally onto the field names (unconverted strings).
    """
    field_names = ('x1', 'y1', 'x2', 'y2', 'pol_id', 'a', 'b')
    return dict(zip(field_names, line.split()[2:]))
def get_A_B(x1, y1, x2, y2):
    """Slope/intercept [a, b] of the line through (x1, y1)-(x2, y2).

    A vertical segment (x1 == x2, compared before conversion) has no
    finite slope and yields ('undf', 'undf').
    """
    if x1 == x2:
        return 'undf', 'undf'
    x1, y1 = float(x1), float(y1)
    x2, y2 = float(x2), float(y2)
    if not x1:
        # The segment touches x = 0 at (0, y1): intercept is y1.
        b = y1
        a = (y2 - b) / x2
    elif not x2:
        b = y2
        a = (y1 - b) / x1
    else:
        # Eliminate a from the two line equations to solve for b first.
        ratio = x1 / x2
        b = (y1 - ratio * y2) / (1 - ratio)
        a = (y1 - b) / x1
    return [a, b]
def calc_Y(x, a, b):
    """Evaluate the line y = a*x + b; inputs are coerced to float."""
    x, a, b = float(x), float(a), float(b)
    return a * x + b
def update_dict_vals(proc_add, x_middle):
    """Set each segment dict's 'val' to its line's y-value at x_middle,
    mutating the dicts in place."""
    for seg in proc_add:
        seg['val'] = calc_Y(x_middle, seg['a'], seg['b'])
def l():
    """Debug separator: an 80-dash line followed by three blank lines."""
    print(80 * '-' + '\n\n\n')
def replace_node_val(next_tree, del_val, new_info):
    """Overwrite a tree node's segment data in place.

    The first replacement for a node fills its primary slot (a, b,
    pid1); a subsequent replacement only records the second polygon id.
    """
    node = next_tree.get_node(next_tree.root, del_val)
    if node.pid1_filled:
        node.pid2 = new_info['pol_id']
    else:
        node.a = new_info['a']
        node.b = new_info['b']
        node.pid1 = new_info['pol_id']
        node.pid1_filled = True
def treatment_add_del(del_nodes, add_nodes):
    """Pair nodes scheduled for deletion with nodes scheduled for addition.

    Deletions are grouped by their segment end y ('y2') and additions by
    their start y ('y1'); groups sharing the same y are zipped into
    replacement pairs.  Returns (f_replace, f_del, f_add): replacement
    pairs, unmatched deletions, unmatched additions.

    NOTE(review): itertools.groupby only merges *adjacent* equal keys,
    so this assumes del_nodes/add_nodes arrive sorted (or at least
    clustered) by those keys — confirm at the call sites.
    """
    # del/add pairs
    pairs = []
    del_dict = {}
    add_dict = {}
    for k, gr in groupby(del_nodes, lambda x: x['y2']):
        del_dict[k] = list(gr)
    for k, gr in groupby(add_nodes, lambda x: x['y1']):
        add_dict[k] = list(gr)
    # Consume matching addition groups as deletion groups claim them.
    for k in del_dict:
        pairs.append((del_dict[k], add_dict.get(k, [])))
        add_dict.pop(k, None)
    # finals
    f_del = [] # deletions without pair to replace
    f_add = [] # addition without pair to replace
    f_replace = []
    for pair in pairs:
        d, a = pair
        for i_pair in izip_longest(d, a):
            # (node to delete, node to add in its place); izip_longest
            # pads the shorter side with None.
            i_d, i_a = i_pair
            if i_d and i_a:
                f_replace.append((i_d, i_a))
            else:
                f_del.append(i_d) if i_d else f_add.append(i_a)
    # Additions whose start y matched no deletion end remain unpaired.
    for k, v in add_dict.iteritems():
        f_add.extend(v)
    return f_replace, f_del, f_add
# print 'del_dict', del_dict
# print 'add_dict', add_dict
# print 'pairs', pairs
# print 80*'-'
# print 'deletions without pair to replace'
# print f_del
# print 'addition without pair to replace'
# print f_add
# print 'to_replace'
# print to_replace
# # удаляем сразу по 2 значения
# for v, gr in groupby(f_del, lambda x: x['val']):
# # pol_ids = [g['pol_id'] for g in gr]
# next_tree.delete(v)
# def treatment_add_del2(del_nodes, add_nodes, x_middle):
# # у del_nodes ['val'] равны значениям старого дерева,
# # сосчитанные, x_middle старого дерева
#
# for add in add_nodes:
# add['val'] = calc_Y(x_middle, add['a'], add['b'])
#
# to_replace = []
#
# # del_nodes.sort(key=itemgetter('val'))
# # add_nodes.sort(key=itemgetter('val'))
#
# for dele in del_nodes:
# for add in add_nodes:
# if float(dele['y2']) == float(add['y1']):
# to_replace.append((dele, add))
#
# dele['val'] = calc_Y(x_middle, dele['a'], dele['b'])
def find_polygon(root, came_x, came_y):
    """Locate the polygon containing point (came_x, came_y) via a BST walk.

    Each tree node represents a line segment; node.calc_new_val(came_x)
    gives that segment's y at came_x.  The walk tracks the nearest
    segments below ('less') and above ('more') the query point; the
    polygon shared by both bounds is the answer.

    Returns (child, child) on an exact hit, a polygon id when bounded on
    both sides, otherwise None.  NOTE(review): the mixed return types
    look unintentional — confirm what callers expect.
    """
    less, more = None, None
    if root is None:
        print 'No {0} element in Tree'.format(came_y)
    elif root.val is None:
        print 'Tree is empty!'
    else:
        child = root
        while child:
            r_v = child.calc_new_val(came_x)
            if r_v == came_y:
                # Point lies exactly on this segment.
                return child, child
            elif r_v < came_y:
                less = child
                child = child.right
            elif r_v > came_y:
                more = child
                child = child.left
        print less, more
        if not less and not more:
            pass
        elif less and more:
            # FIXME (translated): may pull two identical ids here.
            # NOTE(review): max() over a Counter returns the largest key,
            # not the most frequent one — Counter(ids).most_common(1) was
            # probably intended; confirm.
            ids = [less.pid1, less.pid2, more.pid1, more.pid2]
            print ids
            return max(Counter(ids))
        else:
            print 'Out of territory'
    return None
# Sample segment dicts for exercising treatment_add_del by hand:
# additions span x in [1, 2], deletions span x in [0, 1]; 'val' is the
# segment's y at the evaluation x (see calc_Y).
add_nodes = [
    {'x1': 1, 'y1': 7, 'x2':2, 'y2': 9, 'val': 8, },
    {'x1': 1, 'y1': 7, 'x2':2, 'y2': 7, 'val': 7, },
    {'x1': 1, 'y1': 7, 'x2':2, 'y2': 5, 'val': 6, },
    {'x1': 1, 'y1': 2, 'x2':2, 'y2': 2, 'val': 2, },
]
# Deletions end at x = 1, where the additions begin.
del_nodes = [
    {'x1': 0, 'y1': 0, 'x2': 1, 'y2': 2,'val': 1, },
    {'x1': 0, 'y1': 4, 'x2': 1, 'y2': 2,'val': 3, },
    {'x1': 0, 'y1': 5, 'x2': 1, 'y2': 7,'val': 6, },
    {'x1': 0, 'y1': 9, 'x2': 1, 'y2': 7,'val': 8, },
]
# treatment_add_del(del_nodes, add_nodes) |
from django.shortcuts import render
from django.http import HttpResponse
from .models import Destination
def index(request):
    """Render the landing page with a single hard-coded destination."""
    destination = Destination()
    destination.desc = 'The city that never sleep'
    destination.city = 'Marrakech'
    destination.price = 800
    return render(request, 'index.html', {'des1': destination})
# Create your views here.
|
# Fetch and print current weather plus a 5-day forecast from the
# OpenWeatherMap REST API.
import requests
from datetime import datetime
#Default - shows for Moscow
# NOTE(review): hard-coded API key in source — move to an environment
# variable or config file before sharing this code.
api_key = "3e61296365ff0da7ca77775d7fd89edb"
"""
test_url = 'http://api.openweathermap.org/data/2.5/weather?id=524901&APPID=' + api_key
resp = requests.get(test_url)
if resp.status_code in [200, 201]:
    weather_data = resp.json()
    print("The weather in {} is {}.".format(weather_data['name'], weather_data['weather'][0]['description']))
else:
    print("ERROR: " + str(resp.status_code))
"""
#Show current weather for hometown - Spartanburg
spartanburg_id = "4597204"
spartanburg_url = f"http://api.openweathermap.org/data/2.5/weather?id={spartanburg_id}&APPID={api_key}&units=imperial"
resp = requests.get(spartanburg_url)
if resp.status_code in [200, 201]:
    weather_data = resp.json()
    print(f"""
    The current weather in {weather_data["name"]} is: {weather_data["weather"][0]["description"]}
    Current temp (in Fahrenheit): {weather_data["main"]["temp"]}
    Temperature range: High: {weather_data["main"]["temp_max"]} Low: {weather_data["main"]["temp_min"]}
    Humidity: {weather_data["main"]["humidity"]}
    """)
else:
    print("ERROR: " + str(resp.status_code))
#Show 5 day forecast for vacation spot - Berlin
berlin_id = "6545310"
berlin_url_forecast = f"http://api.openweathermap.org/data/2.5/forecast?id={berlin_id}&APPID={api_key}&units=imperial"
resp = requests.get(berlin_url_forecast)
if resp.status_code in [200, 201]:
    weather_data = resp.json()
    print("The 5 day forecast for " + weather_data["city"]["name"] + " is: ")
    # The forecast list has one entry per 3-hour slot; only the noon
    # entry of each day is printed as that day's forecast.
    for item in weather_data["list"]:
        dt_obj = datetime.strptime(item["dt_txt"], "%Y-%m-%d %H:%M:%S")
        if dt_obj.hour == 12:
            print(f"""
    On {str(dt_obj.month)}, {str(dt_obj.day)} it will be {item["weather"][0]["description"]}
    Temperature (in Fahrenheit): {item["main"]["temp"]}
    Temp range: High: {item["main"]["temp_max"]} Low: {item["main"]["temp_min"]}
    Humidity: {item["main"]["humidity"]}
    """)
        # NOTE(review): the advice below runs for EVERY 3-hour slot (8x
        # per day), not only the noon entry printed above — it was likely
        # meant to sit under the hour == 12 check; confirm intent.
        # Condition codes 2xx-5xx cover thunderstorm/drizzle/rain/snow.
        if item["weather"][0]["id"] >= 200 and item["weather"][0]["id"] < 600:
            print("You may want to bring an umbrella.")
        if item["main"]["temp"] > 70 and item["main"]["temp"] < 110:
            print("Break out the shorts!")
        elif item["main"]["temp"] < 70 and item["main"]["temp"] > 50:
            print("Jeans and maybe a light jacket.")
        elif item["main"]["temp"] < 50 and item["main"]["temp"] > 32:
            print("Definitely want a coat.")
        elif item["main"]["temp"] < 32:
            print("Bundle up, it's cold!")
        elif item["main"]["temp"] > 110:
            print("Wear SPF 1000!")
else:
    print("ERROR: " + str(resp.status_code))
|
import abc


class FileDriverBase(abc.ABC):
    """Abstract base class for file-format drivers.

    Subclasses must implement read(); onLoad() is an optional hook with
    a no-op default.
    """

    def onLoad(self):
        """Hook invoked when the driver is loaded; default does nothing."""

    @abc.abstractmethod
    def read(self, filename, **kwargs):
        """Read *filename* and return its parsed contents."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.