text
stringlengths 8
6.05M
|
|---|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Forms of documentation in Python:
Form                        Role
# comments                  documentation inside the file
dir function                list of attributes available on an object
docstrings (__doc__)        documentation attached to objects in the file
PyDoc: help function        interactive help for objects
PyDoc: HTML reports         module documentation in the browser
standard manuals            the official language and library reference
web resources               online tutorials, examples, etc.
published books             commercial reference books
"""
# 1. Docstrings via the __doc__ attribute of a local module and its function.
import doc
print('-' * 40, '\ndoc.py的文档:\n', doc.__doc__)
print('-' * 40, '\ndoc.add的文档:\n', doc.add.__doc__)
# 2. PyDoc: help() prints the rendered help text itself and returns None,
# so the trailing "None" in the output is expected.
print('-' * 40, '\ndoc的帮助文档:\n', help(doc))
print('-' * 40, '\ndoc.add的帮助文档:\n', help(doc.add))
# 3. HTML reports (not demonstrated here)
|
import unittest
from katas.kyu_4.next_bigger_number_with_same_digits import next_bigger
class NextBiggerTestCase(unittest.TestCase):
    """Tests for next_bigger(): next larger number with the same digits."""

    def _check(self, number, expected):
        # Shared assertion helper so each test reads as a single data point.
        self.assertEqual(next_bigger(number), expected)

    def test_equals(self):
        self._check(12, 21)

    def test_equals_2(self):
        self._check(513, 531)

    def test_equals_3(self):
        self._check(2017, 2071)

    def test_equals_4(self):
        # -1 signals that no bigger arrangement exists.
        self._check(9, -1)

    def test_equals_5(self):
        self._check(111, -1)

    def test_equals_6(self):
        self._check(531, -1)

    def test_equals_7(self):
        self._check(1234567890, 1234567908)
|
# matrix_paths.py
def matrix_paths_recursive(m, n, i, j):
    """Count monotone (right/down only) paths from cell (i, j) to (m-1, n-1)."""
    # Bottom-right corner reached: exactly one (empty) path remains.
    if i == m - 1 and j == n - 1:
        return 1
    # Recurse into the cell below and the cell to the right, when in bounds.
    below = matrix_paths_recursive(m, n, i + 1, j) if i + 1 < m else 0
    right = matrix_paths_recursive(m, n, i, j + 1) if j + 1 < n else 0
    return below + right
def matrix_paths(m, n):
    """Count monotone (right/down only) paths through an m x n grid via DP.

    BUG FIXES vs. the original:
    * the grid was built as n rows of m columns but indexed as m x n,
      which raised IndexError whenever m != n -- it is now truly m x n;
    * xrange() (Python 2 only) replaced with range().
    """
    # counts[i][j] = number of paths from (0, 0) to (i, j).
    counts = [[0] * n for _ in range(m)]
    # First column and first row each have exactly one path leading to them.
    for i in range(m):
        counts[i][0] = 1
    for j in range(n):
        counts[0][j] = 1
    # Every other cell is reached from the cell above or the cell to the left.
    for i in range(1, m):
        for j in range(1, n):
            counts[i][j] = counts[i-1][j] + counts[i][j-1]
    return counts[m-1][n-1]
|
"""
Edanur Demir
Loss functions used in EENet training
"""
import sys
import torch
import torch.nn.functional as F
def loss(args, exit_tag, pred, target, conf, cost):
    """Dispatch to the loss implementation selected by args.loss_func.

    Arguments are
    * args: command line arguments entered by user (args.loss_func selects
      one of 'v0'..'v4').
    * exit_tag: exit tag of examples in the batch (only used by v4).
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * conf: confidence value of each exit point.
    * cost: cost rate of the each exit point.

    Raises ValueError for an unknown args.loss_func (the original silently
    returned None, which only surfaced later as an unpacking error).
    """
    if args.loss_func == 'v0':
        return loss_v0(args, pred, target, conf, cost)
    if args.loss_func == 'v1':
        return loss_v1(args, pred, target, conf, cost)
    if args.loss_func == 'v2':
        return loss_v2(args, pred, target, conf, cost)
    if args.loss_func == 'v3':
        return loss_v3(args, pred, target, conf, cost)
    if args.loss_func == 'v4':
        return loss_v4(args, exit_tag, pred, target, conf, cost)
    raise ValueError('unknown loss function: {}'.format(args.loss_func))
def loss_v0(args, pred, target, conf, cost):
    """Loss version 0: plain sum of NLL losses over all exit points.

    Arguments are
    * args: command line arguments entered by user.
    * pred: prediction result (probabilities) of each exit point.
    * target: target prediction values.
    * conf, cost: unused here; kept for a uniform loss signature.

    Used in the first stage of two-stage training.  Returns
    (cumulative_loss, prediction_loss, cost_loss) where cost_loss is 0.
    """
    pred_loss = sum(F.nll_loss(pred[i].log(), target)
                    for i in range(args.num_ee + 1))
    cost_loss = 0
    # Without a cost term the cumulative loss equals the prediction loss.
    return pred_loss, pred_loss, cost_loss
def loss_v1(args, pred, target, conf, cost):
    """Loss version 1: confidence-fused prediction and cost losses.

    Arguments are
    * args: command line arguments entered by user.
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * conf: confidence value of each exit point.
    * cost: cost rate of the each exit point.

    Folds exits from last to first:
        Prediction'_i = conf_i * pred_i + (1 - conf_i) * Prediction'_(i+1)
        Cost'_i       = conf_i * cost_i + (1 - conf_i) * Cost'_(i+1)
    """
    fused_pred = pred[args.num_ee]
    fused_cost = cost[args.num_ee]
    for i in reversed(range(args.num_ee)):
        gate = conf[i]
        fused_pred = gate * pred[i] + (1 - gate) * fused_pred
        fused_cost = gate * cost[i] + (1 - gate) * fused_cost
    pred_loss = F.nll_loss(fused_pred.log(), target)
    cost_loss = fused_cost.mean()
    return pred_loss + args.lambda_coef * cost_loss, pred_loss, cost_loss
def loss_v2(args, pred, target, conf, cost):
    """Loss version 2: cumulative version of loss_v1.

    Arguments are
    * args: command line arguments entered by user.
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * conf: confidence value of each exit point.
    * cost: cost rate of the each exit point.

    Accumulates the fused loss at every stage of the backward fold instead
    of only at the first exit, aiming at a fairer training signal.
    Returns (cumulative_loss, 0, 0).
    """
    # Start from the final exit and fold towards the first, adding the
    # fused loss of each intermediate stage as we go.
    fused_pred = pred[args.num_ee]
    fused_cost = cost[args.num_ee]
    total = F.nll_loss(fused_pred.log(), target) \
        + args.lambda_coef * fused_cost.mean()
    for i in reversed(range(args.num_ee)):
        fused_pred = conf[i] * pred[i] + (1 - conf[i]) * fused_pred
        fused_cost = conf[i] * cost[i] + (1 - conf[i]) * fused_cost
        total = total + F.nll_loss(fused_pred.log(), target) \
            + args.lambda_coef * fused_cost.mean()
    return total, 0, 0
def loss_v3(args, pred, target, conf, cost):
    """Loss version 3: per-exit losses weighted by normalized confidences.

    Arguments are
    * args: command line arguments entered by user.
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * conf: confidence value of each exit point.
    * cost: cost rate of the each exit point.
    """
    # Normalize the mean confidence of each exit so the weights sum to 1.
    mean_confs = [c.mean() for c in conf]
    conf_sum = sum(mean_confs)
    norm_conf = [m / conf_sum for m in mean_confs]
    total = 0
    for i in range(args.num_ee + 1):
        exit_loss = F.nll_loss(pred[i].log(), target) \
            + args.lambda_coef * cost[i].mean()
        total = total + norm_conf[i] * exit_loss
    return total, 0, 0
def loss_v4(args, exit_tag, pred, target, conf, cost):
    """Loss version 4: supervised by pre-assigned per-example exit tags.

    Arguments are
    * args: command line arguments entered by user.
    * exit_tag: exit tag of examples in the batch.
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * conf: confidence value of each exit point.
    * cost: cost rate of the each exit point.

    Each example takes the prediction/cost of its assigned exit; the
    confidences are trained with a class-balanced BCE towards the tags.
    """
    fused_pred = pred[args.num_ee]
    fused_cost = cost[args.num_ee]
    conf_loss = 0
    for i in range(args.num_ee + 1):
        # 0/1 masks selecting the examples that do / do not exit at i.
        is_exit = (exit_tag == i).to(args.device, dtype=torch.float)
        not_exit = (exit_tag != i).to(args.device, dtype=torch.float)
        fused_pred = is_exit * pred[i] + not_exit * fused_pred
        fused_cost = is_exit * cost[i] + not_exit * fused_cost
        # Weight each side by the other side's frequency to balance BCE.
        exit_rate = is_exit.sum().item() / len(exit_tag)
        stay_rate = not_exit.sum() / len(exit_tag)
        weights = is_exit * stay_rate + not_exit * exit_rate
        conf_loss = conf_loss + F.binary_cross_entropy(conf[i], is_exit, weights)
    pred_loss = F.nll_loss(fused_pred.log(), target)
    cost_loss = fused_cost.mean()
    return pred_loss + args.lambda_coef * cost_loss + conf_loss, pred_loss, cost_loss
def update_exit_tags(args, batch_size, pred, target, cost):
    """Assign every example the exit whose per-example loss is smallest.

    Arguments are
    * args: command line arguments entered by user.
    * batch_size: current size of the batch.
    * pred: prediction result of each exit point.
    * target: target prediction values.
    * cost: cost rate of the each exit point.

    Returns the tags as a (batch_size, 1) int tensor.
    """
    # Running minimum loss per example, initialised to "infinity".
    best_loss = (torch.ones(batch_size) * sys.maxsize).to(args.device)
    best_exit = (torch.ones(batch_size) * args.num_ee).to(args.device, dtype=torch.int)
    for exit_idx in range(args.num_ee + 1):
        exit_loss = F.nll_loss(pred[exit_idx].log(), target, reduction='none') \
            + args.lambda_coef * cost[exit_idx]
        # 0/1 masks: where this exit improves on the best loss so far.
        better = (exit_loss < best_loss).to(args.device, dtype=torch.float)
        worse = (exit_loss >= best_loss).to(args.device, dtype=torch.float)
        best_loss = exit_loss * better + best_loss * worse
        best_exit = exit_idx * better.int() + best_exit * worse.int()
    return best_exit.reshape(-1, 1)
|
import inspect
import logging
import traceback
from .errors import BlockedFunctionError
from .events import emergency
from .logginglib import do_log
from .logginglib import get_logger
from .blocked_function import BlockedFunction
class VulnerableMachine:
    """
    An abstract class that allows machines to switch off if an error occurs.

    This object listens to the emergency event. If the emergency event is
    executed, the resetToEmergencyState() function is executed. Also the
    emergency state will be saved until it is resolved again.

    Attributes
    ----------
    _in_emergency_state : bool
        Whether the current instrument is in emergency state at the moment, if
        it is the user has to unblock everything manually

    Listened Events
    ---------------
    emergency
        Set the machine to emergency state when the emergency event is fired
    """

    def __init__(self) -> None:
        """Create the vulnerable machine object"""
        super(VulnerableMachine, self).__init__()
        # Not in emergency state until the emergency event fires.
        self._in_emergency_state = False
        self._logger = get_logger(self)
        # add a listener to the emergency event to go in emergency state
        # whenever the emergency event is created
        # NOTE(review): `emergency` supports item assignment to register a
        # callback under a string key -- confirm against the events module.
        self.emergency_event_id = "vulnerable_machine_emergency_state"
        emergency[self.emergency_event_id] = self.resetToEmergencyState

    def resetToEmergencyState(self, *args) -> None:
        """Set the machine to be in emergency state.

        This will reset the machine to be in the safe state. In addition the
        emergency case will be saved. The user needs to unblock everything
        until the program can continue.

        Calling this function will make all functions (except the
        resolveEmergencyState() function) to throw a BlockedFunctionError.
        """
        if do_log(self._logger, logging.CRITICAL):
            self._logger.critical("Setting to emergency mode")
        # Print a highly visible banner plus the current stack trace so the
        # operator can see where the emergency was triggered from.
        msg = "CRITICAL ERROR -- EMERGENCY STATE IS EXECUTED!"
        print("")
        print("+{}+".format("-" * (len(msg) + 2)))
        print("| {} |".format(msg))
        print("+{}+".format("-" * (len(msg) + 2)))
        print("")
        print("Setting {} to emergency state.".format(self.__class__.__name__))
        print("")
        traceback.print_stack()
        print("")
        self._in_emergency_state = True
        self.resetToSafeState()
        # Shadow every bound method with a BlockedFunction stored as an
        # *instance* attribute; attribute lookup then finds the blocker
        # instead of the class method, so any call (except
        # resolveEmergencyState) raises a BlockedFunctionError.
        for name, _ in inspect.getmembers(self, predicate=inspect.ismethod):
            if name != "resolveEmergencyState":
                setattr(self, name, BlockedFunction(getattr(self, name), name))

    def resolveEmergencyState(self) -> None:
        """Unblocks the machine and resolves the emergency state.

        The functions can now be used again.
        """
        # Find the BlockedFunction wrappers installed by
        # resetToEmergencyState() and restore the bound methods they wrap
        # (assumes BlockedFunction exposes the wrapped callable as `.func`
        # -- confirm against blocked_function module).
        # NOTE(review): _in_emergency_state is not reset to False here.
        for name, _ in inspect.getmembers(self, predicate=lambda x: isinstance(x, BlockedFunction)):
            setattr(self, name, getattr(self, name).func)

    def resetToSafeState(self) -> None:
        """Set the machine into its safe state.

        The safe state will be used whenever something bad happens or when the
        measurement has finished. The machine will be told to go in the safe
        state. This should be a state where the machine can stay for long
        times until the operator comes again.
        """
        # Subclasses must implement the actual hardware-specific safe state.
        raise NotImplementedError()
|
# Simple driving-licence questionnaire (prompts are Traditional Chinese).
driving = input('請問你有沒有開過車? ')
if driving not in ('有', '沒有'):
    # Only "有" (yes) or "沒有" (no) are accepted; anything else aborts.
    print('只能輸入 有 或 沒有')
    raise SystemExit
age = int(input('請問你的年齡? '))
if driving == '有':
    if age >= 18:
        print('你通過測驗了')
    else:
        print('奇怪 你怎麼會開過車')
elif driving == '沒有':
    if age >= 18:
        print('你可以考駕照,怎麼還不去考?')
    else:
        print('很好,再過幾年就可以考駕照了')
|
import unittest
from katas.kyu_6.fizz_buzz import solution
class FizzBuzzTestCase(unittest.TestCase):
    """Tests for solution() from the Fizz Buzz kata."""

    def _check(self, number, expected):
        # Shared assertion helper.
        self.assertEqual(solution(number), expected)

    def test_equals(self):
        self._check(20, [5, 2, 1])

    def test_equals_2(self):
        self._check(2, [0, 0, 0])

    def test_equals_3(self):
        self._check(30, [8, 4, 1])

    def test_equals_4(self):
        self._check(300, [80, 40, 19])
|
# -*- coding: utf-8 -*-
# @Author: steve yuan
# @Date: 2017-05-27 09:21:47
# @Last Modified by: steve yuan
# @Last Modified time: 2017-06-22 22:46:53
import webbrowser
class Movies():
    """Holds the data needed to render one movie entry on the web page."""

    def __init__(self, movie_title, original_network, stars,
                 movie_storyline, poster_image, trailer_youtube):
        """Store title, network, cast, storyline, poster URL and trailer URL.

        These attributes are later read when generating the page markup.
        """
        self.title = movie_title
        self.original_network = original_network
        self.stars = stars
        self.movie_storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open the trailer URL in the user's default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
from __future__ import print_function
import json
import optparse
import pprint
import sys
from elasticsearch import Elasticsearch
def run_elasticsearch(data_object):
    """Print every pre-PEP audit event that mentions data_object.

    Queries the local Elasticsearch "irods_audit" index for rule names
    matching audit_pep_api_*_pre (excluding auth rules) and dumps every hit
    whose _source contains data_object in one of its values.

    BUG FIXES vs. the original:
    * dict.iteritems() is Python 2 only -- replaced with values();
    * the found/break bookkeeping only stopped scanning one iteration
      after a match -- replaced with a straightforward any().
    """
    es = Elasticsearch(hosts=[{"host": "localhost", "port": 9200}])
    if not es.indices.exists("irods_audit"):
        return
    request_body = {
        "sort": [
            {"@timestamp": {"order": "asc"}}
        ],
        "size": 10000,
        "query": {
            "bool": {
                "must": {
                    "regexp": {"rule_name": "audit_pep_api_.*_pre"}
                },
                "must_not": {
                    "regexp": {"rule_name": "audit_pep_api_auth_.*_pre"}
                }
            }
        }
    }
    res = es.search(index="irods_audit", body=request_body)
    for hits in res["hits"]["hits"]:
        source = hits["_source"]
        # A hit is interesting when any of its field values mentions the
        # requested logical path.
        if any(data_object in value for value in source.values()):
            print(json.dumps(source, sort_keys=True, indent=4,
                             separators=(',', ':')))
def main():
    """Parse the -d option and run the audit query."""
    parser = optparse.OptionParser()
    parser.add_option('-d', action='store', type='string', dest='data_object',
                      help='logical path of data object to audit')
    options, args = parser.parse_args()
    if options.data_object is None or args:
        # parser.error() prints usage and exits; the return is a safeguard.
        parser.error('-d data_object is required')
        return 1
    run_elasticsearch(options.data_object)


if __name__ == '__main__':
    sys.exit(main())
|
import math
from typing import List
class Solution:
    """Heap sort using an explicit binary min-heap stored in the input list."""

    def sortArray(self, nums: List[int]) -> List[int]:
        """Return the elements of nums in ascending order.

        NOTE: like the original, this consumes nums (it is emptied in place)
        and returns a new sorted list.
        """

        def sift_down(i, a):
            # Restore the min-heap property for the subtree rooted at i.
            smallest = i
            left, right = 2 * i + 1, 2 * i + 2
            if left < len(a) and a[left] < a[smallest]:
                smallest = left
            if right < len(a) and a[right] < a[smallest]:
                smallest = right
            if smallest != i:
                a[i], a[smallest] = a[smallest], a[i]
                sift_down(smallest, a)

        def pop_min(a):
            # Swap the root with the last element, remove it, re-heapify.
            a[0], a[-1] = a[-1], a[0]
            smallest = a.pop()
            if a:
                sift_down(0, a)
            return smallest

        def build_min_heap(a):
            # Sift down every internal node, starting from the last one at
            # len(a)//2 - 1.  BUG FIX: the original derived the start index
            # from 2**floor(log2(n)) - 2, which both over-scans and relies
            # on float precision of math.log for large n.
            for i in range(len(a) // 2 - 1, -1, -1):
                sift_down(i, a)

        build_min_heap(nums)
        result = []
        while nums:
            result.append(pop_min(nums))
        return result
# Quick smoke test: prints the input sorted ascending.
demo = Solution()
print(demo.sortArray([5, 4, 3, 2, 1]))
|
from Task903 import catprob
from Task902 import featprob
def docprob(bayes, item, cat):
    """Score a document for a category.

    Combines the category score with every feature score by summation
    (matching the additive convention of the imported catprob/featprob).
    """
    feature_total = sum(featprob(bayes, feat, cat)
                        for feat in bayes.get_features(item))
    return catprob(bayes, cat) + feature_total
|
import requests
import time
def main():
    """Poll the local price API for each ticker forever, every 5 seconds."""
    tickers = ['bitcoin', 'ethereum', 'dogecoin']
    while True:
        for ticker in tickers:
            response_text = requests.get(f'http://127.0.0.1:8080/myapi/{ticker}').text
            print(response_text)
        # Blank separator between polling rounds, then wait before the next.
        print('\n')
        time.sleep(5)


main()
|
from django.db import models
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
class SignUp(generic.CreateView):
    """Account sign-up view based on Django's built-in UserCreationForm.

    Redirects to the 'login' URL on success and renders signup.html.

    NOTE(review): this is a class-based *view*; it appears to live in a
    models.py module and probably belongs in views.py -- confirm before
    moving.
    """
    form_class = UserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
# Create your models here.
|
# Should be called once: creates the initial schema for prot.db.
import sqlite3

conn = sqlite3.connect('prot.db')
try:
    c = conn.cursor()
    # One row per protein entry, with its display name and file path.
    c.execute('CREATE TABLE protein (proid integer primary key, name VARCHAR(200), path VARCHAR(200))')
    # Tag vocabulary.
    c.execute('CREATE TABLE tags (tagid integer primary key, name VARCHAR(200))')
    # Many-to-many join table between protein and tags.
    c.execute('CREATE TABLE ptag (proid,tagid)')
    conn.commit()
finally:
    # Close the connection even if a CREATE fails (e.g. when the script is
    # run a second time and the tables already exist) -- the original
    # leaked the connection in that case.
    conn.close()
|
from Tkinter import Text, Tk, END, mainloop
from os.path import isfile
def read_data(file_name):
print "read operation..........."
if isfile(file_name):
f = open(file_name)
else:
f = open(file_name+"_backup")
print "Getting the data from: ", f.name
data = f.readlines()
print data
def write_data(file_name):
    """Open a Tkinter text editor that mirrors keystrokes into file(s).

    If file_name exists, every key release is appended to both the file and
    its "_backup" twin; otherwise keystrokes go only into "_buffer" while
    the "_backup" copy supplies the initial text (crash-recovery scheme,
    see reset_file below in the original module).
    """
    f=""
    data = ""
    if isfile(file_name):
        # Normal mode: write-through to the file and its backup.
        f = open(file_name, 'a+')
        f_b = open(file_name+"_backup", 'a+')
    else:
        # Recovery mode: read from backup, record new keys in the buffer.
        f=open(file_name+"_backup","a+")
        f_b = open(file_name+"_buffer", 'a+')

    def keyup_write_twofiles(e):
        # Key-release handler for normal mode: mirror the char to both files.
        c = e.char
        f.write(c)
        f_b.write(c)
        f.flush()
        print "Written {0} in to {1}".format(c,f.name)
        f_b.flush()
        print "Written {0} in to {1}".format(c,f_b.name)

    def keyup_write_onefile(e):
        # Key-release handler for recovery mode: buffer file only.
        c=e.char
        f_b.write(c)
        f_b.flush()
        print "Written {0} in to {1}".format(c,f_b.name)

    # NOTE(review): with 'a+' the initial read position is platform
    # dependent, so data may come back empty here -- confirm on the target
    # platform.
    data = f.read()
    root = Tk()
    T = Text(root, height=100, width=100)
    T.pack()
    T.insert(END, data)
    # Bind the handler matching the mode chosen above.
    if isfile(file_name):
        T.bind("<KeyRelease>", keyup_write_twofiles)
    else:
        T.bind("<KeyRelease>", keyup_write_onefile)
    # Blocks until the window is closed; the handles stay open meanwhile.
    mainloop()
def reset_file(file_name):
    """Recover <file_name>_backup from <file_name>_buffer after a crash.

    Returns a human-readable status string:
    * "Reset Not required" when the primary file still exists;
    * a success message after appending the buffer onto the backup;
    * a failure message when the buffer file is missing.
    (As in the original, returns None when the backup itself is missing.)

    Improvement vs. the original: the copy is wrapped in try/finally so
    both handles are closed even if the write fails.
    """
    backup_file = file_name+"_backup"
    buffer_file = file_name+"_buffer"
    if isfile(file_name):
        # Primary file survived; nothing to recover.
        return "Reset Not required"
    elif isfile(backup_file):
        if isfile(buffer_file):
            f_backup = open(backup_file, 'a')
            f_buffer = open(buffer_file)
            try:
                # Append everything typed into the buffer onto the backup.
                f_backup.write(f_buffer.read())
                res = "Reset done successfully from {0} -> {1}".format(f_buffer.name,f_backup.name)
            finally:
                f_buffer.close()
                f_backup.close()
            return res
        else:
            return "Reset operation failed: Buffer file not found!!"
|
# -*-coding:utf-8-*-
import time
import warnings
import numpy as np
from gensim.models.doc2vec import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
import labsql
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
class D2V:
    """Train a gensim Doc2Vec model on documents fetched from the lab DB."""

    def __init__(self):
        # TaggedDocument corpus built from the "prep_all" table.
        self.doc = []
        # Pre-selected training ids; not referenced in this class --
        # presumably kept for external callers (TODO confirm).
        self.id_500 = np.load('./data/train_500.npy')
        # NOTE(review): hard-coded database host/credentials; consider
        # moving them to configuration.
        self.conn = labsql.LabSQL('172.168.1.36', 'sohu', 'sa', 'scucc')
        self.data = self.conn.fetch("select * from prep_all")
        for i, doc in enumerate(self.data):
            # Each row is unpacked as (_, corpus, _); assumes a 3-column
            # result set -- confirm against the prep_all schema.
            _, corpus, _ = list(doc)
            words = list(str(corpus).split())
            documents = TaggedDocument(words, tags=[i])
            self.doc.append(documents)

    def training_model(self, vector_size=200, epochs=5000, min_count=3, window=5, negative=10, worker=8, dm=1):
        """Train and save a Doc2Vec model over the loaded corpus.

        Parameters mirror gensim's Doc2Vec arguments; dm=1 selects the
        distributed-memory (PV-DM) algorithm.  The model is written to
        ./d2v_models/d2v_dm_model.all.
        """
        d2v = Doc2Vec(self.doc, vector_size=vector_size, min_count=min_count, window=window,
                      negative=negative, workers=worker, dm=dm)
        d2v.train(self.doc, total_examples=d2v.corpus_count, epochs=epochs)
        d2v.save('./d2v_models/d2v_dm_model.all')
if __name__ == '__main__':
    # Time the full load + train run.
    start = time.time()
    D2V().training_model()
    print('runtime: %s sec' % (time.time() - start))
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/29 23:04
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
import cv2
import numpy as np
"""
本片文档学习了findcontours以及最小外接矩形以及普通矩形
"""
# This script demonstrates cv2.findContours plus minimum-area and plain
# bounding rectangles on a coin image.
"""1.读取图像、转灰度、高斯滤波、Canny求边缘"""
# Step 1: read the image, convert to gray, Gaussian-blur, Canny edges.
image = cv2.imread("./picture/coins.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)
edged = cv2.Canny(blurred, 30, 150)
"""2. 找轮廓、画轮廓"""
# Step 2: find and draw the contours.
# NOTE(review): the 3-value return is OpenCV 3.x; OpenCV 4.x returns only
# (contours, hierarchy) -- confirm the installed cv2 version.
binary, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# param1: pass a copy rather than the image itself; param2: retrieval mode
# (RETR_EXTERNAL keeps only the outermost contours, RETR_LIST keeps all);
# param3: contour approximation method.
# return: binary is the binarized image, contours is a list of contours,
# hierarchy stores 4 indices per contour (next, previous, parent, inner
# child); an index is negative when there is no such contour.
print("I count {} coins in the image".format(len(contours)))
coins = image.copy()
cv2.drawContours(coins, contours, -1, (0, 255, 0), 2)  # 3rd arg: contour index, -1 means all; then color and line width
cv2.imshow("originla", image)
cv2.imshow("coins", coins)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""3. 普通裁剪部分轮廓
0. 普通矩形框 (x, y, w, h) = cv2.boundingRect(c)
1. 最小外包圆 ((centerX, centerY), radius) = cv2.minEnclosingCircle(c)
"""
# Step 3 (disabled below): crop each contour with a plain bounding box and
# mask it with its minimum enclosing circle.
# for (i, c) in enumerate(contours):  # i: coin index, c: contour points
#     (x, y, w, h) = cv2.boundingRect(c)  # box around the contour: top-left corner plus width/height
#     print("Coin #{}".format(i+1))
#     coin = image[y:y+h, x:x+w]  # note image storage order is (row, col), opposite of (x, y)
#     cv2.imshow("Coin", coin)
#
#     mask = np.zeros(image.shape[:2], dtype="uint8")
#     ((centerX, centerY), radius) = cv2.minEnclosingCircle(c)  # circle around the contour: center and radius
#     cv2.circle(mask, (int(centerX), int(centerY)), int(radius), 255, -1)
#     mask = mask[y:y+h, x:x+w]
#     cv2.imshow("masked coin", cv2.bitwise_and(coin, coin, mask=mask))
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
"""4. 最小外接矩形
0. rect = cv2.minAreaRect(cnt)
rect[0]为中心坐标
rect[1][0]:width
rect[1][q]:height
rect[2]:angle(度数),顺时针为正数,逆时针为负数
"""
# Step 4: minimum-area (rotated) rectangle per contour.
# rect[0] is the center, rect[1] the (width, height), rect[2] the angle in
# degrees (clockwise positive, counter-clockwise negative).
rect = []
for (i, c) in enumerate(contours):  # i: coin index, c: contour points
    rect = cv2.minAreaRect(c)
    print("angle:{}".format(rect[2]))
    # Convert the rotated rect to its 4 integer corner points and draw it.
    box = np.int0(cv2.boxPoints(rect))
    cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""Admin extension tags."""
from functools import reduce
from django import template
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _, gettext_lazy
from modoboa.core import signals as core_signals
from modoboa.lib.templatetags.lib_tags import render_link
from modoboa.lib.web_utils import render_actions
from .. import signals
register = template.Library()

# (masculine, feminine) lazy-translation pairs, looked up by the `gender`
# filter.  Untranslated entries still carry their "_m"/"_f" suffix, which
# the filter uses to detect a missing translation.
genders = {
    "Enabled": (gettext_lazy("enabled_m"), gettext_lazy("enabled_f"))
}
@register.simple_tag
def domains_menu(selection, user, ajax_mode=True):
    """Specific menu for domain related operations.

    Corresponds to the menu visible on the left column when you go to
    *Domains*.

    :param str selection: menu entry currently selected
    :param ``User`` user: connected user
    :param bool ajax_mode: render ajax-style navigation URLs (default True)
    :rtype: str
    :return: rendered menu (as HTML)
    """
    nav_classes = "navigation"
    if ajax_mode:
        domain_list_url = "list/"
        quota_list_url = "quotas/"
        logs_url = "logs/"
        nav_classes += " ajaxnav"
    else:
        domain_list_url = reverse("admin:domain_list")
        quota_list_url = domain_list_url + "#quotas/"
        logs_url = domain_list_url + "#logs/"
    entries = [
        {"name": "domains",
         "label": _("List domains"),
         "img": "fa fa-user",
         # BUG FIX: these entries hard-coded "ajaxnav navigation" and so
         # ignored ajax_mode; use the computed nav_classes, exactly as
         # identities_menu() does.
         "class": nav_classes,
         "url": domain_list_url},
        {"name": "quotas",
         "label": _("List quotas"),
         "img": "fa fa-hdd-o",
         "class": nav_classes,
         "url": quota_list_url},
        {"name": "logs",
         "label": _("Message logs"),
         "img": "fa fa-list",
         "class": nav_classes,
         "url": logs_url},
    ]
    if user.has_perm("admin.add_domain"):
        # Let extensions inject their own entries before import/export.
        extra_entries = signals.extra_domain_menu_entries.send(
            sender="domains_menu", user=user)
        for entry in extra_entries:
            entries += entry[1]
        entries += [
            {"name": "import",
             "label": _("Import"),
             "img": "fa fa-folder-open",
             "url": reverse("admin:domain_import"),
             "modal": True,
             "modalcb": "admin.importform_cb"},
            {"name": "export",
             "label": _("Export"),
             "img": "fa fa-share-alt",
             "url": reverse("admin:domain_export"),
             "modal": True,
             "modalcb": "admin.exportform_cb"}
        ]
    return render_to_string("common/menulist.html", {
        "entries": entries,
        "selection": selection,
        "user": user
    })
@register.simple_tag
def identities_menu(user, selection=None, ajax_mode=True):
    """Menu specific to the Identities page.

    :param ``User`` user: the connected user
    :param str selection: menu entry currently selected
    :param bool ajax_mode: render ajax-style navigation URLs (default True)
    :rtype: str
    :return: the rendered menu
    """
    nav_classes = "navigation"
    if ajax_mode:
        identity_list_url = "list/"
        quota_list_url = "quotas/"
        nav_classes += " ajaxnav"
    else:
        identity_list_url = reverse("admin:identity_list")
        quota_list_url = identity_list_url + "#quotas/"
    entries = [
        {"name": "identities",
         "label": _("List identities"),
         "img": "fa fa-user",
         "class": nav_classes,
         "url": identity_list_url},
        {"name": "quotas",
         "label": _("List quotas"),
         "img": "fa fa-hdd-o",
         "class": nav_classes,
         "url": quota_list_url},
        {"name": "import",
         "label": _("Import"),
         "img": "fa fa-folder-open",
         "url": reverse("admin:identity_import"),
         "modal": True,
         "modalcb": "admin.importform_cb"},
        {"name": "export",
         "label": _("Export"),
         "img": "fa fa-share-alt",
         "url": reverse("admin:identity_export"),
         "modal": True,
         "modalcb": "admin.exportform_cb"}
    ]
    return render_to_string("common/menulist.html", {
        "entries": entries,
        # BUG FIX: the `selection` parameter was accepted but never passed
        # to the template, so the current entry was never highlighted
        # (domains_menu does pass it).
        "selection": selection,
        "user": user
    })
@register.simple_tag
def domain_actions(user, domain):
    """Build the list of action buttons rendered for one domain row."""
    actions = [
        {"name": "listidentities",
         "url": u"{0}#list/?searchquery=@{1}".format(
             reverse("admin:identity_list"), domain.name),
         "title": _("View the domain's identities"),
         "img": "fa fa-user"},
    ]
    # Alarm shortcut only when the domain currently has open alarms.
    if domain.alarms.opened().exists():
        actions.append({
            "name": "listalarms",
            "url": reverse("admin:domain_alarms", args=[domain.pk]),
            "title": _("View domain's alarms"),
            "img": "fa fa-bell"
        })
    # Edit / delete are permission-gated.
    if user.has_perm("admin.change_domain"):
        actions.append({
            "name": "editdomain",
            "title": _("Edit {}").format(domain),
            "url": reverse("admin:domain_change", args=[domain.pk]),
            "modal": True,
            "modalcb": "admin.domainform_cb",
            "img": "fa fa-edit"
        })
    if user.has_perm("admin.delete_domain"):
        actions.append({
            "name": "deldomain",
            "url": reverse("admin:domain_delete", args=[domain.id]),
            "title": _("Delete %s?") % domain.name,
            "img": "fa fa-trash"
        })
    # Extensions may contribute additional actions through the signal.
    for _receiver, extra in signals.extra_domain_actions.send(
            sender=None, user=user, domain=domain):
        if extra:
            actions += extra
    return render_actions(actions)
@register.simple_tag
def identity_actions(user, ident):
    """Build the action list (extensions, edit, delete) for an identity row."""
    objid = ident.id
    if ident.__class__.__name__ == "User":
        actions = []
        # Extensions may contribute per-account actions first.
        for _receiver, extra in core_signals.extra_account_actions.send(
                sender="identity_actions", account=ident):
            actions += extra
        edit_url = (
            reverse("admin:account_change", args=[objid]) +
            "?active_tab=default"
        )
        actions += [
            {"name": "changeaccount",
             "url": edit_url,
             "img": "fa fa-edit",
             "modal": True,
             "modalcb": "admin.editaccount_cb",
             "title": _("Edit {}").format(ident.username)},
            {"name": "delaccount",
             "url": reverse("admin:account_delete", args=[objid]),
             "img": "fa fa-trash",
             "title": _("Delete %s?") % ident.username},
        ]
    else:
        # Anything that is not a User is handled as an Alias.
        actions = [
            {"name": "changealias",
             "url": reverse("admin:alias_change", args=[objid]),
             "img": "fa fa-edit",
             "modal": True,
             "modalcb": "admin.aliasform_cb",
             "title": _("Edit {}").format(ident)},
            {"name": "delalias",
             "url": "{}?selection={}".format(
                 reverse("admin:alias_delete"), objid),
             "img": "fa fa-trash",
             "title": _("Delete %s?") % ident.address},
        ]
    return render_actions(actions)
@register.simple_tag
def check_identity_status(identity):
    """Check if identity is enabled or not."""
    if identity.__class__.__name__ == "User":
        # A user is disabled when its mailbox domain is disabled or the
        # account itself is inactive.
        mailbox = getattr(identity, "mailbox", None)
        if mailbox is not None and not mailbox.domain.enabled:
            return False
        return bool(identity.is_active)
    # Aliases: both the alias and its domain must be enabled.
    return bool(identity.enabled and identity.domain.enabled)
@register.simple_tag
def domain_aliases(domain):
    """Display domain aliases of this domain.

    :param domain:
    :rtype: str
    """
    if not domain.aliases.count():
        return "---"
    markup = "".join("%s<br/>" % alias.name for alias in domain.aliases.all())
    return mark_safe(markup)
@register.simple_tag
def identity_modify_link(identity, active_tab="default"):
    """Return the appropriate modification link.

    According to the identity type, a specific modification link (URL)
    must be used.

    :param identity: a ``User`` or ``Alias`` instance
    :param str active_tab: the tab to display
    :rtype: str
    """
    linkdef = {"label": identity.identity, "modal": True}
    if identity.__class__.__name__ == "User":
        linkdef["url"] = "{}?active_tab={}".format(
            reverse("admin:account_change", args=[identity.id]), active_tab)
        linkdef["modalcb"] = "admin.editaccount_cb"
    else:
        linkdef["url"] = reverse("admin:alias_change", args=[identity.id])
        linkdef["modalcb"] = "admin.aliasform_cb"
    return render_link(linkdef)
@register.simple_tag
def domadmin_actions(daid, domid):
    """Single 'remove permission' action for a domain-administrator row."""
    remove_url = "{0}?domid={1}&daid={2}".format(
        reverse("admin:permission_remove"), domid, daid)
    return render_actions([{
        "name": "removeperm",
        "url": remove_url,
        "img": "fa fa-trash",
        "title": _("Remove this permission")
    }])
@register.filter
def gender(value, target):
    """Return the gendered translation of value ('m' or 'f' target)."""
    if value in genders:
        masculine, feminine = genders[value]
        trans = masculine if target == "m" else feminine
        # An untranslated msgid still contains its "_m"/"_f" underscore
        # suffix; fall back to the raw value in that case.
        if trans.find("_") == -1:
            return trans
    return value
@register.simple_tag
def get_extra_admin_content(user, target, currentpage):
    """Collect and concatenate extra admin content from signal receivers."""
    responses = signals.extra_admin_content.send(
        sender="get_extra_admin_content",
        user=user, location=target, currentpage=currentpage)
    if not responses:
        return ""
    chunks = [chunk for _receiver, chunk in responses]
    merged = reduce(lambda a, b: a + b, chunks)
    return mark_safe("".join(merged))
|
# Simply the class definitions for the bot and worker declarations
# Nice way to make HTTP get requests
import requests
# A nice holder for information we need between function calls
class Bot:
    """Minimal Telegram Bot API wrapper holding state between calls."""

    # NOTE(review): class-level dict, shared across all Bot instances.
    double_resets = {}

    def __init__(self, token):
        self.token = token
        # BUG FIX: the original assigned a *local* variable `handlers`
        # here, and addHandler referenced the bare name `handlers`, which
        # raised NameError on every call; store the dict on the instance.
        self.handlers = {}

    # Adds a single event handler
    def addHandler(self, text, func):
        """Register func to handle messages whose text equals `text`."""
        self.handlers[text] = func

    # Sends a text message to the specified chat_id
    def sendMessage(self, chat_id=None, text=None):
        """POST a text message, retrying until Telegram returns HTTP 200."""
        if chat_id is not None and text is not None:
            url = ('https://api.telegram.org/bot' + self.token +
                   '/sendMessage' +
                   '?chat_id=' + str(chat_id) +
                   '&text=' + text)
            r = requests.post(url)
            # Naive retry loop: keep reposting until the API accepts it.
            while r.status_code != requests.codes.ok:
                r = requests.post(url)

    # Sends as photo using multipart-formdata
    # Note that photo is a file-like object (like a StringIO object)
    def sendImage(self, chat_id=None, photo=None):
        """POST a photo (file-like object) via multipart form-data."""
        if chat_id is not None and photo is not None:
            data = {'chat_id': str(chat_id)}
            files = {'photo': ('board-image.png', photo)}
            requests.post('https://api.telegram.org/bot' + self.token +
                          '/sendPhoto', data=data, files=files)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 12:41:42 2013
Computes the scaled correlation matrix of the MEG signals
and generates a matlab file for each patient
Divides the signal in multiple files
@author: bejar
"""
import scipy.io
from numpy import mean, std
import matplotlib.pyplot as plt
from pylab import *
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score
from nitime.algorithms.cohere import coherence_bavg
import nitime.timeseries as ts
from scipy import corrcoef
from sklearn.decomposition import PCA,KernelPCA
def coherenceMatrix(mdata,linit,lend,nstep):
    """Mean band-averaged coherence matrix over nstep windows of the signal.

    * mdata: (channels, samples) array of MEG data.
    * linit/lend: sample range to analyze.
    * nstep: number of windows the range is split into.

    Returns a (channels, channels) matrix averaged over the windows.
    """
    # Integer window length.  BUG FIX: '/' was integer division under
    # Python 2 but yields a float under Python 3, which breaks range();
    # '//' is identical in Python 2 and correct in Python 3.
    lstep=(lend-linit)//nstep
    corr=np.zeros((mdata.shape[0],mdata.shape[0]))
    for length in range(linit,lend,lstep):
        # Band-averaged coherence of one window at the MEG sampling rate.
        a=ts.TimeSeries(mdata[:,length:length+lstep],sampling_rate=678.19)
        corrs=coherence_bavg(a)
        corr+=corrs
    corr/=nstep
    return corr
cpath='/home/bejar/MEG/Data/'
cres='/home/bejar/MEG/Resultados'
# lnames=['control1-MEG','control2-MEG','control3-MEG','control4-MEG','control5-MEG','control6-MEG','control7-MEG'
# ,'comp1-MEG','comp3-MEG','comp4-MEG' ,'comp5-MEG','comp6-MEG','comp7-MEG','comp13-MEG'
# ,'descomp1-MEG','descomp3-MEG','descomp4-MEG','descomp5-MEG','descomp6-MEG','descomp7-MEG'
# ,'control1-MMN','control2-MMN','control3-MMN','control4-MMN','control5-MMN','control6-MMN','control7-MMN'
# ,'comp1-MMN','comp3-MMN','comp4-MMN' ,'comp5-MMN','comp6-MMN','comp7-MMN','comp13-MMN'
# ,'descomp1-MMN','descomp3-MMN','descomp4-MMN','descomp5-MMN','descomp6-MMN','descomp7-MMN']
lnames=[('control1-MMN',0),('control2-MMN',0),('control3-MMN',0),('control4-MMN',0)
,('control5-MMN',0),('control6-MMN',0),('control7-MMN',0)
,('comp1-MMN',1),('comp3-MMN',1),('comp4-MMN',1) ,('comp5-MMN',1)
,('comp6-MMN',1),('comp7-MMN',1),('comp13-MMN',1)
,('descomp1-MMN',2),('descomp3-MMN',2),('descomp4-MMN',2),('descomp5-MMN',2)
,('descomp6-MMN',2),('descomp7-MMN',2)]
badchannels=['A53','A31','A94']
ntimes = 10
examps = {}
for i in range(ntimes):
examps[i] = None
lcol = []
band='all'
for name,cl in lnames:
print name
lcol.append(cl)
mats=scipy.io.loadmat( cpath+band+'/'+name+'-'+band+'.mat')
data= mats['data']
chann= mats['names']
natt=0
mdata = None
lsnames = []
for i in range(chann.shape[0]):
cname=chann[i]
if cname[0]=='A' and not cname.strip() in badchannels:
natt+=1
lsnames.append(cname)
if mdata == None:
mdata=data[i]
else:
mdata=np.vstack((mdata,data[i]))
#--
size = mdata.shape[1]
blength=int(size/ntimes)
for time in range(ntimes):
print time
cmatrix=coherenceMatrix(mdata,time*blength,(time+1)*blength,10)
examp=np.zeros((natt*(natt-1)/2))
print natt,mdata.shape,cmatrix.shape,examp.shape
p=0
for i in range(cmatrix.shape[0]-1):
for j in range(i+1,cmatrix.shape[0]):
examp[p]=cmatrix[i,j]
p+=1
if examps[time] == None:
examps[time]=examp
else:
examps[time]=np.vstack((examps[time],examp))
print examps[time].shape
for i in range(ntimes):
X=examps[i]
Y=np.array(lcol)
patdata={}
patdata['data']=X
patdata['classes']=Y
scipy.io.savemat(cres+'patcoher-'+band+'-'+str(i),patdata,do_compression=True)
|
"""Construct manual chart outlining columned sheets."""
from matplotlib import pyplot as plt
import xmlStaticOperators
import pandas as pd
class xmlColumnChart(object):
    """Construct manual chart outlining columned sheets.

    Consumes a section dictionary mapping sheet index -> (line-item path,
    gutter word count), detects runs of one- vs. two-column layout from the
    word counts, and saves a scatter chart with section boundaries marked.
    """

    def __init__(self, section_dictionary, key, year):
        # key selects which gutter measurement this chart describes
        # ('center' or 'thirds' -- see chart_sheets); year is used in filenames.
        self.section_dictionary = section_dictionary
        self.key = key
        self.year = year
        self.index_list = [index for index in self.section_dictionary.keys()]
        # value[1] is the word count found in the sheet's gutter area.
        self.word_count_list = [value[1] for value in self.section_dictionary.values()]
        self.word_count_list_out = self.section_continuity()
        self.section_list = self.section_finder()
        # Building and saving the chart is a side effect of construction.
        self.chart_sheets()

    def section_continuity(self):
        """Document points where manual columns are 1 to 2.

        Returns a list of rows [index, bin, bin(+1), bin(+2), bin(+3), path]:
        the binarized word count (1 when count <= 4, i.e. a likely column
        gutter) for each sheet plus the same flag looked ahead 1-3 sheets.
        """
        word_count = pd.DataFrame(self.word_count_list)
        word_count.columns = ['values']
        # Few words in the gutter (<= 4) suggests a two-column layout.
        word_count['values_binary'] = [1 if value <= 4 else 0 for value in word_count['values']]
        # Look-ahead copies of the binary flag (shifted by 1, 2 and 3 rows).
        word_count_shift1 = pd.DataFrame(word_count['values_binary'].shift(-1))
        word_count_shift1.columns = ['values']
        word_count_shift2 = pd.DataFrame(word_count['values_binary'].shift(-2))
        word_count_shift2.columns = ['values']
        word_count_shift3 = pd.DataFrame(word_count['values_binary'].shift(-3))
        word_count_shift3.columns = ['values']
        line_item_path_df = pd.DataFrame([value[0] for value in self.section_dictionary.values()])
        # Rows shifted past the end get sentinel 2 (matches neither 0 nor 1).
        word_count_shift1['values'].fillna(2, inplace=True)
        word_count_shift2['values'].fillna(2, inplace=True)
        word_count_shift3['values'].fillna(2, inplace=True)
        word_count['values_binary1'] = word_count_shift1['values'].astype(int)
        word_count['values_binary2'] = word_count_shift2['values'].astype(int)
        word_count['values_binary3'] = word_count_shift3['values'].astype(int)
        # Replace the raw counts column with the positional index of each sheet.
        word_count.rename(columns={'values': 'index'}, inplace=True)
        word_count['index'] = word_count.index
        word_count_merged = word_count.merge(line_item_path_df, left_index=True, right_index=True, how='inner')
        word_count_list_out = word_count_merged.values.tolist()
        return word_count_list_out

    def section_finder(self):
        """Use output from section_continuity to find section boundaries.

        Returns [[-1, key, initial_value]] followed by one
        [boundary_index + 0.5, key, new_state, path] row per detected
        transition between single- and double-column runs.
        """
        key = False  # current layout state: True once inside a gutter run
        initial_value = False
        section_list = []
        continuity = 0           # length of the current run of gutter rows
        negative_continuity = 0  # length of the current run of non-gutter rows
        # list_item layout: [index, bin, bin+1, bin+2, bin+3, path]
        # (see section_continuity).
        for list_item in self.word_count_list_out:
            if list_item[1] == 1 and (list_item[2] == 1 or list_item[3] == 1):
                continuity += 1
                # A sustained gutter run resets the opposing counter.
                if continuity > 2:
                    negative_continuity = 0
            elif list_item[1] == 0 and (list_item[2] == 0 or list_item[3] == 0):
                negative_continuity += 1
                if negative_continuity > 2:
                    continuity = 0
            # Transition into a gutter run: long non-gutter run followed by
            # three consecutive look-ahead gutter flags.
            if (negative_continuity > 5 and list_item[1] == 0 and list_item[2] == 1 and
                    list_item[3] == 1 and list_item[4] == 1 and key is False):
                key = True
                section_list.append([list_item[0] + .5, self.key, key, list_item[-1]])
            # Transition out of a gutter run (mirror condition).
            elif (continuity > 5 and list_item[1] == 1 and list_item[2] == 0 and
                    list_item[3] == 0 and list_item[4] == 0 and key is True):
                key = False
                section_list.append([list_item[0] + .5, self.key, key, list_item[-1]])
        # The state before the first boundary is the opposite of the first
        # transition's target state.
        if len(section_list) > 0:
            if section_list[0][2] == True:
                initial_value = False
            elif section_list[0][2] == False:
                initial_value = True
        section_list_out = [[-1, self.key, initial_value]]
        for section in section_list:
            section_list_out.append(section)
        return section_list_out

    def chart_sheets(self):
        """Chart words found in center of page by page."""
        page_figure = plt.figure(figsize=(11, 8.5), dpi=150)
        page_plot = page_figure.add_subplot(111)
        page_plot.set_title('Word Count Distribution ({}) in Sheet Area (by index)'.format(self.key),
                            fontsize=13, fontweight='bold', y=1.025)
        page_plot.set_xlabel('Sheet Index')
        page_plot.set_ylabel('Word Count in Gutter')
        page_plot.xaxis.set_label_coords(.5, -.08)
        page_plot.yaxis.set_label_coords(-.08, .5)
        # Vertical line at every detected section boundary.
        for bound in self.section_list:
            page_plot.axvline(x=bound[0])
        page_plot.scatter(self.index_list, self.word_count_list, color='w', edgecolors='k', alpha=1)
        if self.key == 'center':
            save_name = '../../text_output/xml_firm_search_output/{}_word_distribution_center.pdf'.format(self.year)
        elif self.key == 'thirds':
            save_name = '../../text_output/xml_firm_search_output/{}_word_distribution_thirds.pdf'.format(self.year)
        # NOTE(review): if self.key is neither 'center' nor 'thirds',
        # save_name is unbound and the next line raises NameError -- confirm
        # callers only pass these two keys.
        xmlStaticOperators.clear_destination(save_name)
        page_figure.savefig(save_name)
|
# partial方法: 偏对象, 将一个函数copy给另一个函数, 可以改变形参. 返回的是一个可调用对象
import functools
def my_func(a, b=2):
    """my_func's doc"""
    # Render both arguments around a five-dash separator.
    message = "{}-----{}".format(a, b)
    print(message)
if __name__ == '__main__':
    # Bind the first positional arg and override b; p1 is a callable object.
    p1 = functools.partial(my_func, "para_a", b="para_b")
    p1()
    print(my_func)
    print(p1)
    print(my_func.__name__)
    # partial objects expose the wrapped callable and the bound arguments.
    print(p1.func)
    print(p1.args)
    print(p1.keywords)
    # A partial object lacks the wrapped function's __name__ and __doc__;
    # update_wrapper copies those attributes from the original onto it.
    print('-' * 40)
    functools.update_wrapper(p1, my_func)
    print(p1.__name__)
    print(p1.__doc__)
    # partial also works on classes and on arbitrary callable objects.
    print('-' * 40)

    class MyClass:
        def __init__(self, a, b):
            print(a)
            print(b)

        def __call__(self, c, d):
            print("call", (self, c, d))

    o = MyClass(a=1, b=2)
    o(c=3, d=4)
    # Partial over a class: calling it constructs an instance.
    p3 = functools.partial(MyClass, a=10, b=20)
    p3()
    # Partial over an instance: calling it invokes __call__.
    p4 = functools.partial(o, 30, d=40)
    p4()
    # Decorators: without functools.wraps the decorated function would expose
    # the bare wrapper's attributes instead of the original's.
    print('-' * 40)

    def simple_decorator(f):
        @functools.wraps(f)
        def decorated(a="decorated", b=1):
            print('do something')
            return f(a, b=b)
        return decorated

    @simple_decorator
    def decorated_myfunc(a, b):
        print('qwertyuiop')
        return

    decorated_myfunc()
|
# Reproducibility: seed every RNG (Python, NumPy, TensorFlow) up front.
random_state = 9
import random
random.seed(random_state)
import numpy as np
np.random.seed(random_state)
import tensorflow as tf
tf.set_random_seed(random_state)
from src.data import DataBuildClassifier
from src.NN import get_model
from src.callbacks import LossMetricHistory
from sklearn.model_selection import train_test_split, StratifiedKFold
import numpy as np  # NOTE(review): duplicate import -- numpy is already imported above
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.metrics import roc_auc_score, roc_curve
import os, sys
#if len(sys.argv) < 3:
#    print("Usage: \n"
#          "python classification_filtering.py path_to_data path_to_logs filtration_rate[0...0.5) \n"
#          "For example, if you want to discard 10% of data in each class \n"
#          "and then train the network again, use something like: \n"
#          "./classification_filtering.py ../Data ./logs/cf 0.1")
#    exit()

# Data import and making train, test and validation sets
sbjs = [32] #[25,26,27,28,29,30,32,33,34,35,36,37,38]
frs = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4] # filter rates
path_to_data = sys.argv[1] #'/home/likan_blk/BCI/NewData/' #os.path.join(os.pardir,'sample_data')
# Epochs windowed 0.2-0.5 s, baseline-corrected against 0.2-0.3 s.
data = DataBuildClassifier(path_to_data).get_data(sbjs, shuffle=False,
                                                  windows=[(0.2, 0.5)],
                                                  baseline_window=(0.2, 0.3), resample_to=323)

# Some files for logging
logdir = sys.argv[2] #os.path.join(os.getcwd(),'logs', 'cf_fr')
if not os.path.isdir(logdir):
    os.makedirs(logdir)
# One pair of CSV logs (AUC scores, discarded indices) per filter rate,
# each initialized with its header row.
for fr in frs:
    fname_auc = os.path.join(logdir, 'auc_scores')
    with open(fname_auc+str(fr)+'.csv', 'w') as fout:
        fout.write('subject,auc_noisy,auc_pure,samples_before,samples_after\n')
    fname_err_ind = os.path.join(logdir, 'err_indices')
    with open(fname_err_ind+str(fr)+'.csv', 'w') as fout:
        fout.write('subject,class,indices\n')
epochs = 150
dropouts = (0.72,0.32,0.05)
#if len(sys.argv) > 3:
#    filt_rate = sys.argv[3]
#else:
#    filt_rate = "all"
# Iterate over subjects and clean label noise for all of them
# Iterate over subjects and clean label noise for all of them
for sbj in sbjs:
    print("Classification filtering for subject %s data"%(sbj))
    np.random.seed(random_state)
    tf.set_random_seed(random_state)
    X, y = data[sbj][0], data[sbj][1]
    # Hold out 20% of the epochs as a stratified test set.
    train_ind, test_ind = train_test_split(np.arange(len(y)), shuffle=True,
                                           test_size=0.2, stratify=y,
                                           random_state=108)
    X_train, y_train, X_test, y_test = X[train_ind], y[train_ind], X[test_ind], y[test_ind]
    time_samples_num = X_train.shape[1]
    channels_num = X_train.shape[2]
    cv = StratifiedKFold(n_splits=4, shuffle=False)
    val_inds = []
    fold_pairs = []
    for tr_ind, val_ind in cv.split(X_train, y_train):
        X_tr, X_val = X_train[tr_ind], X_train[val_ind]
        y_tr, y_val = y_train[tr_ind], y_train[val_ind]
        fold_pairs.append((X_tr, y_tr, X_val, y_val))
        val_inds.append(train_ind[val_ind]) # indices of all the validation instances in the initial X array
    # Per-filter-rate accumulators of sample indices (into the initial X/y).
    pure_ind = {}
    err_target_ind = {}
    err_nontarg_ind = {}
    for fr in frs:
        pure_ind[fr] = np.array([], dtype=np.int32)
        err_target_ind[fr] = np.array([], dtype=np.int32)
        err_nontarg_ind[fr] = np.array([], dtype=np.int32)
    bestepochs = np.array([])
    for fold, (X_tr, y_tr, X_val, y_val_bin) in enumerate(fold_pairs):
        y_tr = to_categorical(y_tr)
        y_val = to_categorical(y_val_bin)
        callback = LossMetricHistory(n_iter=epochs, verbose=1,
                                     fname_bestmodel=os.path.join(logdir, "model%s.hdf5"%(fold)))
        np.random.seed(random_state)
        tf.set_random_seed(random_state)
        model = get_model(time_samples_num, channels_num, dropouts=dropouts)
        model.fit(X_tr, y_tr, epochs=epochs,
                  validation_data=(X_val, y_val), callbacks=[callback],
                  batch_size=64, shuffle=True)
        bestepochs = np.append(bestepochs, callback.bestepoch+1)
        # Classification filtering of validation data
        model = load_model(os.path.join(logdir, "model%s.hdf5"%(fold)))
        y_pred = model.predict(X_val)[:,1]
        for fr in frs:
            ind = np.array(val_inds[fold]) # indices of validation samples in the initial dataset
            n_err1 = int(np.round(y_val_bin.sum()*fr)) # Number of samples in target class to be thrown away
            n_err0 = int(np.round((len(y_val_bin)-y_val_bin.sum())*fr)) # Number of samples in nontarget class
                                                                       # to be thrown away
            # Descending sort of nontarget predictions so the most erroneous
            # samples (highest target-probability) come first.
            argsort0 = np.argsort(y_pred[y_val_bin==0])[::-1]
            # Ascending sort of target predictions so the most erroneous
            # samples (lowest target-probability) come first.
            argsort1 = np.argsort(y_pred[y_val_bin==1])
            target_ind = ind[y_val_bin==1][argsort1]
            nontarg_ind = ind[y_val_bin==0][argsort0]
            # BUG FIX: the original called np.append(err_target_ind, ...) /
            # np.append(err_nontarg_ind, ...), passing the whole dict instead
            # of the per-rate array and corrupting the accumulators.
            err_target_ind[fr] = np.append(err_target_ind[fr], target_ind[:n_err1])
            err_nontarg_ind[fr] = np.append(err_nontarg_ind[fr], nontarg_ind[:n_err0]) # Take demanded amount of error samples
            pure_ind[fr] = np.append(pure_ind[fr], target_ind[n_err1:])
            pure_ind[fr] = np.append(pure_ind[fr], nontarg_ind[n_err0:])
    # Mean best epoch over folds is reused for both final trainings below.
    bestepoch = int(round(bestepochs.mean()))
    np.random.seed(random_state)
    tf.set_random_seed(random_state)
    model_noisy = get_model(time_samples_num, channels_num, dropouts=dropouts)
    model_noisy.fit(X_train, to_categorical(y_train),
                    epochs=bestepoch,
                    batch_size=64, shuffle=False)
    # Test noisy classifier
    y_pred_noisy = model_noisy.predict(X_test)
    y_pred_noisy = y_pred_noisy[:, 1]
    auc_noisy = roc_auc_score(y_test, y_pred_noisy)
    for fr in frs:
        np.random.shuffle(pure_ind[fr])
        X_train_pure = X[pure_ind[fr]]
        y_train_pure = y[pure_ind[fr]]
        # OPTIONALLY: saving erroneous sample indices
        with open(fname_err_ind+str(fr)+'.csv', 'a') as fout:
            fout.write(str(sbj))
            fout.write(',0,')
            fout.write(','.join(map(str,err_nontarg_ind[fr])))
            fout.write('\n')
            fout.write(str(sbj))
            fout.write(',1,')
            fout.write(','.join(map(str,err_target_ind[fr])))
            fout.write('\n')
        # Testing and comparison of cleaned and noisy data
        samples_before = y_train.shape[0]
        samples_after = y_train_pure.shape[0]
        y_train_pure = to_categorical(y_train_pure)
        # NOTE(review): this callback is created but never passed to fit()
        # below -- presumably intentional since bestepoch is fixed; confirm.
        callback = LossMetricHistory(n_iter=epochs, verbose=1,
                                     fname_bestmodel=os.path.join(logdir, "model_pure%s.hdf5" % str(fr)))
        np.random.seed(random_state)
        tf.set_random_seed(random_state)
        model_pure = get_model(time_samples_num, channels_num, dropouts=dropouts)
        model_pure.fit(X_train_pure, y_train_pure, epochs=bestepoch,
                       batch_size=64, shuffle=False)
        y_pred_pure = model_pure.predict(X_test)
        y_pred_pure = y_pred_pure[:,1]
        auc_pure = roc_auc_score(y_test,y_pred_pure)
        with open(fname_auc+str(fr)+'.csv', 'a') as fout:
            fout.write(','.join(map(str,[sbj,auc_noisy,auc_pure,samples_before,samples_after])))
            fout.write('\n')
|
#encoding: utf-8
import copy
import permutaciones
#Given n, returns a list of all the permutations
#of the list from 1..n, example:
#permutatiosnR(2) returns [[1 2] [2 1] [1 1] [2 2]]
def permutationsR(n):
    """Return every length-n tuple (as a list) over the values 1..n.

    Despite the name, this enumerates the full cartesian product -- e.g.
    permutationsR(2) yields [[1, 1], [1, 2], [2, 1], [2, 2]] -- matching the
    original implementation's output and ordering.
    """
    values = generaLista(n)
    # Start from every ordered pair, then grow one position per round.
    combos = paresLista(values)
    for _ in range(2, n):
        combos = [prefix + [value] for value in values for prefix in combos]
    return combos
def generaLista(n):
    """Build the list [1, 2, ..., n] (empty when n <= 0)."""
    return [k + 1 for k in range(n)]
#Genera todos los pares posibles de una lista de números
def paresLista(l):
    """Return every ordered pair [a, b] drawn from l (including a == b)."""
    return [[first, second] for first in l for second in l]
#retorna 0 si el elemento e pertenece a la lista l y 1 de otra manera
def pertenece(e, l):
    """Return 0 when e occurs in l, 1 otherwise (inverted membership flag)."""
    return 0 if e in l else 1
def hay_jaque_fila(combinacion):
    """Return 1 when two queens share a row (duplicate value), else 0."""
    n = len(combinacion)
    for a in range(n):
        for b in range(a + 1, n):
            if combinacion[a] == combinacion[b]:
                return 1
    return 0
def hay_jaque_diag(combinacion):
    """Return 1 when two queens share a diagonal, else 0.

    Queens at positions (i, combinacion[i]) and (j, combinacion[j]) attack
    diagonally exactly when |value gap| equals |index gap|.
    """
    n = len(combinacion)
    for a in range(n):
        for b in range(a + 1, n):
            if abs(combinacion[b] - combinacion[a]) == b - a:
                return 1
    return 0
def imprime(tab):
    # Print the board as an n x n grid: row i shows '*' in column tab[i]-1
    # (positions are 1-based) and '-' everywhere else. Python 2 print syntax.
    i = 0
    n = len(tab)
    while i < n :
        posicion_dama = tab[i] - 1
        j = 0
        while j < n :
            if j == posicion_dama :
                print "* ",
            else :
                print "- ",
            j = j + 1
        # End the current row.
        print
        i = i + 1
def main():
    # Solve the classic 8-queens problem by brute force over candidate boards.
    n = 8
    # permutaciones_tr presumably enumerates permutations of [1..n]
    # (external module) -- each board is a list of queen columns per row.
    tableros = permutaciones.permutaciones_tr(generaLista(n),[])
    posicion = 0
    for t in tableros :
        # Keep boards with no row clash and no diagonal clash.
        if hay_jaque_fila(t) == 0 :
            if hay_jaque_diag(t) == 0:
                # A solution: print the vector, its running number, and board.
                print t
                posicion += 1
                print 'Posición :', posicion
                imprime(t)
main()
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import uart
from esphome.const import CONF_ID
DEPENDENCIES = ['uart']  # require a configured UART bus before this component loads

# C++ namespace/class bindings produced by the ESPHome code generator.
empty_uart_component_ns = cg.esphome_ns.namespace('empty_uart_component')
EmptyUARTComponent = empty_uart_component_ns.class_('EmptyUARTComponent', cg.Component, uart.UARTDevice)

# Config schema: just an ID plus the standard component and UART options.
CONFIG_SCHEMA = cv.Schema({
    cv.GenerateID(): cv.declare_id(EmptyUARTComponent)
}).extend(cv.COMPONENT_SCHEMA).extend(uart.UART_DEVICE_SCHEMA)


def to_code(config):
    """Generate the C++ setup code (ESPHome coroutine, hence the yields)."""
    var = cg.new_Pvariable(config[CONF_ID])
    yield cg.register_component(var, config)
    yield uart.register_uart_device(var, config)
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 8 21:05:16 2015
@author: bolaka
submission1.csv - first pass @ 1.25643166239
submission2.csv - first pass
"""
import os
os.chdir('/home/bolaka/python-workspace/CVX-timelines/')

# imports
import time
import datetime
from cvxtextproject import *
from mlclassificationlibs import *

setPath('/home/bolaka/Bike Sharing')

trainfilename = 'train.csv'
testfilename = 'test.csv'
actualsfilename = 'actuals.csv'

# read data from CSV files (indexed by the datetime column)
idCol = 'datetime'
training = pd.read_csv(trainfilename, index_col=idCol)
testing = pd.read_csv(testfilename, index_col=idCol)
validation = pd.read_csv(actualsfilename, index_col=idCol)
validation = validation[['casual', 'registered', 'count']]

# add metrics dummy columns to test set so train/test can be concatenated
testing['count'] = 0
testing['casual'] = 0
testing['registered'] = 0

# merge the training and test sets for joint feature engineering
trainingLen = len(training)
pieces = [ training, testing ]
combined = pd.concat(pieces)

# extract calendar features from the date-timestamps
combined['timestamp'] = [datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in combined.index]
combined['year'] = [x.year for x in combined['timestamp'] ]
combined['month'] = [x.month for x in combined['timestamp'] ]
combined['hour'] = [x.hour for x in combined['timestamp'] ]
combined['weekday'] = [x.weekday() for x in combined['timestamp'] ]
#combined[ 'weekend'] = 0
#combined.loc[ (combined['holiday'] == 0) & (combined['workingday'] == 0) ,'weekend'] = 1
#combined['daypart'] = 4
#combined.loc[ (combined['hour'] >= 4) & (combined['hour'] < 10), 'daypart' ] = 1
#combined.loc[ (combined['hour'] >= 10) & (combined['hour'] < 15), 'daypart' ] = 2
#combined.loc[ (combined['hour'] >= 15) & (combined['hour'] < 21), 'daypart' ] = 3
# flag morning (7-8) and evening (17-18) commute hours
combined['peak_hrs'] = 0
combined.loc[ (combined['hour'] >= 7) & (combined['hour'] <= 8), 'peak_hrs' ] = 1
combined.loc[ (combined['hour'] >= 17) & (combined['hour'] <= 18), 'peak_hrs' ] = 1

# interaction features: combine hour with season/weather/temperature
combined['working_hr'] = (combined['workingday'] * 100) + combined['hour']
combined['weather_hr'] = (combined['weather'] + combined['hour']/100) * 100
combined['temp_hr'] = (combined['temp'] / (combined['hour'] + 1))
combined['season_hr'] = (combined['season'] + combined['hour']/100) * 100
combined['season_weather'] = (combined['season'] + combined['weather']/10) * 10
combined['season_temp'] = (combined['season'] * combined['temp'])
#combined['temp_hr'] = (combined['temp'] / (combined['hour'] + 1))

# binning of continuous features
#combined['atemp_cat'] = pd.qcut(combined.atemp.values, [0, .25, .5, .75, 1], labels=[1, 2, 3, 4])
#combined['temp_cat'] = pd.cut(combined.temp.values, [combined.temp.min(), combined.temp.mean(), combined.temp.max()], labels=[1, 2]
#                              , include_lowest=True)
#combined['temp_cat'] = [int(x) for x in combined['temp_cat']]
#combined['temp_cat'] = pd.cut(combined.temp.values, 4, labels=[1, 2, 3, 4])
#combined['daylight'] = pd.cut(combined.hour.values, [0, 8, 24], labels=[1,2], include_lowest=True)
combined['windspeed_cat'] = pd.cut(combined.windspeed.values, 5, labels=[1, 2, 3, 4, 5])
combined['temp2'] = combined['temp']**2
combined['humidity2'] = combined['humidity']**2

# weather outlier!
#combined.loc[ (combined['weather'] == 4), 'weather' ] = 3

# separate back into training and test sets
training = combined.head(trainingLen)
testing = combined.drop(training.index)

# handle skewed counts: model log10(count + 1) targets instead
# NOTE(review): training is a slice of combined, so these assignments may
# raise pandas SettingWithCopyWarning -- confirm this is intended.
training['registered_log10'] = np.log10(training['registered'].values + 1)
training['casual_log10'] = np.log10(training['casual'].values + 1)
combined.to_csv('combined-features.csv')
training.to_csv('training-features.csv')
testing.to_csv('testing-features.csv')

# drop metrics from the testing set
testing.drop(['count','registered','casual'], axis=1, inplace=True)

#training_nonworking = training.loc[ (training['workingday'] == 0) ]

# separate models for registered and casual riders, each with its own
# excluded-feature list
featuresUnused1 = [ 'casual','registered','count', 'timestamp', 'atemp', 'windspeed',
                    'registered_log10', 'casual_log10', 'temp', 'humidity', 'working_hr' ] # , 'season'
results1 = analyzeMetricNumerical('registered_log10',training, featuresUnused1)
showFeatureImportanceNumerical(training, results1['features'], 'registered_log10')
featuresUnused2 = [ 'casual','registered','count', 'timestamp', 'atemp', 'windspeed', 'season_hr',
                    'registered_log10', 'casual_log10', 'temp', 'humidity', 'temp_hr', 'weather_hr',
                    'peak_hrs', 'workingday'] # , 'season', 'workingday'
#featuresUnused2 = [ 'casual','registered','count', 'timestamp', 'atemp', 'humidity', 'temp',
#                    'registered_log10', 'casual_log10', 'workingday'] # , 'season', 'workingday'
results2 = analyzeMetricNumerical('casual_log10', training, featuresUnused2)
showFeatureImportanceNumerical(training, results2['features'], 'casual_log10')

# predict on the test set and invert the log10 transform
temp1 = predict(results1['model'], testing[results1['features']], 'registered_log10')
testing['registered'] = np.power( 10, temp1['registered_log10'].values ) - 1
temp2 = predict(results2['model'], testing[results2['features']], 'casual_log10')
testing['casual'] = np.power( 10, temp2['casual_log10'].values ) - 1
testing['count'] = testing['registered'] + testing['casual']

# score against the actuals using RMSLE
print('rmsle of casual = ', rmsle(validation['casual'].values, testing['casual'].values) )
print('rmsle of registered = ', rmsle(validation['registered'].values, testing['registered'].values) )
print('rmsle of count = ', rmsle(validation['count'].values, testing['count'].values) )

#testing = testing[['count']]
#testing.to_csv('submission13.csv', sep=',', encoding='utf-8')
def plotByGrp(groups, x, y):
    """Scatter-plot column y against column x, one labelled color per group.

    `groups` is a pandas GroupBy object; each (name, frame) pair becomes one
    series. NOTE(review): pd.tools.plotting is a private, long-deprecated
    pandas API (removed in pandas >= 0.20) and ax.set_color_cycle was removed
    in matplotlib 2.x -- this only runs on old library versions.
    """
    # Plot
    plt.rcParams.update(pd.tools.plotting.mpl_stylesheet)
    colors = pd.tools.plotting._get_standard_colors(len(groups), color_type='random')
    fig, ax = plt.subplots()
    fig.set_size_inches(11,8)
    ax.set_color_cycle(colors)
    ax.margins(0.05)
    for name, group in groups:
        ax.plot(group[x], group[y], marker='o', linestyle='', ms=5, label=name)
    ax.legend(numpoints=1, loc='upper right')
    # ax.set_xlim(-102.4, -101.8)
    # ax.set_ylim(31.2, 31.8)
    plt.show()
#groups = training_nonworking.groupby('temp_hr')
#plotByGrp(groups, 'hour', 'casual')
|
from ..cameras_calibration import CamerasCalibration
class TumCamerasCalibration(CamerasCalibration):
    """Camera calibration preset for the TUM RGB-D dataset."""

    def __init__(self, final_size, original_size, device):
        # Hard-coded intrinsics (focal lengths and principal point, in
        # pixels) of the TUM RGB-D sensor at its original resolution.
        original_focal_x = 535.4
        original_focal_y = 539.2
        original_cx = 320.1
        original_cy = 247.6
        # Rescale the intrinsics from original_size to the requested final_size.
        camera_matrix = self.calculate_camera_matrix(final_size, original_size, original_focal_x, original_focal_y,
                                                     original_cx, original_cy)
        # NOTE(review): the unit baseline looks like a placeholder for this
        # monocular RGB-D setup -- confirm how downstream code uses it.
        camera_baseline = 1.0
        # Same matrix for both cameras of the base-class stereo interface.
        super().__init__(camera_baseline, camera_matrix, camera_matrix, device)
|
from .. import api
import time
class Winmm(api.ApiHandler):
    """
    Emulates functions from winmm.dll
    """

    name = 'winmm'
    # Re-export the framework's hook/import-data decorators for local use.
    apihook = api.ApiHandler.apihook
    impdata = api.ApiHandler.impdata

    def __init__(self, emu):
        super(Winmm, self).__init__(emu)
        # Register all @apihook-decorated methods with the emulator.
        super(Winmm, self).__get_hook_attrs__(self)

    @apihook('timeGetTime', argc=0)
    def timeGetTime(self, emu, argv, ctx={}):
        '''
        DWORD timeGetTime(); // return the system time, in milliseconds
        '''
        # Monotonic clock scaled to milliseconds, masked to 32 bits to match
        # the Win32 DWORD return type (wraps like the real API).
        return int(time.monotonic() * 1000) & 0xffffffff
|
#Copyright (c) 2012, Jakub Matys <matys.jakub@gmail.com>
#All rights reserved.
import logging
from gfcontroller.backends.base import GpuBackend
CRITICAL_SPEED = 100  # failsafe fan speed (%) used above the configured temperature range
MAX_SPEED = 70  # NOTE(review): unused in this module; presumably read by callers -- confirm

LOGGER = logging.getLogger('_gfcontroller')


class GpuFanspeedController:
    """Maps GPU temperature to fan speed linearly between configured bounds.

    Below lowest_temp the fan runs at lowest_speed; between the bounds the
    speed is interpolated linearly; above highest_temp CRITICAL_SPEED is used.
    Call initialize() once before the periodic control() calls.
    """

    def __init__(self, backend):
        assert isinstance(backend, GpuBackend)
        self._backend = backend
        self._lowest_speed = None
        self._highest_speed = None
        self._lowest_temp = None
        self._highest_temp = None
        self._temp_to_speed_ratio = None
        self._last_temp = None

    def initialize(self, lowest_speed, highest_speed, lowest_temp, highest_temp):
        """Configure the speed/temperature bounds and seed the last-seen temperature."""
        self._lowest_speed = lowest_speed
        self._highest_speed = highest_speed
        self._lowest_temp = lowest_temp
        self._highest_temp = highest_temp
        # Slope of the linear temperature -> speed mapping.
        self._temp_to_speed_ratio = (self._highest_speed - self._lowest_speed) / (self._highest_temp - self._lowest_temp)
        self._last_temp = self._backend.temperature
        # Lazy %-style logging args (formatting only happens if the record is emitted).
        LOGGER.info('Controller initialized: lowest_speed = %d, highest_speed = %d, lowest_temp = %d, highest_temp = %d,'
                    ' temp_to_speed_ratio = %f, last_temp = %d',
                    self._lowest_speed, self._highest_speed, self._lowest_temp, self._highest_temp,
                    self._temp_to_speed_ratio, self._last_temp)

    @property
    def lowest_speed(self):
        return self._lowest_speed

    @property
    def highest_speed(self):
        return self._highest_speed

    @property
    def lowest_temp(self):
        return self._lowest_temp

    @property
    def highest_temp(self):
        return self._highest_temp

    def control(self):
        """Read the current temperature and set the fan speed accordingly."""
        actual_temp = self._backend.temperature
        actual_speed = self._backend.fanspeed
        if actual_temp < self._lowest_temp:
            new_speed = self._lowest_speed
        elif self._lowest_temp <= actual_temp <= self._highest_temp:
            temp_diff = actual_temp - self._lowest_temp
            new_speed = self._lowest_speed + self._temp_to_speed_ratio * temp_diff
        else:
            # Above the configured range: force the failsafe speed.
            new_speed = CRITICAL_SPEED
        LOGGER.debug('Controlling: device = %s, last_temp = %d, last_speed = %d, actual_temp = %d, new_speed = %f',
                     self._backend.device_name, self._last_temp, actual_speed, actual_temp, new_speed)
        self._backend.fanspeed = int(new_speed)
        self._last_temp = actual_temp
|
from menu import Menu
class Main:
    """Entry point: runs the interactive menu loop until the user exits."""

    def __init__(self):
        menu = Menu()
        # Keep showing the menu and dispatching the chosen option until the
        # Menu object flags that the user asked to quit.
        while not menu.exit_program:
            menu.display_menu()
            menu.menu_option()


if __name__ == "__main__":
    main = Main()
|
def mod10(card):
    """Luhn mod-10 checksum over an already-reversed digit list.

    `card` must have its least significant digit first; every second digit
    is doubled (10..18 collapse to 1..9). Returns 1 when the total is
    divisible by 10, else 0.
    """
    total = 0
    for position, value in enumerate(card):
        if position % 2 != 0:
            doubled = value * 2
            total += doubled - 9 if doubled >= 10 else doubled
        else:
            total += value
    return 1 if total % 10 == 0 else 0


def isValid(card):
    """Return 1 when `card` (digits, most significant first) is plausible.

    Checks: 13-16 digits, issuer digit between 3 and 6 (3 allowed only for
    37xx Amex prefixes), and a passing Luhn checksum.
    """
    if not 13 <= len(card) <= 16:
        return 0
    if not 3 <= card[0] <= 6:
        return 0
    if card[0] == 3 and card[1] != 7:
        return 0
    return mod10(card[::-1])
# Prompt repeatedly until the user types something that parses as an integer.
while True:
    try:
        num = int(input("Input Card Number: "))
    except ValueError:
        continue
    else:
        break
# Split the number into a list of its digits, most significant first.
card = [int(x) for x in str(num)]
if isValid(card)== 1:
    print("CARD IS VALID")
elif isValid(card)== 0:
    print("CARD IS INVALID")
|
from setuptools import setup

# Minimal packaging metadata: ships the single tictactoe module.
setup(
    name='TicTacToe',
    version='1.0',
    description='Simple tictactoe game',
    author='Alicja Polanowska',
    py_modules=['tictactoe'],
)
|
import sys
import os
# image load/save
import imageio
# image manipulation
import numpy as np
import math
def filler2(canvas, pattern, i, j, h, w):
    """Copy the top-left h-by-w corner of a 2-D (grayscale) pattern onto
    canvas with its upper-left corner at row i, column j (in place)."""
    target = canvas[i:i + h, j:j + w]
    target[...] = pattern[:h, :w]
def filler3(canvas, pattern, i, j, h, w):
    """Copy the top-left h-by-w corner of a 3-channel (color) pattern onto
    canvas with its upper-left corner at row i, column j (in place)."""
    target = canvas[i:i + h, j:j + w, :]
    target[...] = pattern[:h, :w, :]
def pattern_extender(src, dest, new_shape):
    """Tile the image at `src` across a canvas of `new_shape` (rows, cols)
    and save the result to `dest`, printing a console progress bar."""
    pattern = imageio.imread(src)
    assert pattern.shape[0] <= new_shape[0] and pattern.shape[1] <= new_shape[1],\
        ("Extended image's shape should be at least as large as the pattern shape, "
         "but got ({}, {}) whereas pattern shape is ({}, {})".format(*new_shape,
                                                                    *pattern.shape))
    is_bw = len(pattern.shape) == 2  # is black and white
    # Grayscale images need the 2-D filler; color images the 3-channel one.
    fill = filler2 if is_bw else filler3
    canvas = np.zeros(new_shape if is_bw else (*new_shape, 3))
    # prepare progress bar: draw the frame, then backspace to its start
    progressbar_width, progress = 50, 0
    sys.stdout.write("[%s]" % (" " * progressbar_width))
    sys.stdout.flush()
    sys.stdout.write("\b" * (progressbar_width + 1))
    i = 0
    while i < new_shape[0]:
        j = 0
        while j < new_shape[1]:
            # Clip the tile at the right/bottom edges of the canvas.
            h = (pattern.shape[0]
                 if i + pattern.shape[0] <= new_shape[0]
                 else new_shape[0] - i)
            w = (pattern.shape[1]
                 if j + pattern.shape[1] <= new_shape[1]
                 else new_shape[1] - j)
            fill(canvas, pattern, i, j, h, w)
            j += pattern.shape[1]
        i += pattern.shape[0]
        # update progress bar: emit one dash per newly completed increment
        current_progress = math.floor((i / new_shape[0]) * progressbar_width)
        for p in range(current_progress - progress):
            sys.stdout.write("-")
        sys.stdout.flush()
        progress = current_progress
    sys.stdout.write("]\nsaving...\n")
    imageio.imsave(dest, canvas.astype('uint8'))
# CLI entry: expects pattern path, output path, and the new height and width.
argv = sys.argv
assert len(argv) == 5, ("input must be 4 arguments: pattern name,"
                        "output image name, pixel height and width, "
                        "but got {} argument{}"
                        .format(len(argv) - 1, 's' if len(argv) > 2 else ''))
src_path, dest_path = argv[1], argv[2]
new_shape = (int(argv[3]), int(argv[4]))
pattern_extender(src_path, dest_path, new_shape)
# BUG FIX: report the file that was just created (dest_path); the original
# printed the input pattern path (src_path) instead.
print("Completed creating new image: {}".format(dest_path))
|
# coding: utf-8
import urllib.parse
from requests_oauthlib import OAuth1
from .httpclient import requests
from .config import get_config
from .log import lg
from . import color
class OauthError(Exception):
    """Raised when a Twitter OAuth token request returns a non-200 response."""
    pass
def get_oauth_token():
    """Run the 3-step OAuth 1.0a out-of-band (PIN) flow against Twitter.

    Reads the consumer key/secret from the app config, walks the user
    through browser authorization, and returns the resulting
    (access_key, access_secret) pair. Raises OauthError on a non-200
    response to either token request.
    """
    config = get_config()
    consumer_key = config['consumer_key']
    consumer_secret = config['consumer_secret']

    # oauth_callback=oob selects the PIN-based (out-of-band) flow.
    request_token_url = 'https://api.twitter.com/oauth/request_token?oauth_callback=oob'
    access_token_url = 'https://api.twitter.com/oauth/access_token'
    authorize_url = 'https://api.twitter.com/oauth/authorize'

    # consumer = oauth.Consumer(consumer_key, consumer_secret)
    # client = oauth.Client(consumer)
    oauth = OAuth1(consumer_key, client_secret=consumer_secret)

    # Step 1: Get a request token
    resp = requests.post(request_token_url, auth=oauth)
    if resp.status_code != 200:
        raise OauthError(
            'Invalid response on request token {} {}.'.format(resp.status_code, resp.content))
    # NOTE(review): resp.content is bytes, so on Python 3 parse_qsl yields
    # bytes keys while the lookups below use str keys -- presumably this was
    # written against decoded content or Python 2 semantics; confirm.
    request_token = dict(urllib.parse.parse_qsl(resp.content))
    lg.debug('Request token (oauth_token, oauth_token_secret): %s, %s',
             request_token['oauth_token'], request_token['oauth_token_secret'])

    # Step 2: Redirect to the provider
    print('Go to the following link in your browser:')
    print(color.blue(color.underline('%s?oauth_token=%s' % (authorize_url, request_token['oauth_token']))))
    print()
    verifier = input('Enter PIN: ')
    print()

    # Step 3: get access token & secret
    oauth = OAuth1(
        consumer_key,
        client_secret=consumer_secret,
        resource_owner_key=request_token['oauth_token'],
        resource_owner_secret=request_token['oauth_token_secret'],
        verifier=verifier)
    resp = requests.post(access_token_url, auth=oauth)
    if resp.status_code != 200:
        raise OauthError(
            'Invalid response on access token {} {}.'.format(resp.status_code, resp.content))
    access_token = dict(urllib.parse.parse_qsl(resp.content))
    access_key = access_token['oauth_token']
    access_secret = access_token['oauth_token_secret']
    print('Access Token:')
    print(' - oauth_token        = %s' % access_key)
    print(' - oauth_token_secret = %s' % access_secret)
    print()
    return access_key, access_secret
|
import pytest
from dash import Dash
from rubicon_ml.viz import MetricListsComparison
def test_metric_lists_comparison(viz_experiments):
    """Constructor stores experiments, column names and the selected metric."""
    metric_comparison = MetricListsComparison(
        column_names=["var_0", "var_1", "var_2", "var_3", "var_4"],
        experiments=viz_experiments,
        selected_metric="test metric 2",
    )
    # Every experiment passed in must be present exactly once.
    expected_experiment_ids = [e.id for e in viz_experiments]
    for experiment in metric_comparison.experiments:
        assert experiment.id in expected_experiment_ids
        expected_experiment_ids.remove(experiment.id)
    assert len(expected_experiment_ids) == 0
    assert metric_comparison.column_names == ["var_0", "var_1", "var_2", "var_3", "var_4"]
    assert metric_comparison.selected_metric == "test metric 2"


def test_metric_lists_comparison_load_data(viz_experiments):
    """load_experiment_data builds per-experiment records and defaults the
    selected metric to the first metric name."""
    metric_comparison = MetricListsComparison(experiments=viz_experiments)
    metric_comparison.load_experiment_data()
    expected_experiment_ids = [e.id for e in viz_experiments]
    expected_metric_names = ["test metric 2", "test metric 3"]
    for experiment_id, record in metric_comparison.experiment_records.items():
        assert experiment_id in expected_experiment_ids
        for metric_name in expected_metric_names:
            assert metric_name in record
    assert metric_comparison.selected_metric == expected_metric_names[0]


def test_metric_lists_comparison_load_data_throws_error(viz_experiments):
    """A selected_metric that no experiment logged raises ValueError."""
    metric_comparison = MetricListsComparison(
        experiments=viz_experiments,
        selected_metric="nonexistant metric",
    )
    with pytest.raises(ValueError) as e:
        metric_comparison.load_experiment_data()
    assert "no metric named `selected_metric` 'nonexistant metric'" in str(e.value)


def test_metric_list_comparison_layout(viz_experiments):
    """The Dash layout nests the heatmap inside its container div."""
    metric_comparison = MetricListsComparison(experiments=viz_experiments)
    metric_comparison.load_experiment_data()
    layout = metric_comparison.layout
    assert len(layout.children) == 2
    assert layout.children[-1].children.id == "metric-heatmap-container"
    assert layout.children[-1].children.children.id == "metric-heatmap"


@pytest.mark.parametrize("is_linked,expected", [(False, 1), (True, 2)])
def test_metric_correlation_plot_register_callbacks(viz_experiments, is_linked, expected):
    """Linking the experiment table adds one extra input to the single
    registered update_metric_heatmap callback."""
    metric_comparison = MetricListsComparison(experiments=viz_experiments)
    metric_comparison.app = Dash(__name__, title="test callbacks")
    metric_comparison.register_callbacks(link_experiment_table=is_linked)
    callback_values = list(metric_comparison.app.callback_map.values())
    assert len(callback_values) == 1
    registered_callback_name = callback_values[0]["callback"].__name__
    registered_callback_len_input = len(callback_values[0]["inputs"])
    assert registered_callback_name == "update_metric_heatmap"
    assert registered_callback_len_input == expected
|
from devmgr.devices.models import Device
from piston.utils import rc
from client_token_factory import ClientLoginTokenFactory
import urllib
import urllib2
# handler to send a c2dm message
# NOTE(review): Python 2 code targeting Google C2DM, which was shut down in
# favor of GCM/FCM -- the endpoint below no longer works.
class C2DMSender():
    """Sends push messages to registered Android devices via Google C2DM."""

    def __init__(self, collaps_key = 'boguskey'):
        self.url = 'https://android.apis.google.com/c2dm/send'
        # collapse_key groups pending messages so only the latest is delivered.
        self._collaps_key = collaps_key
        self.token_factory = ClientLoginTokenFactory()

    # Send a push notification to the device
    def send_push_msg(self, device_id, message="Test c2dm message"):
        """Return True when C2DM accepted the message for device_id, else False."""
        print "sending a c2dm msg"
        try:
            device = Device.objects.get(pk=device_id)
        except Device.DoesNotExist:
            print "ERROR: device with id %s not found" % device_id
            return False
        # The C2DM registration id the device obtained from Google.
        registration_id = device.google_id
        print "got goog id: %s" % registration_id
        values = {
            'collapse_key' : self._collaps_key,
            'registration_id' : registration_id,
            'data.payload' : message,
        }
        body = urllib.urlencode(values)
        request = urllib2.Request(self.url, body)
        # C2DM authenticates with a ClientLogin token in the Authorization header.
        request.add_header('Authorization', 'GoogleLogin auth=' + self.token_factory.get_token())
        response = urllib2.urlopen(request)
        if(response.code == 200):
            print('Attempted to send message to device with registraion id:')
            print(registration_id)
            print('was successfull.')
            print('The body returned is:')
            print(response.read())
            return True
        else:
            print "Request failed %d" % response.code
            return False
|
"""
Defines a class for storing sudoku puzzles.
"""
from sys import stdout, stdin
class SudokuPuzzle:
    """A sudoku grid of side ``size ** 2`` with validity checking and CSV I/O.

    Cell value 0 means "empty"; zeros never count as collisions because only
    values in 1..width are checked.
    """
    def __init__(self, size=3):
        """Create an empty puzzle; the board is size**2 x size**2 (default 9x9)."""
        self.size = size
        self.width = size ** 2
        # Board filled with zeroes (empty cells).
        self.table = [[0 for _ in range(size ** 2)] for __ in range(size ** 2)]
    def is_valid(self):
        """Return True when no row, column or box repeats a value in 1..width.

        Bug fixes vs the original: the column and box checks mistakenly
        counted duplicates in the last-seen *row* instead of the column/box,
        and box indexing hard-coded 3 instead of using self.size.
        """
        # Check each row for collisions.
        for row in self.table:
            for num in range(1, self.width + 1):
                if row.count(num) > 1:
                    return False
        # Check each column for collisions.
        for col_num in range(self.width):
            col = [row[col_num] for row in self.table]
            for num in range(1, self.width + 1):
                if col.count(num) > 1:
                    return False
        # Check each size x size box for collisions.
        for i in range(self.size):
            for j in range(self.size):
                big_square = []
                for k in range(self.size):      # row within the box
                    for l in range(self.size):  # column within the box
                        big_square.append(self.table[self.size * i + k][self.size * j + l])
                for num in range(1, self.width + 1):
                    if big_square.count(num) > 1:
                        return False
        # All three tests passed: the puzzle is valid.
        return True
    def write(self, file=stdout):
        """Write a CSV version of the puzzle to *file*, one row per line.

        Bug fix: cells are ints, so they are converted to str before joining
        (the original passed ints straight to str.join -> TypeError).
        """
        for row in self.table:
            print(",".join(str(cell) for cell in row), file=file)
    @staticmethod
    def read(file=stdin):
        """Read a CSV puzzle from *file* (stream) and return a SudokuPuzzle.

        Blank lines (e.g. a trailing newline) are skipped instead of crashing
        on int(''); size/width are derived from the parsed table so non-9x9
        inputs validate correctly.
        """
        table = []
        for line in file.read().split("\n"):
            if not line.strip():
                continue  # ignore empty lines
            table.append([int(square) for square in line.split(",")])
        puzzle = SudokuPuzzle()
        puzzle.table = table
        puzzle.width = len(table)
        puzzle.size = round(len(table) ** 0.5)
        return puzzle
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 05 00:24:30 2015
@author: lenovo
"""
## Lec 2.5, slide 4
# NOTE(review): uses raw_input (Python 2) together with print() calls --
# presumably a Python 2 course transcript; confirm target interpreter.
x = 3
x = x*x #square value of x
print(x)
y = float(raw_input('Enter a number: '))
print(y*y)
# Lec 2.6, slide 2
# Parity test via remainder.
x = int(raw_input('Enter an integer: '))
if x%2 == 0:
    print('')
    print('Even')
else:
    print('')
    print('Odd')
print('Done with conditional')
# Lec 2.6, slide 4
# Nested/elif conditionals on divisibility by 2 and 3.
x = 6
if x%2 == 0:
    if x%3 == 0:
        print('Divisible by 2 and 3')
    else:
        print('Divisible by 2 and not by 3')
elif x%3 == 0:
    print('Divisible by 3 and not by 2')
|
# A/B test sizing worksheet (Python 2 print statements).
baseline=15
# Minimum detectable effect: 5 points on a baseline of 15, as a percentage.
minimum_detectable_effect=5.0/15*100.0
print minimum_detectable_effect
sample_size_per_variant=870
import math
# Weeks of observation needed to reach the sample size at each location's
# rate (507 and 250 -- presumably observations per week; confirm source).
yellowstone_weeks_observing=math.ceil(sample_size_per_variant / 507.0)
bryce_weeks_observing=math.ceil(sample_size_per_variant / 250.0)
print yellowstone_weeks_observing
print bryce_weeks_observing
|
import math
# Lookup table: option number -> which triangle element the user supplies.
r = {1 : "leg a", 2 : "hypotenuse c", 3 : "altitude h", 4 : "are S "}
d = []
i = int(input(""))
#i = 4
print("i : ", i)
N = float(input(""))
#N = 64
print(r[i],":",N)
# Right isosceles triangle relations: c = a*sqrt(2), h = c/2, S = c*h/2.
# Each branch derives the other three elements from the one supplied.
if i == 1:
    a = N
    c = math.sqrt(2) * a
    h = c / 2
    S = c * h / 2
elif i == 2:
    c = N
    a = c / math.sqrt(2)
    h = c / 2
    S = c * h / 2
elif i == 3:
    h = N
    c = 2 * h
    a = c / math.sqrt(2)
    S = c * h / 2
elif i == 4:
    S = N
    c = math.sqrt(S * 4)
    a = c / math.sqrt(2)
    h = c / 2
# NOTE(review): if i is outside 1..4, a/c/h/S are never assigned and the
# appends below raise NameError -- confirm input is guaranteed valid.
d.append(a)
d.append(c)
d.append(h)
d.append(S)
print()
print("Elelments of a right isosceles tringale:")
for i in range(0,4):
    print(r[i+1],":",d[i])
|
import re
from cms.models.pagemodel import Page
from easy_thumbnails.files import get_thumbnailer
from haystack import indexes
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.utils.encoding import force_text
from django.utils.translation import activate
rf = RequestFactory()
class PageIndex(indexes.SearchIndex):
    """Haystack search index over django-cms pages, parameterised by language.

    Not itself registered as Indexable; concrete per-language subclasses call
    prepare()/index_queryset() with a fixed language code.
    """
    text = indexes.CharField(document=True, use_template=False)
    url = indexes.CharField(stored=True, indexed=False, model_attr="get_absolute_url")
    title = indexes.CharField(stored=True, indexed=True, model_attr="get_title")
    image = indexes.CharField(use_template=False, null=True)
    excerpt = indexes.CharField(use_template=False, null=True)
    type = indexes.CharField(use_template=False, null=True, faceted=True)
    date = indexes.DateField(null=True, use_template=False)
    def fancy_strip_tags(self, str_):
        """Replace every HTML/XML tag in *str_* with a single space."""
        return re.sub(r"<[^>]*?>", " ", force_text(str_))
    def prepare(self, obj, lang):
        """Build the index document for page *obj* in language *lang*.

        Collects plugin text, picks a banner-image thumbnail, and derives a
        400-character excerpt.  Returns None (skipping the page) when either
        the final text or title is empty.
        """
        # Fake a request so plugin rendering sees the expected context.
        request = rf.get("/")
        request.session = {}
        request.LANGUAGE_CODE = lang
        request.current_page = obj
        request.user = AnonymousUser()
        activate(lang)
        text = ""
        self.prepared_data = super(PageIndex, self).prepare(obj)
        self.prepared_data["type"] = "page"
        self.prepared_data["date"] = (
            obj.publication_date and obj.publication_date.date() or None
        )
        for placeholder in obj.placeholders.all():
            # Concatenate the searchable text of every plugin instance.
            for plugin in placeholder.cmsplugin_set.filter(language=lang):
                instance, plugin_type = plugin.get_plugin_instance()
                if hasattr(instance, "search_fields"):
                    try:
                        text += " ".join(
                            getattr(instance, field) for field in instance.search_fields
                        )
                    except Exception:
                        # Best-effort: a plugin with unreadable fields must
                        # not break indexing of the whole page.
                        pass
            # Use the first suitable BannerPlugin image as the result image.
            for plugin in placeholder.cmsplugin_set.filter(
                language=lang, plugin_type="BannerPlugin"
            ):
                if self.prepared_data.get("image"):
                    continue
                instance, plugin_type = plugin.get_plugin_instance()
                if (
                    instance
                    and instance.image
                    and instance.style != "content-banner-block"
                ):
                    self.prepared_data["image"] = get_thumbnailer(instance.image)[
                        "feed-thumbs"
                    ].url
        excerpt = self.fancy_strip_tags(text.replace("\n", " ").replace("\t", " "))
        if len(excerpt) > 400:
            excerpt = excerpt[:400] + "…"
        self.prepared_data["excerpt"] = excerpt
        self.prepared_data["text"] = self.fancy_strip_tags(
            obj.get_title(language=lang) + " " + text
        )
        self.prepared_data["title"] = self.fancy_strip_tags(self.prepared_data["title"])
        # fixme: set the correct self.prepared_data['tab'] using this:
        # print [p.get_slug() for p in obj.get_ancestors()] + [obj.get_slug()]
        if self.prepared_data["text"] and self.prepared_data["title"]:
            return self.prepared_data
    def index_queryset(self, lang, *args, **kwargs):
        """Public, non-login, non-redirecting published pages for *lang*."""
        page_ids = set()
        for p in Page.objects.published().filter(login_required=False).distinct():
            if p.get_public_object() and not p.get_redirect(lang):
                page_ids.add(p.get_public_object().pk)
        return Page.objects.filter(pk__in=list(page_ids))
    def get_model(self):
        return Page
class FrPageIndex(PageIndex, indexes.Indexable):
    """Searchable index over French ("fr") page content."""

    def prepare(self, obj):
        # Delegate to the language-aware base implementation.
        return PageIndex.prepare(self, obj, "fr")

    def index_queryset(self, *args, **kwargs):
        return PageIndex.index_queryset(self, "fr", *args, **kwargs)
class EnPageIndex(PageIndex, indexes.Indexable):
    """Searchable index over English ("en") page content."""

    def prepare(self, obj):
        # Delegate to the language-aware base implementation.
        return PageIndex.prepare(self, obj, "en")

    def index_queryset(self, *args, **kwargs):
        return PageIndex.index_queryset(self, "en", *args, **kwargs)
|
# coding:utf-8
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error
def mianProcess(train_dt, test_dt):
    """Train linear- and RBF-kernel SVR models on per-day subway CSV data.

    train_dt: list of date strings whose daily CSV files are concatenated
    into the training set; test_dt: one date string for the test set.
    Prints R^2 / MSE / MAE for both kernels.  (The misspelled name
    'mianProcess' is kept because it is called at module level below.)
    NOTE(review): file paths are hard-coded Windows paths -- confirm.
    """
    train_df = pd.DataFrame(
        columns=["train_num", "time_pice", "day_type", "duration", "leav_time", "arr_num", "total_num",
                 "board_num", "left_num", "pre_1", "pre_2", "pre_3"])
    print('Combining data ...')
    for date_str in train_dt:
        temp_df = pd.read_csv(
            'E:\Pycharm\PythonProjects\Subway\data\TrainData\TrainData_for14_line1_' + date_str + '.csv')
        train_df = train_df.append(temp_df, ignore_index=True)
    test_df = pd.read_csv('E:\Pycharm\PythonProjects\Subway\data\TrainData\TrainData_for14_line1_' + test_dt + '.csv')
    # Feature matrix: all columns except the target (board_num).
    X_train = pd.DataFrame({'train_num': train_df.train_num,
                            'time_pice': train_df.time_pice,
                            'day_type': train_df.day_type,
                            'duration': train_df.duration,
                            'leav_time': train_df.leav_time,
                            'total_num': train_df.total_num,
                            'pre_1': train_df.pre_1,
                            'pre_2': train_df.pre_2,
                            'pre_3': train_df.pre_3},
                           columns=['train_num', 'time_pice', 'day_type', 'duration', 'leav_time', 'total_num',
                                    'pre_1', 'pre_2', 'pre_3']).values
    Y_train = train_df.board_num.values
    X_test = pd.DataFrame({'train_num': test_df.train_num,
                           'time_pice': test_df.time_pice,
                           'day_type': test_df.day_type,
                           'duration': test_df.duration,
                           'leav_time': test_df.leav_time,
                           'total_num': test_df.total_num,
                           'pre_1': test_df.pre_1,
                           'pre_2': test_df.pre_2,
                           'pre_3': test_df.pre_3},
                          columns=['train_num', 'time_pice', 'day_type', 'duration', 'leav_time', 'total_num',
                                   'pre_1', 'pre_2', 'pre_3']).values
    Y_test = test_df.board_num.values
    ss_X = StandardScaler() # scaling of the training and test data
    ss_y = StandardScaler()
    X_train = ss_X.fit_transform(X_train)
    X_test = ss_X.transform(X_test)
    Y_train = ss_y.fit_transform(Y_train.reshape(-1, 1))
    Y_test = ss_y.transform(Y_test.reshape(-1, 1))
    print('Start SVR ...')
    linear_svr = SVR(kernel='linear', C=1000) # SVR initialised with a linear kernel
    linear_svr.fit(X_train, Y_train)
    linear_svr_Y_predict = linear_svr.predict(X_test)
    rbf_svr = SVR(kernel='rbf', C=1000) # SVR initialised with an RBF kernel
    rbf_svr.fit(X_train, Y_train)
    rbf_svr_Y_predict = rbf_svr.predict(X_test)
    # Report metrics in the original (un-scaled) units via inverse_transform.
    print('R-squared value of linear SVR is', linear_svr.score(X_test, Y_test))
    print('The mean squared error of linear SVR is', mean_squared_error(ss_y.inverse_transform(Y_test),
                                                                        ss_y.inverse_transform(linear_svr_Y_predict)))
    print('The mean absolute error of linear SVR is', mean_absolute_error(ss_y.inverse_transform(Y_test),
                                                                          ss_y.inverse_transform(linear_svr_Y_predict)))
    print(' ')
    print('R-squared value of RBF SVR is', rbf_svr.score(X_test, Y_test))
    print('The mean squared error of linear SVR is', mean_squared_error(ss_y.inverse_transform(Y_test),
                                                                        ss_y.inverse_transform(rbf_svr_Y_predict)))
    print('The mean absolute error of RBF SVR is', mean_absolute_error(ss_y.inverse_transform(Y_test),
                                                                       ss_y.inverse_transform(rbf_svr_Y_predict)))
    # Y_test = pd.DataFrame(ss_y.inverse_transform(Y_test))
    # Y1 = pd.DataFrame(ss_y.inverse_transform(linear_svr_Y_predict))
    # Y2 = pd.DataFrame(ss_y.inverse_transform(rbf_svr_Y_predict))
    # x = test_df.train_num
    # #x = test_df.leav_time
    # plt.plot(x, Y_test, c='b', label='target')
    # plt.plot(x, Y1, c='r', label='linear')
    # plt.plot(x, Y2, c='c', label='rbf')
    # plt.legend()
    # plt.show()
    # return Y_test
train_dt = ['20141201', '20141202', '20141203', '20141204', '20141205', '20141206', '20141207'
, '20141208', '20141209', '20141210', '20141211', '20141212', '20141213', '20141214'
, '20141215', '20141216', '20141217', '20141218', '20141219', '20141220', '20141221'
, '20141223', '20141224', '20141225', '20141226', '20141227', '20141228']
'''train_dt = ['20141201', '20141202', '20141203', '20141204', '20141205'
, '20141208', '20141209', '20141210', '20141211', '20141212'
, '20141215', '20141216', '20141217', '20141218', '20141219'
, '20141223', '20141224', '20141225', '20141226','20141229']'''
test_dt = '20141229'
mianProcess(train_dt, test_dt)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, logging, sys
log = logging.getLogger(__name__)
import numpy as np
from opticks.analytic.treebase import Tree
from opticks.ana.base import opticks_main
from opticks.ana.nbase import Buf
from opticks.ana.pmt.ddbase import Dddb
from opticks.ana.pmt.ddpart import ddpart_manual_mixin
from opticks.ana.pmt.treepart import treepart_manual_mixin
from GPmt import GPmt
# Python 2 script: serialises an analytic PMT description to a GPmt buffer.
if __name__ == '__main__':
    #apmtpathtmpl_default = "$TMP/GPmt/%(apmtidx)s/GPmt.npy"
    #apmtpathtmpl_default = "$IDPATH/GPmt/%(apmtidx)s/GPmt.npy"
    #apmtpathtmpl_default = "$OPTICKS_INSTALL_PREFIX/opticksdata/export/DayaBay/GPmt/%(apmtidx)s/GPmt.npy"
    #args = opticks_main(apmtpathtmpl=apmtpathtmpl_default, apmtidx=2)
    args = opticks_main(apmtidx=2)
    ddpart_manual_mixin() # add partitioner methods to Tubs, Sphere, Elem and Primitive
    treepart_manual_mixin() # add partitioner methods to Node and Tree
    apmtpath = args.apmtpath
    print "\nAiming to write serialized analytic PMT to below apmtpath\n%s\n" % apmtpath
    # Interactive confirmation unless --yes was given on the command line.
    if args.yes:
        print "proceeding without asking"
    else:
        proceed = raw_input("Enter YES to proceed... (use eg \"--apmtidx 3\" to write to different index whilst testing, skip dialog with --yes) ... ")
        if proceed != "YES": sys.exit(1)
    pass
    xmlpath = args.apmtddpath
    log.info("\n\nparsing %s -> %s " % (xmlpath, os.path.expandvars(xmlpath)))
    log.info("\n\nDddb.parse xml \n")
    g = Dddb.parse(xmlpath)
    log.info("\n\ng.logvol \n")
    lv = g.logvol_("lvPmtHemi")
    log.info("\n\nTree(lv) \n")
    tr = Tree(lv)
    log.info("\n\nDump Tree \n")
    tr.dump()
    log.info("\n\nPartition Tree into parts list **ddpart.py:ElemPartitioner.parts** IS THE HUB \n")
    parts = tr.parts()
    log.info("\n\nDump parts : type(parts):%s \n", type(parts))
    for pt in parts:
        print pt
    # Sanity check: partitioning must have produced CSG nodes.
    assert hasattr(parts, 'gcsg') and len(parts.gcsg) > 0
    log.info("\n\nConvert parts to Buf (convert method mixed in from treepart.py applying as_quads to each part) \n")
    buf = tr.convert(parts)
    assert type(buf) is Buf
    log.info("\n\nmake GPmt from Buf \n")
    gp = GPmt(apmtpath, buf )
    log.info("\n\nsave GPmt\n")
    gp.save() # to apmtpath and sidecars
|
from documentRead import DocumentRead
def community_member(directory):
    """Read per-community membership files from *directory*.

    Uses DocumentRead to locate files whose names match 'memberOfCommunity',
    then returns {file_index: {member_line: 0, ...}, ...} where each key of
    the inner dict is one stripped line of the corresponding file.

    Bug fix: the original opened each file without ever closing it, leaking
    one file handle per document; 'with' guarantees closure.
    """
    # directory ='F:\\Git Repository\\InfluenceScore_result\\'
    document_reader = DocumentRead(directory)
    document_reader.load_document(key_word_list='memberOfCommunity')
    document_names = document_reader.get_documents_name()
    members = {}
    for index, name in enumerate(document_names):
        members[index] = {}
        with open(directory + name) as f:
            for line in f:
                members[index][line.strip('\n')] = 0
    return members
|
# Generated by Django 3.2.6 on 2021-09-27 23:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint Auction.vendor at auth.User with CASCADE
    deletion and related_name 'auction_vendor'."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api_auction', '0003_alter_auction_end_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='auction',
            name='vendor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auction_vendor', to='auth.user'),
        ),
    ]
|
from django.contrib import admin
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from books import views
# URL routes: admin site plus the books list/detail API endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('books/', views.BooksList.as_view()),
    path('books/<int:pk>/', views.BookDetail.as_view()),
    # path('db/', views.BooksUpdateList.as_view()),
]
# Enable suffix-based content negotiation (e.g. /books.json).
urlpatterns = format_suffix_patterns(urlpatterns)
|
"""
This is experimental
"""
from docutils.parsers.rst import directives
import glob
import copy
class Include(directives.misc.Include):
    """Include directive extended to accept a trailing '*' glob: every
    matching file is included in turn.

    NOTE(review): experimental (per module docstring).  copy.copy of a
    super(...) proxy object is unusual -- confirm it actually yields
    independent directive runs before enabling registration in setup().
    """
    def run(self):
        if self.arguments[0].endswith('*'):
            out = list()
            paths = glob.glob(self.arguments[0])
            for path in paths:
                # Clone the bound-super proxy so each include gets its own
                # path argument (see NOTE above).
                directive = copy.copy(super(Include, self))
                directive.arguments[0] = directives.path(path)
                out = out + directive.run()
            return out
        else:
            # No glob: fall back to the stock Include behaviour.
            return super(Include, self).run()
def setup(sphinx):
    """Sphinx extension entry point; directive registration is disabled."""
    pass
    #sphinx.add_directive('include', Include)
|
import base64
from django.contrib.auth import login, logout
from django.core.paginator import Paginator, EmptyPage
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render
from .forms import AnswerForm, AskForm, LoginForm, SignupForm
from .models import Question, Answer
def test(request, *args, **kwargs):
    """Health-check view: always answers 200 with body 'OK'."""
    return HttpResponse('OK')
def paginate(request, qs, baseurl='/'):
    """Paginate queryset *qs* using the 'limit' and 'page' GET parameters.

    limit defaults to 10 and is reset to 10 when > 100; a non-integer page
    raises Http404.  Returns (page, paginator); the paginator is given a
    `baseurl` attribute for building links in templates.
    """
    try:
        limit = int(request.GET.get('limit', 10))
    except ValueError:
        limit = 10
    if limit > 100:
        limit = 10
    try:
        page = int(request.GET.get('page', 1))
    except ValueError:
        raise Http404
    paginator = Paginator(qs, limit)
    paginator.baseurl = baseurl
    try:
        page = paginator.page(page)
    except EmptyPage:
        # Bug fix: the original called `paginator(paginator.num_pages)`,
        # which is a TypeError (Paginator is not callable); clamp to the
        # last available page instead.
        page = paginator.page(paginator.num_pages)
    return page, paginator
def index(request):
    """Front page: newest questions, paginated."""
    print(request.user)  # debug trace kept from the original
    try:
        new_questions = Question.objects.new()
    except Question.DoesNotExist:
        raise Http404
    page, paginator = paginate(request, new_questions, '/?page=')
    context = {
        'page': page,
        'paginator': paginator,
        'questions': page.object_list,
    }
    return render(request, 'qa/index.html', context)
def popular(request):
    """Most popular questions, paginated."""
    try:
        popular_questions = Question.objects.popular()
    except Question.DoesNotExist:
        raise Http404
    page, paginator = paginate(request, popular_questions, '/popular/?page=')
    context = {
        'page': page,
        'paginator': paginator,
        'questions': page.object_list,
    }
    return render(request, 'qa/popular.html', context)
def question(request, id):
    """Show one question with its answers and an answer form.

    GET renders an empty AnswerForm; POST validates and saves a new answer.
    Raises Http404 for a missing/unknown question id.
    """
    if id:
        try:
            question = Question.objects.get(pk=id)
        except Question.DoesNotExist:
            raise Http404
        # filter() returns a (possibly empty) queryset and does not raise
        # DoesNotExist; the defensive try/except is kept as-is.
        try:
            answers = Answer.objects.filter(question=question)
        except Answer.DoesNotExist:
            raise Http404
        if request.method == 'POST':
            form = AnswerForm(request.POST,
                              initial={'question': question.id})
            form._user = request.user
            if form.is_valid():
                question = form.save()
        else:
            form = AnswerForm(initial={'question': question.id})
        return render(request, 'qa/question.html', {'question': question,
                                                    'answers': answers,
                                                    'form': form})
    # Bug fix: the original *returned* the Http404 class, which is not a
    # valid HttpResponse; it must be raised.
    raise Http404
def ask(request):
    """Render the ask form; on valid POST create the question and redirect to it."""
    if request.method != 'POST':
        return render(request, 'qa/ask.html', {'form': AskForm()})
    form = AskForm(request.POST)
    form._user = request.user
    if form.is_valid():
        new_question = form.save()
        return HttpResponseRedirect(new_question.get_url())
    # Invalid POST: re-render with the bound form so errors are shown.
    return render(request, 'qa/ask.html', {'form': form})
def login_view(request):
    """Authenticate via LoginForm; on success log in and set the sessionid cookie."""
    if request.method != 'POST':
        return render(request, 'qa/login.html', {'form': LoginForm()})
    form = LoginForm(request.POST)
    if form.is_valid():
        form.save()
        data = form.cleaned_data
        login(request, data['user'])
        return do_sessionid(data['username'], data['password'])
    # Invalid credentials: re-render with the bound form.
    return render(request, 'qa/login.html', {'form': form})
def signup(request):
    """Register a new user; on success log them in and set the sessionid cookie."""
    if request.method != 'POST':
        return render(request, 'qa/signup.html', {'form': SignupForm()})
    form = SignupForm(request.POST)
    if form.is_valid():
        form.save()
        data = form.cleaned_data
        login(request, data['user'])
        return do_sessionid(data['username'], data['password'])
    # Invalid submission: re-render with the bound form.
    return render(request, 'qa/signup.html', {'form': form})
def do_sessionid(username, password):
    """Build a redirect-to-index response carrying a base64 'sessionid' cookie.

    Bug fix: base64.b64encode requires bytes on Python 3; the original passed
    the concatenated str and raised TypeError.  Encode first, then decode so
    the cookie value is a str.

    NOTE(review): base64 of username+password is trivially reversible and is
    not a secure session token -- confirm this scheme is intentional.
    """
    sessionid = base64.b64encode((username + password).encode('utf-8')).decode('ascii')
    if sessionid:
        response = HttpResponseRedirect('/')
        response.set_cookie('sessionid', sessionid)
        return response
def logout_view(request):
    """Log the current user out and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect('/')
|
from django.core.serializers import json
from python import Deserializer
# Monkeypatch: swap in the local Deserializer *before* re-exporting the json
# serializer module's names, so the wildcard import below exposes the
# patched behaviour.  The import order here is deliberate.
json.PythonDeserializer = Deserializer
from django.core.serializers.json import *
|
import pandas as pd
import numpy as np
from random import randint
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pylab as pylab
from scipy import stats
import os.path
def guessCorrectness(guessedRightCnt, guessUpCnt, guessDownCnt, guessSkipCnt, rewardSum, guessCnt, profit, period):
    """Aggregate per-step guess statistics into per-period values.

    For every full window of *period* steps, append (in lockstep) the hit
    percentage and the per-step averages of up/down/skip counts and reward.
    Windows containing no up/down guesses contribute nothing.  *profit* is
    accepted for signature compatibility but unused (as in the original).
    Returns (correctnessList, upList, downList, skiplist, scoreList).
    """
    correctnessList, upList, downList, skiplist, scoreList = [], [], [], [], []
    steps = 0
    up_total = down_total = skip_total = score_total = right_total = 0
    for i in range(len(guessCnt)):
        steps += 1
        up_total += guessUpCnt[i]
        down_total += guessDownCnt[i]
        skip_total += guessSkipCnt[i]
        score_total += rewardSum[i]
        right_total += guessedRightCnt[i]
        if steps == period:
            if (up_total + down_total) != 0:
                correctnessList.append((right_total / (down_total + up_total)) * 100)
                upList.append(up_total / period)
                downList.append(down_total / period)
                skiplist.append(skip_total / period)
                scoreList.append(score_total / period)
            # Reset the window accumulators whether or not we recorded it.
            up_total = down_total = skip_total = score_total = right_total = 0
            steps = 0
    return correctnessList, upList, downList, skiplist, scoreList
def reduceCnt(eps, eps_period):
    """Downsample *eps* by summing consecutive windows of eps_period values.

    Each full window's sum is scaled by 9 before being appended; a trailing
    partial window is dropped, matching the original behaviour.
    """
    reduced = []
    window_total = 0
    window_len = 0
    for value in eps:
        window_len += 1
        window_total += value
        if window_len == eps_period:
            reduced.append(window_total * 9)
            window_total = 0
            window_len = 0
    return reduced
# Plotting driver: loads the action log (and the eval log when present) for
# run `log_nr` and renders price/trade markers plus eval profit.
log_nr = "35"
imageName = "run_" + log_nr
t_period = 100
e_period = 50
eps_period = 1
e_file = os.path.exists("/home/andras/PycharmProjects/TradingGame/logs/evalLog_0" + log_nr + ".csv")
a_log = pd.read_csv("/home/andras/PycharmProjects/TradingGame/logs/actionLog_0" + log_nr + ".csv", sep=",", index_col=0)
a_price = a_log.BTCPrice
a_bought = a_log.bought
a_sold = a_log.sold
priceMax = np.amax(a_price)
priceMin = np.amin(a_price)
if e_file == True:
    e_log = pd.read_csv("/home/andras/PycharmProjects/TradingGame/logs/evalLog_0" + log_nr + ".csv", sep=",", index_col=0)
    e_sumPercent = e_log.sumPercent
    e_rewardSum = e_log.rewardSum
    e_profit = e_log.profit
    e_guessedRightCnt = e_log.guessedRightCnt
    e_guessedWrongCnt = e_log.guessedWrongCnt
    e_guessSkipCnt = e_log.guessSkipCnt
    e_guessCnt = e_log.guessCnt
    e_guessUpCnt = e_log.guessUpCnt
    e_guessDownCnt = e_log.guessDownCnt
    print("e_Log Count:", len(e_profit))
    print("e_Log Period:", e_period)
    e_profit = e_log.profit
fig = plt.figure(figsize=(12, 10))
# NOTE(review): when e_file is False, e_profit below is undefined and the
# ax2 plot raises NameError -- confirm the eval log always exists.
if e_file == True:
    #e_correctnessList, e_upList, e_downList, e_skiplist, e_scoreList = guessCorrectness(e_guessedRightCnt, e_guessUpCnt, e_guessDownCnt, e_guessSkipCnt, e_rewardSum, e_guessCnt, e_profit,e_period)
    #latest = e_correctnessList[-10:]
    #print("Correct:", np.mean(latest))
    # AX 1
    ax1 = fig.add_subplot(211)
    ax1.plot(a_price, "-", color='b', linewidth=1)
    ax1.plot(a_bought, "*", color='g', linewidth=1)
    ax1.plot(a_sold, "*", color='r', linewidth=1)
    ax1.set_ylim([priceMin, priceMax])
    # AX 1
    ax2 = fig.add_subplot(212)
    ax2.plot(e_profit, "*", color='g', linewidth=1)
    #ax2.set_ylim([35, 65])
    plt.axhline(0, color='black', linewidth=0.5)
    #plt.title("Eval Success Percentage")
    # # AX 3 -
    # ax3 = fig.add_subplot(223)
    # ax3.plot(e_correctnessList, "-", color='g', linewidth=1)
    # ax3.set_ylim([35, 65])
    # plt.axhline(50, color='black', linewidth=0.5)
    # #plt.title("Eval Success Percentage")
    # # AX 4 -
    # ax4 = fig.add_subplot(224)
    # ax4.plot(e_upList, "-", color='g', linewidth=1)
    # #ax4.plot(e_downList, "-", color='r', linewidth=1)
    # ax4.plot(e_skiplist, "-", color='b', linewidth=1)
    # #ax4.set_ylim([-70, 70])
    # #ax4.set_xlim([0, Epoch])
    # #plt.axhline(0, color='black', linewidth=0.5)
    # #plt.title("Eval Guess Occurences")
fig.suptitle(imageName)  # or plt.suptitle('Main title')
#ax1.legend()
#fig.tight_layout(rect=[0, 0.03, 1, 0.95])
fileName = "/home/andras/PycharmProjects/TradingGame/lab/img_" + imageName + ".png"
fig.savefig(fileName)
plt.show()
'''
321
322
323
324
325
326
'''
|
import classes
# Demo driver for the classes.library container: populate five books with
# distinct Year values, inspect, delete one, then re-inspect.
lib=classes.library()
for i in range(5):
    lib.add_book(classes.create_new_book(Year=str(i)))
lib.all_books_info()
print("\n\n")
lib.book_(2)
lib.delete_book(2)
print("\n\n")
lib.all_books_info()
print("\n\n")
# NOTE(review): book_(2) after delete_book(2) -- presumably shows the book
# now occupying slot 2 (or an error); confirm classes.library semantics.
lib.book_(2)
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
import datetime as dt
from django.http import Http404
from .models import Image
# Create your views here.
def welcome(request):
    """Landing page: render every stored image in the gallery template."""
    context = {"images": Image.objects.all(), "title": 'Gallery Webpage'}
    return render(request, 'all-photos/image.html', context)
def search_results(request):
    """Filter images by the 'image' GET parameter (category search)."""
    if 'image' in request.GET and request.GET["image"]:
        search_term = request.GET.get("image")
        matches = Image.search_by_Category(search_term)
        context = {"message": f"{search_term}", "images": matches}
        return render(request, 'all-photos/search.html', context)
    # No (or empty) search term supplied.
    context = {"message": "You haven't searched for any term"}
    return render(request, 'all-photos/search.html', context)
def image(request, image_id):
    """Detail view for one image; raises Http404 when no image has that pk.

    Bug fix: the original caught bare ``DoesNotExist`` -- an undefined name,
    so a missing image raised NameError instead of 404.  Catch the model's
    own ``Image.DoesNotExist`` exception.
    """
    try:
        images = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
        raise Http404()
    return render(request, "all-photos/image_details.html", {"images": images})
|
from builder import window
from data_grabber import data
from label_maker import *
# Wire-up script: build a Tk window, wrap sample strings in a data object,
# generate labels from it, then lay the labels out on a grid and run the loop.
root = window()
root = root.get_window()
text = ["this is a test", "this is a second test"]
data_object = data()
data_object.assign_labels(text)
label_object = labels(root)
label_object.build_labels(data_object)
my_labels = label_object.get_labels()
# One grid row per label (exactly two labels are expected here).
my_labels[0].grid(row=0)
my_labels[1].grid(row=1)
root.mainloop()
|
from django.shortcuts import render
from .models import Project
# Create your views here.
def home(request):
    """Homepage listing every project (template context key: 'project')."""
    all_projects = Project.objects.all()
    return render(request, 'homepage.html', {'project': all_projects})
def project_index(request):
    """Project listing page."""
    all_projects = Project.objects.all()
    return render(request, 'project_index.html', {'projects': all_projects})
def project_detail(request, pk):
    """Detail page for one project looked up by primary key."""
    selected = Project.objects.get(pk=pk)
    return render(request, 'project_detail.html', {'project': selected})
def services_provided(request):
    """Static page listing offered services."""
    return render(request, 'services.html', {})
def about_me(request):
    """Static 'about' page."""
    return render(request, 'about_page.html', {})
def contact(request):
    """Static contact page."""
    return render(request, 'contact.html', {})
|
#Setup Start
# Import required Python libraries
import time
import RPi.GPIO as GPIO
# Use BCM GPIO referencesinstead of physical pin numbers
# Throughout this book you will be using BCM GPIO reference to maintain the consistency
GPIO.setmode(GPIO.BCM)
# Defines the GPIO port number which will be used for Trigger and Echo
# As mentioned before Pin 24 (BCM GPIO Pin number 8) and Pin 26 (BCM GPIO Pin number 7) will be used for Trigger and Echo respectively.
GPIO_TRIGGER = 8
GPIO_ECHO = 7
#setup the GPIO port of raspberry pi to OUTPUT and INPUT
# It will set GPIO Trigger Pin as OUTPUT as you will be sending Trigger signal to the Ultrasonic module. Similarly it will set the GPIO Echo Pin as INPUT as you will get the response from the Ultrasoinc module once you have send the Trigger Pulse to the module.
GPIO.setup(GPIO_TRIGGER,GPIO.OUT) # Trigger
GPIO.setup(GPIO_ECHO,GPIO.IN) # Echo
# Set trigger to False (Low)
GPIO.output(GPIO_TRIGGER, False)
# Allow module to settle
time.sleep(0.5)
#SetUp Ends
#Processing Start
# As mentioned in the Ultrasonic sensor module section , first you need to send the 10uS Trigger pulse to Trigger Pin and then module will send the output to the Echo pin
# Send 10us pulse to trigger
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
# Once you have send the Trigger Pulse , as explained in the Ultrasonic sensor module section , after some time you will receive the reflected sound waves. You need to calculate the time that is taken by sound waves to bounce back.
# NOTE(review): these busy-wait loops have no timeout; if the sensor never
# answers, `stop` is never assigned and the script hangs or raises NameError.
start = time.time()
while GPIO.input(GPIO_ECHO)==0:
    start = time.time()
while GPIO.input(GPIO_ECHO)==1:
    stop = time.time()
# Calculate pulse length
elapsed = stop-start
# Distance pulse travelled in that time is time
# multiplied by the speed of sound (cm/s)
distance = elapsed * 34000
# Thatis the total distance that is covered by the sound waves. To calculate the distance to the nearest wall , you need to divide it by 2
distance = distance / 2
print "Ultrasonic Measurement Distance : %.1f" % distance
# Reset GPIO settings
GPIO.cleanup()
#Processing Ends
|
import numpy as np
# Build a standardized, channel-stacked weather dataset from five .npy inputs.
Output_filename = "ImageDataSetUnbalanced"
# Loading Datasets
Air = np.load('air_raw_3d_dataset.npy')
Air_mean = np.load('Mean_Array_air.npy')
Air_std = np.load('Std_Array_air.npy')
P_water = np.load('pr_wtr_raw_3d_dataset.npy')
P_water_mean = np.load('Mean_Array_pr_wtr.npy')
P_water_std = np.load('Std_Array_pr_wtr.npy')
Slp = np.load('slp_raw_3d_dataset.npy')
Slp_mean = np.load('Mean_Array_slp.npy')
Slp_std = np.load('Std_Array_slp.npy')
Hor_Wind = np.load('uwnd_raw_3d_dataset.npy')
Hor_Wind_mean = np.load('Mean_Array_uwnd.npy')
Hor_wind_std = np.load('Std_Array_uwnd.npy')
Vert_Wind = np.load('vwnd_raw_3d_dataset.npy')
Vert_Wind_mean = np.load('Mean_Array_vwnd.npy')
Vert_Wind_std = np.load('Std_Array_vwnd.npy')
# Scaling
# Standardize each variable with its precomputed mean/std arrays.
Air = (Air - Air_mean) / Air_std
P_water = (P_water - P_water_mean) / P_water_std
Slp = (Slp - Slp_mean) / Slp_std
Hor_Wind = (Hor_Wind - Hor_Wind_mean) / Hor_wind_std
Vert_Wind = (Vert_Wind - Vert_Wind_mean) / Vert_Wind_std
print("Shapes of datasets:\n")
print(Air.shape)
print(P_water.shape)
print(Slp.shape)
print(Hor_Wind.shape)
print(Vert_Wind.shape)
# Saving to File
# Stack the five variables along a new trailing axis (axis=3).
MasterDataSet = np.stack((Air, P_water, Slp, Hor_Wind, Vert_Wind), axis = 3)
np.save(Output_filename, MasterDataSet)
|
#!/usr/bin/env python
# coding: utf-8
# In[78]:
import requests #Used to service API connection
from lxml import html #Used to parse XML
from bs4 import BeautifulSoup #Used to read XML table on webpage
import pandas as pd
#from pandas import DataFrame
import numpy as np
import wget
from common import cFunction as cf
from sqlalchemy import create_engine
# Jupyter-exported script (cell markers `# In[..]` kept): reads a worksheet
# of OPENAPI dataset definitions, fetches one dataset, loads it into
# PostgreSQL and downloads its reference document.
# NOTE(review): database credentials are hard-coded below -- confirm.
# get dataList from filesystem to load and write
#dataList = pd.read_excel("../../data/inbound/dataList.xlsx")
# In[79]:
# get dataList from spreadsheet to load and write
dataList = pd.read_csv("../../data/inbound/workSheet.csv")
print("### The total number of target data is " + str(len(dataList)))
# In[80]:
regionCdData = pd.read_csv("../../data/infomations/RegionCode.csv")
print(regionCdData)
# In[81]:
# Filtering -> get dataList only defined url
dataList = dataList[(dataList["제공항목(데이터셋)"] == '한국감정원 주택거래 현황') & (dataList["제공방식"] == 'OPENAPI')]
print("### The total number of filtered data is " + str(len(dataList)))
dataList
# In[82]:
###################################################
# Filtering -> for your own object
#dataList = dataList[ dataList['번호'] == "352" ]
###################################################
# create folder to save result
outPath = "../../data/outbound/"
folderList = dataList["폴더명"].tolist()
# In[83]:
for i in folderList:
    cf.createFolder(outPath+i)
# In[84]:
dataList = dataList.fillna("")
dataList = dataList.reset_index(drop=True)
dataList
# In[85]:
dataCount = 0
# In[86]:
# get dataList to load and write
inputUrl = dataList.loc[dataCount, "사이트"]
inputKey = dataList.loc[dataCount, "서비스키"]
inputParameter = dataList.loc[dataCount, "파라미터"]
inputFolder = dataList.loc[dataCount, "폴더명"]
inputFile = dataList.loc[dataCount, "서비스명"]
inputDataType = dataList.loc[dataCount, "데이터타입"]
inputRefUrl = dataList.loc[dataCount, "참고문서"]
inputRefType = dataList.loc[dataCount, "참고문서타입"]
inputbParameter = dataList.loc[dataCount, "비고_파라미터설명"]
len(inputbParameter)
# In[87]:
url = cf.makeURL(inputUrl,inputKey,inputParameter)
print("fullUrl is " + url)
# In[88]:
# Dispatch on the declared payload type of the data source.
newDF = pd.DataFrame()
if (inputDataType == "xml"):
    newDF = cf.operatorXmlProcess(url, inputbParameter)
elif(inputDataType == "json"):
    newDF = cf.jsonProcess(url)
elif(inputDataType == "csv"):
    newDF = cf.csvProcess(url)
# In[89]:
newDF
# In[90]:
fullOutPath = outPath+inputFolder+"/"+inputFolder+inputFile+".csv"
print(fullOutPath)
# In[91]:
# try:
# newDF.to_csv(fullOutPath, index=False, encoding="utf-8")
# except Exception as x:
# print(x)
# In[94]:
try:
    engine = create_engine('postgresql://postgres:postgres@192.168.110.23:5432/postgres')
    newDF.to_sql(inputFolder + inputFile + ".csv", engine, if_exists='replace', index = False)
except Exception as x:
    print(x)
# In[93]:
fullOutRefPath = outPath + inputFolder + "/" + inputFolder + inputFile + "."+inputRefType
try:
    wget.download(inputRefUrl, fullOutRefPath)
except Exception as e:
    print(inputFolder+"참고문서 Error")
    print(e)
    pass
# In[ ]:
# In[ ]:
# In[ ]:
|
from itertools import groupby
import json
from pprint import pprint
import re
import sys
# Python 2 script (xrange).  argv[1]: the play text, argv[2]: JSON mapping of
# raw speaker tokens to canonical character names.
with open(sys.argv[1]) as f:
    chunks = re.split(r"\n\n+", f.read())
with open(sys.argv[2]) as f:
    known_chars = json.load(f)
# play is a list of lists, each list represents an act
# act is a list of lists, each list represents a scene
play = [list(group) for k, group in groupby(chunks, lambda s:
    s.startswith("ACT")) if not k][1:]
for i in xrange(len(play)):
    play[i] = [list(group) for k, group in groupby(play[i], lambda s:
        s.startswith("Scene")) if not k]
def act(n):
    """Return act *n* (0-based) from the module-level `play` structure."""
    return play[n]
def scene(i, j):
    """Return scene *j* of act *i* from the module-level `play`."""
    return play[i][j]
def characters(scene):
    """Sorted canonical character names speaking in *scene*.

    The first token of each chunk that is not a stage direction ("[...")
    is treated as the speaker; only tokens present in `known_chars` count.
    """
    characters = set(s.split()[0] for s in scene if not s.startswith("["))
    characters = set(known_chars[char] for char in characters if char in known_chars)
    return sorted(list(characters))
# Collect the deduplicated, sorted set of characters across every scene.
all_chars = []
for i in xrange(len(play)):
    for j in xrange(len(play[i])):
        all_chars += characters(scene(i, j))
all_chars = sorted(list(set(all_chars)))
def length(scene):
    """Return the total character count of all chunks in *scene*."""
    return sum(len(chunk) for chunk in scene)
# Build a flat timeline of scenes: each entry records its sequential id,
# starting offset (in characters), duration (character count), and cast.
scenes = []
start = 0
scene_id = 0
for i in xrange(len(play)):
    for j in xrange(len(play[i])):
        d = {
            "duration": length(scene(i, j)),
            "start": start,
            "id": scene_id,
            "chars": characters(scene(i, j))
        }
        scenes.append(d)
        start += length(scene(i, j))
        scene_id += 1
pprint(scenes)
|
import requests
import json

# Query the local service's version endpoint and pretty-print the JSON reply.
url = 'http://localhost:8888'
url += '/v1/version'
headers = {
    'Content-Type': 'application/x-www-form-urlencoded'
}
if __name__ == '__main__':
    response = requests.get(url, headers=headers)
    # Round-trip through json to re-indent; ensure_ascii=False keeps non-ASCII readable.
    print(json.dumps(json.loads(response.text), indent=4, ensure_ascii=False))
|
#!/usr/bin/env python3
import base64
import binascii
import sys
import struct
class Message():
    """One framed message of the lens serial protocol.

    Wire layout (little-endian):
      byte 0       SOM, always 0xF0
      bytes 1-2    total frame length (unsigned short, LE)
      byte 3       message class
      byte 4       sequence number
      byte 5       message type
      bytes 6..-4  payload
      bytes -3,-2  checksum (unsigned short, LE)
      byte -1      EOM, always 0x55
    """

    def __init__(self, dir, raw, tstamp):
        """Store direction tag, hex-encoded frame and timestamp.

        Header/payload fields stay None until parse() is called.
        """
        self.som = None
        self.message_length = None
        self.message_class = None
        self.seq_number = None
        self.message_type = None
        self.message = None
        self.checksum = None
        self.eom = None
        self.garbage = None
        self.direction = dir
        self.timestamp = tstamp
        self.bytes = bytes.fromhex(raw)

    def parse(self):
        """Split the raw frame into header, payload, checksum and EOM fields."""
        print(f"[{self.direction}] RAW Message '{str.upper(self.bytes.hex())}'")
        self.som = str.upper(self.bytes[0:1].hex())                    # byte 0
        self.message_length = struct.unpack("<H", self.bytes[1:3])[0]  # bytes 1,2; 16-bit LE length
        self.message_class = str.upper(self.bytes[3:4].hex())          # byte 3
        self.seq_number = str.upper(self.bytes[4:5].hex())             # byte 4
        self.message_type = str.upper(self.bytes[5:6].hex())           # byte 5
        self.message = str.upper(self.bytes[6:-3].hex())               # payload bytes
        self.message_bytes = self.bytes[6:-3]
        self.checksum = struct.unpack("<H", self.bytes[-3:-1])[0]      # bytes -3,-2
        self.eom = str.upper(self.bytes[-1:].hex())                    # byte -1
        # Recompute the checksum location from the *declared* length; this only
        # matches self.checksum when the declared length is consistent with the frame.
        try:
            self.computed_checksum = (self.bytes[self.message_length - 2] << 8) + self.bytes[self.message_length - 3]
        except IndexError:
            self.computed_checksum = None

    def validate(self):
        """Check framing bytes and known class/type codes.

        Returns True when the frame looks valid, False otherwise.
        Fix: the original fell through and returned None on success.
        """
        if self.som != 'F0':
            # Fix: closing quote was missing from the f-string.
            print(f"SOM '{self.som}' != F0")
            return False
        if self.message_class not in ['01', '02', '03']:
            print(f"Unknown message class: '0x{self.message_class}'")
            return False
        if self.message_type not in ['03', '04', '05', '06', '1C', '1D', '1F', '22', '2F', '3C', '28']:
            print(f"Unknown message type: '0x{self.message_type}'")
            return False
        if self.eom != '55':
            print(f"EOM {self.eom} != 55")
            return False
        return True

    def pp_message_type(self):
        """Return a human-readable label for the message type byte."""
        if self.message_type == '03':
            return "03, aperture"
        elif self.message_type == '04':
            return "04, unknown (preceds an 0x05 aperture status message)"
        elif self.message_type == '05':
            return "05, aperture status"
        elif self.message_type == '06':
            return "06, focus position status"
        elif self.message_type == '1C':
            return "1C, stop af"
        elif self.message_type == '1D':
            return "1D, abs or rel motor movement"
        elif self.message_type == '1F':
            return "1F, af hunt"
        elif self.message_type == '22':
            return "22, abs motor movement"
        elif self.message_type == '2F':
            return "2F, echo request"
        elif self.message_type == '3C':
            return "3C, move at speed"
        elif self.message_type == '28':
            return "28, take picture ?"
        else:
            return f"{self.message_type} UNKNOWN"

    def pp_message_class(self):
        """Return a human-readable label for the message class byte."""
        if self.message_class == '01':
            return "01, normal"
        elif self.message_class == '02':
            return "02, init or shutdown"
        elif self.message_class == '03':
            return "03, UNKNOWN"
        else:
            return f"{self.message_class}, UNKNOWN TOO"

    def message_bruteforce(self):
        """Reverse-engineering aid: dump the payload as LE unsigned shorts at every offset."""
        print("=== bruteforcing message as lsb unsigned short")
        for i in range(0, len(self.message_bytes)):
            try:
                print(struct.unpack('<H', self.message_bytes[i:i+2])[0])
                #print(struct.unpack('<h', self.message_bytes[i:i+2])[0])
                #print(struct.unpack('<I', self.message_bytes[i:i+4])[0])
                #print(struct.unpack('<i', self.message_bytes[i:i+4])[0])
                #print(struct.unpack('<B', self.message_bytes[i:i+1])[0].split('\0', 1)[0])
            except struct.error as e:
                print(f"error: {e}; index: {i}, msg length: {len(self.message_bytes)}")
        print("=== bruteforcing end")

    def decode_message(self):
        """Print a field-by-field interpretation of known payload layouts.

        Field meanings are reverse-engineered guesses (see '?' markers).
        """
        if self.message_type == '06':  # focus position status
            print("Decoding of focus position status:")
            print(f"  Limit flags: {self.message_bytes[0:1].hex()}")
            print(f"  static?(00): {self.message_bytes[1:2].hex()}")
            # Fix: the original compared a bytes slice to the str '00', which is
            # always False in Python 3; compare against the byte value instead.
            if self.message_bytes[2:3] == b'\x00':
                print(f"  focus position (MAX): {self.message_bytes[2:3].hex()}")
            else:
                print(f"  focus position: {self.message_bytes[2:3].hex()}")
            print(f"  static?(10): {self.message_bytes[3:4].hex()}")
            print(f"  static?(00): {self.message_bytes[4:5].hex()}")
            print(f"  static?(00): {self.message_bytes[5:6].hex()}")
            print(f"  static?(00): {self.message_bytes[6:7].hex()}")
            print(f"  static?(00): {self.message_bytes[7:8].hex()}")
            print(f"  static?(00): {self.message_bytes[8:9].hex()}")
            print(f"  (3F): {self.message_bytes[9:10].hex()}")
            print(f"  (10): {self.message_bytes[10:11].hex()}")
            print(f"  (00): {self.message_bytes[11:12].hex()}")
            print(f"  leftover?: {self.message_bytes[12:].hex()}")
        elif self.message_type == '05':  # aperture status
            print("Decoding of aperture status:")
            print(f"  Focus ?: {self.message_bytes[20:22].hex()}")
            print(f"  Focus pos: {self.message_bytes[23:24].hex()}")
            print(f"  Aperture (00 brightest; 4AB darkest): {self.message_bytes[30:32].hex()}")
            print(f"  Aperture??: {self.message_bytes[33:40].hex()}")
            print(f"  Focus moving flag: {self.message_bytes[60:61].hex()} (00 no motion; 255/ff focus++; 01 focus--??; linked to 0x06 focus position status byte 2: position)")
            print(f"  Target 1: {self.message_bytes[77:78].hex()}")
            print(f"  Target 2: {self.message_bytes[78:79].hex()}")
            print(f"  ??: {self.message_bytes[81:82].hex()}")
            print(f"  {self.message_bytes[84:85].hex()}")
        elif self.message_type == '03':  # aperture
            print("Decoding of aperture:")
            print(f"  Liveness? (00/01): {self.message_bytes[12:13].hex()}")
            print(f"  Target 1? (15/17): {self.message_bytes[21:22].hex()}")
            print(f"  Target 2? (15/17): {self.message_bytes[22:23].hex()}")

    def prettyprint(self):
        """Print header summary, payload renderings and integrity checks, then decode."""
        print(f"[{self.direction}] SOM: 0x{self.som}, length: {self.message_length} (ushortle), class: 0x{self.pp_message_class()}, seq: 0x{self.seq_number}, type: 0x{self.pp_message_type()}")
        print(f"  Message (hex): {self.message}")
        print(f"  Message (bytes): {self.bytes}")
        try:
            print(f"  Message (string): {self.bytes.decode('utf-8')}")
        except UnicodeDecodeError:
            pass  # binary payloads simply have no string rendering
        print(f"  Checksum: {self.checksum} (ushortle), EOM: 0x{self.eom}")
        print(f"  Valid checksum: {self.checksum == self.computed_checksum}")
        print(f"  Valid length: {self.message_length == len(self.bytes)}")
        print()
        self.decode_message()
        #self.message_bruteforce()
def pretty(line):
    """Parse one trace line ("<dir> <hexframe> <timestamp>") and pretty-print it."""
    fields = line.split(" ")
    msg = Message(fields[0], fields[1], fields[2])
    msg.parse()
    msg.validate()
    msg.prettyprint()
# --- command-line driver ---------------------------------------------------
# argv[1]: trace file; optional argv[2]: maximum number of lines to process.
if len(sys.argv) <= 1 or len(sys.argv) > 3:
    print(f"Usage: {sys.argv[0]} <trace file name.txt> [number of lines to process]")
    exit()
if len(sys.argv) == 2:
    process = None  # no line limit
else:
    process = int(sys.argv[2])
processed = 0
with open(sys.argv[1], 'r') as file:
    for line in file.readlines():
        if not line.startswith("#"):  # '#' lines are comments in the trace
            if process and processed >= process:
                break
            try:
                pretty(line)
                print("")
                processed += 1
            except IndexError:
                # raised by pretty() when a line has fewer than three fields
                print(f"Invalid message: {line}")
|
"""Object Services Classes."""
import logging
from .anyprotocolportobjects import AnyProtocolPortObjects
from .applications import Applications
from .applications import Application
from .applicationcategories import ApplicationCategories
from .applicationcategories import ApplicationCategory
from .applicationfilters import ApplicationFilters
from .applicationfilters import ApplicationFilter
from .applicationproductivities import ApplicationProductivities
from .applicationproductivities import ApplicationProductivity
from .applicationrisks import ApplicationRisks
from .applicationrisks import ApplicationRisk
from .applicationtags import ApplicationTags
from .applicationtags import ApplicationTag
from .applicationtypes import ApplicationTypes
from .applicationtypes import ApplicationType
from .certenrollments import CertEnrollments
from .certenrollments import CertEnrollment
from .continents import Continents
from .continents import Continent
from .countries import Countries
from .countries import Country
from .dnsservergroups import DNSServerGroups
from .endpointdevicetypes import EndPointDeviceTypes
from .extendedaccesslist import ExtendedAccessList
from .fqdns import FQDNS
from .geolocation import Geolocation
from .icmpv4objects import ICMPv4Objects
from .icmpv4objects import ICMPv4Object
from .icmpv6objects import ICMPv6Objects
from .icmpv6objects import ICMPv6Object
from .ikev1ipsecproposals import IKEv1IpsecProposals
from .ikev1policies import IKEv1Policies
from .ikev2ipsecproposals import IKEv2IpsecProposals
from .ikev2policies import IKEv2Policies
from .interfacegroups import InterfaceGroups
from .interfacegroups import InterfaceGroup
from .interfaceobjects import InterfaceObjects
from .interfaceobjects import InterfaceObject
from .networkaddresses import NetworkAddresses
from .networkaddresses import IPAddresses
from .hosts import Hosts
from .hosts import IPHost
from .networks import Networks
from .networks import IPNetwork
from .ranges import Ranges
from .ranges import IPRange
from .isesecuritygrouptags import ISESecurityGroupTags
from .networkgroups import NetworkGroups
from .networkgroups import NetworkGroup
from .portobjectgroups import PortObjectGroups
from .portobjectgroups import PortObjectGroup
from .ports import Ports
from .protocolportobjects import ProtocolPortObjects
from .protocolportobjects import ProtocolPort
from .realms import Realms
from .realmusergroups import RealmUserGroups
from .realmusers import RealmUsers
from .securitygrouptags import SecurityGroupTags
from .securityzones import SecurityZones
from .securityzones import SecurityZone
from .siurlfeeds import SIUrlFeeds
from .siurllists import SIUrlLists
from .slamonitors import SLAMonitors
from .slamonitors import SLAMonitor
from .tunneltags import TunnelTags
from .urls import URLs
from .urls import URL
from .urlcategories import URLCategories
from .urlcategories import URLCategory
from .urlgroups import URLGroups
from .urlgroups import URLGroup
from .variablesets import VariableSets
from .variablesets import VariableSet
from .vlangrouptags import VlanGroupTags
from .vlangrouptags import VlanGroupTag
from .vlantags import VlanTags
from .vlantags import VlanTag
logging.debug("In the object_services __init__.py file.")
__all__ = [
"AnyProtocolPortObjects",
"ApplicationCategories",
"ApplicationCategory",
"Applications",
"Application",
"ApplicationFilters",
"ApplicationFilter",
"ApplicationProductivities",
"ApplicationProductivity",
"ApplicationRisks",
"ApplicationRisk",
"ApplicationTags",
"ApplicationTag",
"ApplicationTypes",
"ApplicationType",
"CertEnrollments",
"CertEnrollment",
"Continents",
"Continent",
"Countries",
"Country",
"DNSServerGroups",
"EndPointDeviceTypes",
"ExtendedAccessList",
"FQDNS",
"Geolocation",
"Hosts",
"IPHost",
"ICMPv4Objects",
"ICMPv4Object",
"ICMPv6Objects",
"ICMPv6Object",
"IKEv1IpsecProposals",
"IKEv1Policies",
"IKEv2IpsecProposals",
"IKEv2Policies",
"InterfaceGroups",
"InterfaceGroup",
"InterfaceObjects",
"InterfaceObject",
"ISESecurityGroupTags",
"NetworkAddresses",
"IPAddresses",
"NetworkGroups",
"NetworkGroup",
"Networks",
"IPNetwork",
"PortObjectGroups",
"PortObjectGroup",
"Ports",
"ProtocolPortObjects",
"ProtocolPort",
"Ranges",
"IPRange",
"Realms",
"RealmUserGroups",
"RealmUsers",
"SecurityGroupTags",
"SecurityZones",
"SecurityZone",
"SIUrlFeeds",
"SIUrlLists",
"SLAMonitors",
"SLAMonitor",
"TunnelTags",
"URLCategories",
"URLCategory",
"URLGroups",
"URLGroup",
"URLs",
"URL",
"VariableSets",
"VariableSet",
"VlanGroupTags",
"VlanGroupTag",
"VlanTags",
"VlanTag",
]
|
#!/usr/bin/python3
"""
Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
    """Tests for the function 'max_integer()'.

    Fix: the original defined several methods all named ``test_greater``, so
    each definition shadowed the previous one and unittest discovered only the
    last case.  Every test now has a unique name; assertEqual replaces
    assertAlmostEqual since all expected values are exact ints/None.
    """

    def test_none_element(self):
        """find greater - list containing only None"""
        self.assertEqual(max_integer([None]), None)

    def test_empty(self):
        """find greater - list is empty (default argument)"""
        self.assertEqual(max_integer(), None)

    def test_single_element(self):
        """find greater - list of one element"""
        self.assertEqual(max_integer([1]), 1)

    def test_max_at_beginning(self):
        """find greater - max at the beginning"""
        self.assertEqual(max_integer([2, 1]), 2)

    def test_max_in_middle(self):
        """find greater - max in the middle"""
        self.assertEqual(max_integer([1, 3, 2]), 3)

    def test_max_at_end(self):
        """find greater - max at the end"""
        self.assertEqual(max_integer([-4, -3, -2, -1, 0, 1, 2, 3, 4]), 4)

    def test_mixed_signs(self):
        """find greater - one negative number in the list"""
        self.assertEqual(max_integer([-1, 0, 1, 2, 3, 4, 5]), 5)

    def test_all_negative(self):
        """find greater - only negative numbers in the list"""
        self.assertEqual(max_integer([-4, -3, -2, -1]), -1)
|
from dllist import *
def test_create_list():
    """An empty list can be constructed and dumped without error."""
    dll = DoubleLinkedList()
    dll.dump()
def test_push():
    """push() appends values and grows the count."""
    print("\n\nTesting Push.")
    dll = DoubleLinkedList()
    dll.push("Pthalo Blue")
    dll._invariant()
    assert dll.count() == 1
    dll.push("Ultramarine Blue")
    assert dll.count() == 2
    dll._invariant()
    dll.dump()
def test_pop():
    """pop() returns pushed values in LIFO order; an empty list pops None."""
    print("\n\nTesting pop.")
    dll = DoubleLinkedList()
    dll.push("Magenta")
    dll._invariant()
    dll.push("Alizarin")
    dll.push("Van Dyke")
    dll._invariant()
    dll.dump("before first pop()")
    assert dll.pop() == "Van Dyke"
    dll._invariant()
    dll.dump("After pop()")
    assert dll.pop() == "Alizarin"
    assert dll.pop() == "Magenta"
    dll._invariant()
    assert dll.pop() is None
def test_shift():
    """shift() inserts values; pop() drains them while count() tracks size."""
    print("\n\nTesting Shift")
    dll = DoubleLinkedList()
    dll.shift("Cadmium Orange")
    assert dll.count() == 1
    dll.shift("Carbazole Violet")
    assert dll.count() == 2
    assert dll.pop() == "Carbazole Violet"
    assert dll.count() == 1
    assert dll.pop() == "Cadmium Orange"
    assert dll.count() == 0
    dll.dump()
def test_unshift():
    """unshift() removes shifted values in LIFO order; empty list yields None."""
    print("\n\nTesting Unshift")
    dll = DoubleLinkedList()
    dll.shift("Viridian")
    dll.shift("Sap Green")
    dll.shift("Van Dyke")
    dll.dump("Before unshifting.")
    assert dll.unshift() == "Viridian"
    assert dll.unshift() == "Sap Green"
    assert dll.unshift() == "Van Dyke"
    assert dll.unshift() is None
def test_remove():
    """remove() deletes a node by value and returns the index it occupied."""
    # Fix: the progress banner wrongly said "Testing Unshift" (copy-paste).
    print("\n\nTesting Remove")
    colors = DoubleLinkedList()
    colors.push("Cobalt")
    colors.push("Zinc White")
    colors.push("Nickle Yellow")
    colors.push("Perinone")
    colors.dump("Before Removing.")
    assert colors.remove("Cobalt") == 0
    colors._invariant()
    colors.dump("before perinone")
    assert colors.remove("Perinone") == 2
    colors._invariant()
    colors.dump("after perinone")
    assert colors.remove("Nickle Yellow") == 1
    colors._invariant()
    assert colors.remove("Zinc White") == 0
    colors._invariant()
def test_first():
    """first() keeps reporting the same value as the list grows."""
    print("\n\nTesting first()")
    dll = DoubleLinkedList()
    dll.push("Cadmium Red Light")
    dll.dump("before first()")
    assert dll.first() == "Cadmium Red Light"
    dll.dump("before first()")
    dll.push("Hansa Yellow")
    assert dll.first() == "Cadmium Red Light"
    dll.dump("before first()")
    dll.shift("Pthalo Green")
    assert dll.first() == "Cadmium Red Light"
def test_last():
    """last() tracks the most recently pushed/shifted value."""
    dll = DoubleLinkedList()
    dll.push("Cadmium Red Light")
    assert dll.last() == "Cadmium Red Light"
    dll.push("Hansa Yellow")
    assert dll.last() == "Hansa Yellow"
    dll.shift("Pthalo Green")
    assert dll.last() == "Pthalo Green"
def test_get():
    """get(i) returns the i-th value, or None past the end."""
    print("\n\nTesting get()")
    dll = DoubleLinkedList()
    dll.push("Vermillion")
    dll.dump("B4 first get()")
    assert dll.get(0) == "Vermillion"
    dll.push("Sap Green")
    dll.dump("B4 next get()")
    assert dll.get(0) == "Vermillion"
    assert dll.get(1) == "Sap Green"
    dll.push("Cadmium Yellow Light")
    dll.dump("Before next get()")
    assert dll.get(0) == "Vermillion"
    assert dll.get(1) == "Sap Green"
    assert dll.get(2) == "Cadmium Yellow Light"
    assert dll.pop() == "Cadmium Yellow Light"
    assert dll.get(0) == "Vermillion"
    assert dll.get(1) == "Sap Green"
    assert dll.get(2) is None
    dll.pop()
    dll.dump("After pop()")
    assert dll.get(0) == "Vermillion"
    dll.pop()
    assert dll.get(0) is None
def test_reverse():
    """reverse() flips the order of stored values in place."""
    print("\n\nTesting Reverse")
    colors = DoubleLinkedList()
    colors.push("Vermillion")
    colors.push("Sap Green")
    colors.push("Cadmium Yellow Light")
    colors.push("Hansa Yellow")
    assert colors.get(0) == "Vermillion"
    assert colors.get(1) == "Sap Green"
    assert colors.get(2) == "Cadmium Yellow Light"
    assert colors.get(3) == "Hansa Yellow"
    colors.dump()
    colors.reverse()
    colors.dump()
    # Fix: compare with == instead of `is`; identity of equal strings is an
    # interning accident, not a guarantee.
    assert colors.get(0) == "Hansa Yellow"
    assert colors.get(1) == "Cadmium Yellow Light"
    assert colors.get(2) == "Sap Green"
    assert colors.get(3) == "Vermillion"
|
"""
token url =
https://oauth.vk.com/authorize?client_id=6320433&display=page&scope=140492191&response_type=token&v=5.8
"""
# vk settings
TOKEN = "971d715516dfe12c4321ec449d531a63b78dff794ee1682e849ff1e069bdecec47191be1e48b8051812b1"
API_VERSION = "5.69"
MAIN_USER_ID = 69128170
# protege settings
ONTOLOGY_NAME = "OntologyVKontakteUsers"
|
import mimo
# Module state: three 8x8 grids of RGB444 color ints.
_backbuffer = []        # per-pixel diff to push to the hardware
_image = []             # last rendered frame
_buffer = []            # frame currently being drawn
_current_color = 0xf00  # color used by plot()


def _zero_grid():
    """Return a fresh 8x8 grid of zeros."""
    return [[0] * 8 for _ in range(8)]


def reset():
    """Reset all three 8x8 buffers to all-zero grids.

    Fix/idiom: the original repeated the same 8x8 literal three times;
    the grids are now built by a helper (fresh lists each call, as before).
    """
    global _backbuffer
    global _image
    global _buffer
    _backbuffer = _zero_grid()
    _image = _zero_grid()
    _buffer = _zero_grid()


# Initialize the buffers at import time.
reset()
def clear():
    """Blank the physical LED matrix, then reset the local frame buffers."""
    mimo.clean_matrix()
    reset()
def renderBuffer(image):
    """Diff *image* against the last rendered frame.

    Updates _image to match *image* and records per-pixel changes in
    _backbuffer: -1 marks a pixel turned off, a color value marks a pixel
    (re)painted, 0 marks "unchanged".

    Fix: the original ignored its *image* parameter and diffed against the
    module-global _buffer instead; since the only caller passes _buffer the
    behavior is unchanged, but the function now honors its argument.
    """
    global _image
    global _backbuffer
    for j in range(0, 8):
        for i in range(0, 8):
            new = image[j][i]
            if _image[j][i] != new:
                _backbuffer[j][i] = -1 if new == 0 else new
                _image[j][i] = new
            else:
                _backbuffer[j][i] = 0
# assuming image = 8x8 pixels
def drawImage(image, x=0, y=0, blend=0xfff):
global _buffer
x = int(x)
y = int(y)
width = len(image[0])
height = len(image)
for j in range(0, 8):
for i in range(0, 8):
if i-x<0 or j-y<0 or j-y>=height or i-x>=width or image[j-y][i-x] == 0: continue
_buffer[j][i] = image[j-y][i-x]&blend
def drawMonoPixels(pixels, x=0, y=0):
    """Plot every nonzero cell of *pixels* at offset (x, y) in the current color."""
    x = int(x)
    y = int(y)
    rows = len(pixels)
    cols = len(pixels[0])
    for j in range(8):
        for i in range(8):
            sx = i - x
            sy = j - y
            if sx < 0 or sy < 0 or sy >= rows or sx >= cols:
                continue
            if pixels[sy][sx] == 0:
                continue  # zero cells leave the buffer untouched
            plot(i, j)
def render():
    """Flush the draw buffer to the LED matrix.

    Diffs the current frame against the previous one (renderBuffer), clears
    the draw buffer for the next frame, then sends only the changed pixels,
    grouped by color, in the encoding mimo.display_image expects:
    [R, G, B, pixel-index..., 0] per color, with each 4-bit channel doubled
    to 8 bits.
    """
    global _buffer
    renderBuffer(_buffer)
    # Start the next frame from an empty draw buffer.
    _buffer = [
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0]
    ]
    # Group changed pixel indices (j*8 + i) by their new color; -1 (pixel
    # turned off) is sent as color 0.
    colors = {}
    for j in range(0, 8):
        for i in range(0, 8):
            color = _backbuffer[j][i]
            if color != 0:
                if color == -1: color = 0
                if color not in colors:
                    colors[color] = []
                colors[color].append(j*8 + i)
    encoded = []
    for color in colors:
        red = (color>>8)
        green = (color >> 4)&0xf
        blue = color&0xf
        # Expand each 4-bit channel to 8 bits (0xA -> 0xAA).
        encoded.append((red<<4) + red)
        encoded.append((green<<4) + green)
        encoded.append((blue<<4) + blue)
        encoded += colors[color] + [0]
    if len(encoded) == 0:
        return  # nothing changed; skip the hardware call
    mimo.display_image(encoded)
def plot(x, y):
    """Set draw-buffer pixel (x, y) to the current drawing color."""
    global _buffer
    row = _buffer[y]
    row[x] = _current_color
def plotLineLow(x0, y0, x1, y1):
    """Bresenham line for shallow slopes (|dy| <= dx), drawn left to right."""
    dx = x1 - x0
    dy = y1 - y0
    ystep = 1
    if dy < 0:
        ystep = -1
        dy = -dy
    err = 2 * dy - dx
    y = y0
    for x in range(x0, x1 + 1):
        plot(x, y)
        if err > 0:
            y += ystep
            err -= 2 * dx
        err += 2 * dy
def plotLineHigh(x0, y0, x1, y1):
    """Bresenham line for steep slopes (|dx| <= dy), drawn top to bottom."""
    dx = x1 - x0
    dy = y1 - y0
    xstep = 1
    if dx < 0:
        xstep = -1
        dx = -dx
    err = 2 * dx - dy
    x = x0
    for y in range(y0, y1 + 1):
        plot(x, y)
        if err > 0:
            x += xstep
            err -= 2 * dy
        err += 2 * dx
def plotLine(x0, y0, x1, y1):
    """Draw a line between two points, dispatching on slope steepness.

    Endpoints are swapped as needed so each helper always iterates forward.
    """
    shallow = abs(y1 - y0) < abs(x1 - x0)
    if shallow:
        if x0 > x1:
            plotLineLow(x1, y1, x0, y0)
        else:
            plotLineLow(x0, y0, x1, y1)
    elif y0 > y1:
        plotLineHigh(x1, y1, x0, y0)
    else:
        plotLineHigh(x0, y0, x1, y1)
def drawRect(x, y, width, height):
    """Outline a width x height rectangle with top-left corner at (x, y)."""
    for col in range(x, x + width):
        plot(col, y)               # top edge
        plot(col, y + height - 1)  # bottom edge
    for row in range(y + 1, y + height - 1):
        plot(x, row)               # left edge (corners already drawn)
        plot(x + width - 1, row)   # right edge
def setColor(color):
    """Set the current drawing color (an RGB444 int) used by plot()."""
    global _current_color
    _current_color = color
# got a 3 value array, [r, g, b]
def setColorRGB(color):
    """Set the current drawing color from an [r, g, b] list of 0-255 values.

    Fix: the original called the undefined name ``getColorRGB`` and raised
    NameError at runtime; the conversion helper is ``getColorFromRGB``.
    """
    global _current_color
    _current_color = getColorFromRGB(color)
def getColorFromRGB(color):
    """Pack an [r, g, b] list of 0-255 components into one RGB444 int."""
    red = color[0] >> 4
    green = color[1] >> 4
    blue = color[2] >> 4
    return (red << 8) + (green << 4) + blue
# Map a 0-255 wheel position to an [r, g, b] triple.
def wheel(pos):
    """Return the [r, g, b] color at *pos* (0-255) on a rainbow wheel."""
    pos = 255 - pos
    if pos < 85:
        return [255 - pos * 3, 0, pos * 3]
    if pos < 170:
        pos -= 85
        return [0, pos * 3, 255 - pos * 3]
    pos -= 170
    return [pos * 3, 255 - pos * 3, 0]
def get_color_wheel(pos):
    """Return the RGB444 color at *pos* (0-255) on the rainbow wheel."""
    rgb = wheel(pos)
    return getColorFromRGB(rgb)
|
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save, pre_save, m2m_changed
from django.utils.text import slugify
class Category(models.Model):
    """A named grouping of Words.

    NOTE(review): null=True has no effect on ManyToManyField.
    """
    name = models.CharField(max_length=45)
    word = models.ManyToManyField('Word', related_name="category", blank=True, null=True)

    def __unicode__(self):  # Python 2 string representation
        return self.name
class Tag(models.Model):
    """A free-form tag attachable to Words.

    NOTE(review): null=True has no effect on ManyToManyField.
    """
    name = models.CharField(max_length=45)
    word = models.ManyToManyField('Word', related_name="tag", blank=True, null=True)

    def __unicode__(self):  # Python 2 string representation
        return self.name
BOOK_NAME = (
('nce3', 'NCE3'),
('nce4', 'NCE4'),
('bbc', 'BBC'),
('voa', 'VOA'),
('cctvnews', 'CCTVNEWS'),
('mail', 'MAIL'),
('life', 'LIFE'),
('20000', '20000'),
('22000', '22000'),
('100days', '100days'),
('IELTS', 'IELTS'),
('BSWX', 'BSWX'),
('YOUDAO', 'YOUDAO'),
('other', 'Other'),
)
# Create your models here.
class Word(models.Model):
    """A vocabulary word with study progress and symmetric word-relation links."""
    name = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(max_length=100, blank=True)  # filled from name by the signal handlers
    phonetic = models.CharField(max_length=45, null=True, blank=True)
    explain = models.TextField(max_length=500,blank=True, null=True, default = '')
    progress = models.DecimalField(max_digits=50, decimal_places=0, default = 0 )  # study progress, 0-100
    in_plan = models.BooleanField(default=False)      # part of the current study plan
    is_favorite = models.BooleanField(default=False)
    # members = models.ManyToManyField('Word', through='Membership')
    # linked_word = models.ManyToManyField('Word', related_name='related_word', blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField.
    linked_word = models.ManyToManyField('Word', blank=True, null=True)
    etyma_word = models.ManyToManyField('Word', related_name='etyma_word_reverse', blank=True, null=True)
    resemblance_word = models.ManyToManyField('Word', related_name='resemblance_word_reverse', blank=True, null=True)
    semantic_word = models.ManyToManyField('Word', related_name='semantic_word_reverse', blank=True, null=True)
    antonymy_word = models.ManyToManyField('Word', related_name='antonymy_word_reverse', blank=True, null=True)
    book = models.CharField(max_length=120, choices=BOOK_NAME, blank=True, null=True)
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)  # set on create
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)    # set on every save

    class Meta:
        ordering = 'name',

    def __unicode__(self):  # Python 2 string representation
        return self.name

    def get_absolute_url(self):
        """Return the detail URL for this word, or '#' if URL reversing fails."""
        try:
            # return reverse("word_detail", kwargs={"pk": self.pk})
            return reverse("word_detail", kwargs={"slug": self.slug})
        except:
            return '#'

    def reading_required_words(self, **kwargs):
        """Return the next/previous filter; defaults to unfinished in-plan words."""
        if not kwargs:
            param = {'in_plan':True, 'progress__lt':100}
            return param
        return kwargs

    def get_next_by_name(self, field='name', **kwargs):
        """Return the next word by name within the reading filter, or None."""
        field = self.__class__._meta.get_field(field, 'name')
        param = self.reading_required_words(**kwargs)
        try:
            return self._get_next_or_previous_by_FIELD(field, is_next=True, **param)
        except Word.DoesNotExist:
            return None

    def get_previous_by_name(self, field='name', **kwargs):
        """Return the previous word by name within the reading filter, or None."""
        field = self.__class__._meta.get_field(field, 'name')
        param = self.reading_required_words(**kwargs)
        try:
            return self._get_next_or_previous_by_FIELD(field, is_next=False, **param)
        except Word.DoesNotExist:
            return None
def get_related_name_reverse(name):
    """Map a WordExp relation field name to the matching Word M2M field name.

    Returns None for unknown names.
    """
    return {
        'etyma': 'etyma_word',
        'resemblance': 'resemblance_word',
        'semantic': 'semantic_word',
        'antonymy': 'antonymy_word',
    }.get(name)
# def get_related_word(name):
def save_related_words(instance1, instance2, related_name_reverse):
    """Add instance1 to instance2's reverse relation set if not already linked.

    Returns True when a link was added, False otherwise (including when
    related_name_reverse is falsy or both words share the same name).
    """
    if not related_name_reverse:
        return False
    updated = False
    object1_reserve_set = getattr(instance2, related_name_reverse).all()
    # if not (instance1 in object1_reserve_set) and not (instance1 is instance2):
    # Compare by name via a DB query rather than object identity to avoid
    # duplicate links between equal-but-distinct instances.
    if not getattr(instance2, related_name_reverse).filter(name=instance1.name).count() \
            and not (instance1.name == instance2.name):
        print "{} add {}".format(instance2.name, instance1.name)
        updated = True
        getattr(instance2, related_name_reverse).add(instance1) # this will trigger another signal
    else:
        # print "instance2 related set is {}".format(object1_reserve_set)
        pass
    return updated
def save_words(instance, name):
    """Mirror the WordExp links named *name* onto Word's symmetric M2M fields.

    For each WordExp in instance.<name>, look up the Word of the same name and
    link the pair in both directions (on the specific relation field and on
    linked_word); re-save instance once if anything changed.
    """
    ever_updated = False
    if hasattr(instance, name):
        wordexp_set = getattr(instance, name).all()
        for _ in wordexp_set:
            word = Word.objects.filter(name=_.name).first()
            if word:
                related_name_reverse = get_related_name_reverse(name)
                if related_name_reverse:
                    saved = [False, False]
                    saved[0] = save_related_words(instance, word, related_name_reverse)
                    saved[1] = save_related_words(word,instance, related_name_reverse)
                    if saved[0] or saved[1]:
                        ever_updated = True
                        word.save()
                # (legacy inline linking retained below for reference)
                # if not (instance in word.linked_word.all()) and not (instance is word):
                #     print ">>>>>>> {} add {}".format(word.name, instance.name)
                #     updated[0] = True
                #     ever_updated = True
                #     word.linked_word.add(instance) # this will trigger another signal
                # else:
                #     print "a ha 1, word.linked_word.all is {}".format(word.linked_word.all())
                updated = [False, False]
                updated[0] = save_related_words(instance, word, 'linked_word')
                # this doesn't work? add m2m_changed to complete this reverse action
                # if not (word in instance.linked_word.all()) and not (instance is word):
                #     print "<<<<<<<< {} add {}".format(instance.name, word.name)
                #     updated[1] = True
                #     instance.linked_word.add(word)
                #     ever_updated = True
                # else:
                #     print "a ha 2, instance.linked_word.all is {}".format(instance.linked_word.all())
                updated[1] = save_related_words(word, instance, 'linked_word')
                if updated[0] or updated[1]:
                    ever_updated = True
                    word.save()
    if ever_updated:
        instance.save()
def words_changed(sender, instance, **kwargs):
    """Signal-style handler: refresh the slug and propagate WordExp relations."""
    instance.slug = slugify(instance.name)
    if instance:
        save_words(instance, 'wordexp')
        save_words(instance, 'etyma')
        save_words(instance, 'resemblance')
        save_words(instance, 'semantic')
        save_words(instance, 'antonymy')
# To remove a relationship without triggering a recursive deadlock, delete one
# word link first, and DON'T save a second time before the link has also been
# deleted from the related word.
def save_words1(instance, name):
    """Make the M2M field *name* symmetric: every related word links back."""
    qs = getattr(instance, name)
    for _ in qs.get_queryset():
        obj = getattr(_,name)
        if not instance in obj.get_queryset():
            obj.add(instance)
            _.save()
def words_changed1(sender, instance, **kwargs):
    """post_save handler: refresh slug and symmetrize the four relation fields."""
    instance.slug = slugify(instance.name)
    if instance:
        save_words1(instance, 'etyma_word')
        save_words1(instance, 'resemblance_word')
        save_words1(instance, 'semantic_word')
        save_words1(instance, 'antonymy_word')
def words_changed2(sender, instance, **kwargs):
    """post_save handler: ensure every Word has a sentence-less WordExp stub."""
    if not instance.wordexp.get_queryset().filter(sentence__isnull=True).count():
        obj = WordExp(name=instance.name)
        obj.save()
        obj.word.add(instance)
        obj.save()


# Run both handlers whenever a Word is saved.
post_save.connect(words_changed1, sender=Word)
post_save.connect(words_changed2, sender=Word)
def toppings_changed2(sender, instance, **kwargs):
    """m2m_changed handler that keeps Word.linked_word links symmetric."""
    # be careful, this can introduce recursive calling if not processed properly
    for _ in instance.linked_word.all():
        if not (instance in _.linked_word.all()) and not (_ is instance):
            _.linked_word.add(instance)
            _.save()
        if not (_ in instance.linked_word.all()) and not (_ is instance):
            instance.linked_word.add(_)
            instance.save()


m2m_changed.connect(toppings_changed2, sender=Word.linked_word.through)
def move_all_wordexp_relationship_to_word():
    """One-off migration: copy WordExp relation links onto Word's own M2M fields.

    Fix: the resemblance/semantic/antonymy branches called ``.add()`` with no
    argument, which raises TypeError at runtime; they now add the looked-up
    Word exactly like the etyma branch does.
    """
    from engdict.models import Word
    for _ in Word.objects.all():
        if _.etyma.count():
            for __ in _.etyma.get_queryset():
                obj = Word.objects.filter(name=__.name).first()
                _.etyma_word.add(obj)
        if _.resemblance.count():
            for __ in _.resemblance.get_queryset():
                obj = Word.objects.filter(name=__.name).first()
                _.resemblance_word.add(obj)
        if _.semantic.count():
            for __ in _.semantic.get_queryset():
                obj = Word.objects.filter(name=__.name).first()
                _.semantic_word.add(obj)
        if _.antonymy.count():
            for __ in _.antonymy.get_queryset():
                obj = Word.objects.filter(name=__.name).first()
                _.antonymy_word.add(obj)
        _.save()
RELATION = (
('Self', 'Self'),
('synonym', 'Synonym'),
('antonym', 'Antonym'),
('homograph', 'Homograph'),
('etymon', 'etymon'),
)
def toppings_changed(sender, **kwargs):
    """Signal handler: link a WordExp's etyma to the Word sharing its name.

    Fixes: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt —
    narrowed to Exception (still deliberately best-effort); an unused local
    (``name``) was removed.
    """
    instance = kwargs.get("instance", None)
    if instance:
        try:
            word = Word.objects.filter(name=instance.name).first()
            instance.etyma.add(word)
        except Exception:
            pass  # best-effort linking; lookup/add failures are ignored
class WordExpQuerySet(models.query.QuerySet):
    """QuerySet restricting WordExp rows to those with a usable sentence."""
    def notempty(self):
        # HOW to exclude \r\n? why sentence__isnull=True not work?
        # Collect truthy sentences in Python first, then filter on them; this
        # works around sentences that are blank/whitespace but not NULL.
        sentences = [_.sentence for _ in self.exclude(sentence__isnull=True) if _.sentence]
        return self.filter(sentence__in=sentences)
        # return self.filter(sentence__isnull=False).distinct()
        # return self.filter(sentence__icontains=' ')
        # return self.exclude(sentence__startswith='')
class WordExpManager(models.Manager):
    """Manager whose all() hides WordExp rows lacking a usable sentence."""
    def get_queryset(self):
        return WordExpQuerySet(self.model, using=self._db)

    def all(self, *args, **kwargs):
        # NOTE(review): overriding all() to filter is surprising to callers.
        return self.get_queryset().notempty()
        # return super(WordExpManager, self).filter(sentence__isnull=False)
class WordExp(models.Model):
    """An explanation/usage record for a word, with legacy relation links."""
    name = models.CharField(max_length=45)
    phonetic = models.CharField(max_length=45, null=True, blank=True)
    explain = models.CharField(max_length=120, default = '')
    sentence = models.TextField(blank=True, null=True)
    book = models.CharField(max_length=120, choices=BOOK_NAME)
    ##/// legacy relation fields (see Word.*_word for the current ones)
    etyma = models.ManyToManyField(Word, related_name='etyma', blank=True)
    resemblance = models.ManyToManyField(Word, related_name='resemblance', blank=True)
    semantic = models.ManyToManyField(Word, related_name='semantic', blank=True)
    antonymy = models.ManyToManyField(Word, related_name='antonymy', blank=True)
    #//
    related = models.ManyToManyField(Word, related_name='related', blank=True)
    word = models.ManyToManyField(Word, related_name='wordexp', blank=True)
    relation = models.CharField(max_length=120, default='Self', choices=RELATION)
    etymon = models.CharField(max_length=45, null=True, blank=True)
    objects = WordExpManager()  # custom manager: all() hides sentence-less rows

    def __unicode__(self):  # Python 2 string representation
        return self.explain

    @property
    def exp(self):
        """The word name joined with its explanation."""
        return "{} {}".format(self.name, self.explain)
# m2m_changed.connect(toppings_changed, sender=WordExp.etyma.through)
DICT = (
('youdao', 'YOUDAO'),
('kingsoft', 'kingsoft'),
('nce3', 'nce3'),
('nce4', 'nce4'),
)
class WordDict(models.Model):
    """A dictionary entry for a Word, sourced from one of the DICT books."""
    word = models.ForeignKey(Word)
    phonetic = models.CharField(max_length=45, null=True, blank=True)
    explain = models.TextField(blank=True, null=True)
    book = models.CharField(max_length=120, choices=DICT, default='youdao')

    def __unicode__(self):  # Python 2 string representation
        return self.word.name
class ExampleWord(models.Model):
    """An example sentence for a Word, tagged with its source book."""
    word = models.ForeignKey(Word)
    explain = models.CharField(max_length=120, default = '')
    sentence = models.TextField(blank=True, null=True)
    book = models.CharField(max_length=120, choices=BOOK_NAME)

    def __unicode__(self):  # Python 2 string representation
        return self.word.name
class Membership(models.Model):
    """Through-model linking a Word to an ExampleWord with relation metadata."""
    word = models.ForeignKey(Word)
    exampleWord = models.ForeignKey(ExampleWord)
    etymon = models.CharField(max_length=45)
    relation = models.CharField(max_length=120, choices=RELATION)

    def __unicode__(self):
        # Fix: the original referenced nonexistent fields word_primary and
        # word_secondary (AttributeError); show the two linked objects instead.
        return u"{} {}".format(self.word, self.exampleWord)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 15:26:31 2019
@author: xinyancai
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score

# Load the HELOC credit-risk dataset and recode the label: 'Good'/'Bad' are
# mapped to -1/-2 first so that rows whose values are ALL negative (special
# missing codes) can be dropped, then mapped back to 1/0.
# NOTE(review): chained assignment like df.X[cond] = v triggers pandas'
# SettingWithCopyWarning and may not modify the frame; .loc would be safer.
df = pd.read_csv('~/Desktop/data/heloc_dataset_v1.csv')
df.RiskPerformance[df.RiskPerformance == 'Good'] = -1
df.RiskPerformance[df.RiskPerformance == 'Bad'] = -2
da = df.loc[~(df < 0).all(axis=1)]  # keep rows with at least one valid value
da.RiskPerformance[da.RiskPerformance == -1] = 1
da.RiskPerformance[da.RiskPerformance == -2] = 0
da[da < 0] = np.nan  # remaining negative special codes become missing values
# Drop the three columns with the heaviest missingness.
data = da.drop(['MSinceMostRecentDelq','MSinceMostRecentInqexcl7days','NetFractionInstallBurden'],axis=1)
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(data, test_size=0.2, random_state=1)
risk = train_set.copy().drop("RiskPerformance", axis=1)
risk_labels = train_set["RiskPerformance"].copy()
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; SimpleImputer is the replacement.
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy = 'mean')
risk_prepared = imputer.fit_transform(risk)  # mean-impute training features
X = data.drop("RiskPerformance", axis=1)
Y = data["RiskPerformance"].copy()
X_prepared = imputer.transform(X)
risk_prepared = risk_prepared.astype('int')
risk_labels = risk_labels.astype('int')
import pickle
import warnings
# Cache the prepared arrays so the Streamlit app can reload without re-fitting.
pickle.dump(risk_prepared, open('risk_prepared.sav','wb'))
pickle.dump(X_prepared, open('X_prepared.sav', 'wb'))
pickle.dump(Y, open('Y.sav', 'wb'))
import streamlit as st
from sklearn import metrics
# NOTE(review): these load results are discarded; the in-memory globals above
# are what the rest of the script uses.
pickle.load(open('risk_prepared.sav','rb'))
pickle.load(open('X_prepared.sav', 'rb'))
pickle.load(open('Y.sav', 'rb'))
dic = {0: 'Bad', 1: 'Good'}  # numeric label -> human-readable class name
def _slider_values(seed):
    """Render the 20 HELOC feature sliders in the sidebar, seeded from *seed*.

    Returns the chosen values as a list in dataset column order. Each slider
    spans [0, upper] with step 0.1, matching the original per-feature bounds.
    """
    specs = [
        ('ExternalRiskEstimate', 110.0),
        ('MSinceOldestTradeOpen', 810.0),
        ('MSinceMostRecentTradeOpen', 400.0),
        ('AverageMInFile', 400.0),
        ('NumSatisfactoryTrades', 110.0),
        ('NumTrades60Ever2DerogPubRech', 20.0),
        ('NumTrades90Ever2DerogPubRec', 20.0),
        ('PercentTradesNeverDelq', 110.0),
        ('MaxDelq2PublicRecLast12Mh', 10.0),
        ('MaxDelqEver', 10.0),
        ('NumTotalTrades', 110.0),
        ('NumTradesOpeninLast12M', 20.0),
        ('PercentInstallTrades', 110.0),
        ('NumInqLast6M', 70.0),
        ('NumInqLast6Mexcl7days', 70.0),
        ('NetFractionRevolvingBurden', 240.0),
        ('NumRevolvingTradesWBalance', 40.0),
        ('NumInstallTradesWBalance', 25.0),
        ('NumBank2NatlTradesWHighUtilization', 20.0),
        ('PercentTradesWBalance', 110.0),
    ]
    return [st.sidebar.slider(label, 0.0, upper, seed[i], 0.1)
            for i, (label, upper) in enumerate(specs)]


def _report(final_model, features, label):
    """Predict on *features* and render risk, thresholded class and *label*."""
    final_predictions = final_model.predict(np.array(features).reshape(1, -1))[0]
    # "Risk" is the complement of the model output (same as the original code).
    st.write('Risk: ', 1 - final_predictions)
    st.write('Final Predictions: ', dic[0 if final_predictions < 0.5 else 1])
    st.text(label)


def test_demo(index):
    """Interactive demo: tune the 20 HELOC features and classify with a model.

    *index* selects the dataset row whose values seed the sliders. Each
    branch grid-searches its model family on the training split (slow;
    recomputed on every streamlit rerun) and reports the prediction for
    the current slider values. The six branches previously duplicated the
    slider/report code; it is factored into _slider_values/_report.
    """
    features = _slider_values(X_prepared[index])
    alg = ['Random Forest', 'Linear Model', 'Support Vector Machine', 'Bagging', 'Boosting', 'Neural Network']
    classifier = st.selectbox('Which algorithm?', alg)
    if classifier == 'Random Forest':
        from sklearn.ensemble import RandomForestRegressor
        param_grid = [{'n_estimators': [3, 10, 20, 30], 'max_features': [2, 4, 6, 8]},
                      {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}]
        grid_search = GridSearchCV(RandomForestRegressor(), param_grid, cv=5,
                                   scoring='neg_mean_squared_error')
        grid_search.fit(risk_prepared, risk_labels)
        # Regression scoring is negated MSE, hence the sign flip for RMSE.
        cvres = grid_search.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(-mean_score), params)
        _report(grid_search.best_estimator_, features, 'Random Forest Chosen')
    elif classifier == 'Linear Model':
        from sklearn import linear_model
        param_grid_linear = [{'C': [1, 10, 100, 10**10]}]
        clf_linear = GridSearchCV(linear_model.LogisticRegression(), param_grid_linear, cv=5)
        clf_linear.fit(risk_prepared, risk_labels)
        cvres = clf_linear.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(mean_score), params)
        _report(clf_linear.best_estimator_, features, 'Linear Model Chosen')
    elif classifier == 'Support Vector Machine':
        from sklearn.svm import SVC
        param_grid_svc = [{'C': [0.01, 0.1, 1, 10], 'kernel': ['rbf', 'linear', 'poly'],
                           'max_iter': [-1], 'random_state': [1]}]
        clf_svm = GridSearchCV(SVC(gamma='scale'), param_grid_svc, cv=5)
        clf_svm.fit(risk_prepared, risk_labels)
        cvres = clf_svm.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(mean_score), params)
        _report(clf_svm.best_estimator_, features, 'Support Vector Machine')
    elif classifier == 'Bagging':
        from sklearn import tree
        from sklearn.ensemble import BaggingClassifier
        bases = [tree.DecisionTreeClassifier(max_depth=d) for d in (1, 10, 20, 50)]
        param_grid_bagging = [{'n_estimators': [5, 10, 20, 30, 50], 'base_estimator': bases}]
        clf_bagging = GridSearchCV(BaggingClassifier(), param_grid_bagging, cv=5)
        clf_bagging.fit(risk_prepared, risk_labels)
        cvres = clf_bagging.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(mean_score), params)
        _report(clf_bagging.best_estimator_, features, 'Bagging')
    elif classifier == 'Boosting':
        from sklearn.ensemble import AdaBoostClassifier
        param_grid_boosting = [{'n_estimators': [5, 10, 20, 30, 50],
                                'learning_rate': [0.1, 0.5, 1, 10], 'random_state': [1]}]
        clf_boost = GridSearchCV(AdaBoostClassifier(), param_grid_boosting, cv=5)
        clf_boost.fit(risk_prepared, risk_labels)
        cvres = clf_boost.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(mean_score), params)
        _report(clf_boost.best_estimator_, features, 'Boosting')
    elif classifier == 'Neural Network':
        from sklearn.neural_network import MLPClassifier
        param_grid_MLP = [{'hidden_layer_sizes': [(100,)],
                           'activation': ['identity', 'logistic', 'tanh', 'relu'],
                           'solver': ['lbfgs', 'sgd', 'adam'],
                           'alpha': [0.0001, 0.001, 0.01], 'random_state': [1]}]
        clf_MLP = GridSearchCV(MLPClassifier(), param_grid_MLP, cv=5)
        clf_MLP.fit(risk_prepared, risk_labels)
        cvres = clf_MLP.cv_results_
        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(mean_score), params)
        _report(clf_MLP.best_estimator_, features, 'Neural Network')
# --- Page layout (module level: runs on every streamlit rerun) ---
st.title('Credit Risk')
if st.checkbox('show dataframe'):
    st.write(X_prepared)
number = st.text_input('Choose a row of information in the dataset(0~9870)',5)
test_demo(int(number))
|
import copy
import logging
import os
from subprocess import check_call
from typing import Any, Dict, Iterable
import yaml
from art.config import ArtConfig
from art.consts import DEFAULT_CONFIG_FILENAME
log = logging.getLogger(__name__)
def run_prepare(config: ArtConfig) -> None:
    """Run every configured prepare step as a shell command in the work dir.

    Steps execute sequentially; a non-zero exit status raises
    CalledProcessError and aborts the remaining steps.
    """
    for step in config.prepare:
        log.info("Running prepare step: %s", step)
        check_call(step, shell=True, cwd=config.work_dir)
def fork_configs_from_data(
    base_cfg: ArtConfig, cfg_data: Dict[str, Any]
) -> Iterable[ArtConfig]:
    """Yield one forked config per entry in *cfg_data*.

    A mapping without a "configs" key is treated as a single anonymous
    config. Each fork is a deep copy of *base_cfg* updated from its data;
    a named fork also gets the name appended to every destination path.
    """
    if not isinstance(cfg_data, dict):
        raise TypeError(f"Invalid configuration (must be a dict, got {cfg_data!r})")
    configs_dict = cfg_data["configs"] if "configs" in cfg_data else {None: cfg_data}
    for name, sub_data in configs_dict.items():
        fork = copy.deepcopy(base_cfg)
        fork.update_from(sub_data)
        fork.name = name or "default"
        if name:
            fork.dests = [f"{dest}/{name}" for dest in fork.dests]
        yield fork
def fork_configs_from_work_dir(
    base_cfg: ArtConfig, filename: str = DEFAULT_CONFIG_FILENAME
) -> Iterable[ArtConfig]:
    """Fork configs from a config file inside the work dir, if present.

    Reads *filename* from ``base_cfg.work_dir`` and forks one config per
    entry via fork_configs_from_data. When the file is missing, a
    non-default *filename* is an error; the default one simply yields an
    unchanged copy of *base_cfg*.
    """
    repo_cfg_path = os.path.join(base_cfg.work_dir, filename)
    if os.path.isfile(repo_cfg_path):
        log.info(f"Updating config from {repo_cfg_path}")
        with open(repo_cfg_path) as infp:
            repo_cfg_data = yaml.safe_load(infp)
        return fork_configs_from_data(base_cfg, repo_cfg_data)
    if filename != DEFAULT_CONFIG_FILENAME:
        # BUG FIX: these two messages contained the literal text "(unknown)"
        # instead of interpolating the requested filename.
        raise ValueError(f"non-default config filename {filename} not found")
    log.info(f"No config updates from file ({filename} didn't exist in source)")
    return [copy.deepcopy(base_cfg)]
|
# -*- coding: utf-8 -*-
class Solution:
    """Conway's Game of Life (LeetCode 289), computed in place.

    The next generation is staged in bit 1 of each cell while bit 0 keeps
    the current state; a final pass shifts the staged bit down.
    """
    def gameOfLife(self, board):
        """Advance *board* one generation in place; returns None."""
        rows, cols = len(board), len(board[0])
        for r in range(rows):
            for c in range(cols):
                self.computeSquare(board, r, c)
        for r in range(rows):
            for c in range(cols):
                self.updateSquare(board, r, c)
    def computeSquare(self, board, i, j):
        """Stage the next state of cell (i, j) into bit 1."""
        live = self.countLiveNeighbors(board, i, j)
        # Survival: live cell with 2 or 3 live neighbors.
        # Birth: dead cell with exactly 3 live neighbors.
        survives = board[i][j] and live in (2, 3)
        born = (not board[i][j]) and live == 3
        if survives or born:
            board[i][j] |= 2
    def countLiveNeighbors(self, board, i, j):
        """Count live neighbors using bit 0 only (ignores staged bits)."""
        return sum(cell & 1 for cell in self.getNeighbors(board, i, j))
    def getNeighbors(self, board, i, j):
        """Yield the values of every in-bounds neighbor of (i, j)."""
        rows, cols = len(board), len(board[0])
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr == 0 and dc == 0:
                    continue
                r, c = i + dr, j + dc
                if 0 <= r < rows and 0 <= c < cols:
                    yield board[r][c]
    def updateSquare(self, board, i, j):
        """Commit the staged state: shift bit 1 down into bit 0."""
        board[i][j] >>= 1
if __name__ == "__main__":
    solution = Solution()
    # Classic fixture from LeetCode #289.
    board = [
        [0, 1, 0],
        [0, 0, 1],
        [1, 1, 1],
        [0, 0, 0],
    ]
    # gameOfLife mutates the board in place and returns None.
    assert solution.gameOfLife(board) is None
    assert [
        [0, 0, 0],
        [1, 0, 1],
        [0, 1, 1],
        [0, 1, 0],
    ] == board
|
import datetime
def add_gigasecond(birth_date):
    """Return the moment exactly one gigasecond (10**9 seconds) after *birth_date*."""
    one_gigasecond = datetime.timedelta(seconds=1_000_000_000)
    return birth_date + one_gigasecond
|
from enum import Enum, auto
import socket
from typing import Dict
from Response import Response
import time
class Client:
    """Per-connection state for one connected socket.

    Tracks the protocol handshake stage (see State), the authenticated
    username, and the time of the last interaction (for idle timeouts).
    """
    def __init__(self, sock: socket.socket):
        self.sock = sock
        self.state = State.NOT_GREETED
        # Set only after successful authorization; None until then.
        self.username: "str | None" = None
        # Wall-clock timestamp (seconds) of the most recent interaction.
        self.last_interaction_time: float = time.time()
    def set_greeted(self):
        # Greeting only advances the state machine from NOT_GREETED;
        # repeated greetings are ignored.
        if self.state == State.NOT_GREETED:
            self.state = State.UNAUTHORIZED
    def set_authorized(self, username):
        """Record the authenticated username and mark the client AUTHORIZED."""
        self.username = username
        self.state = State.AUTHORIZED
    def send_response(self, response: Response):
        """Best-effort send of *response* to the client socket."""
        try:
            self.sock.send(response.message)
        except Exception:
            # bad file descriptor
            # Broken/closed sockets are silently ignored here — presumably
            # dead clients are reaped elsewhere; TODO confirm.
            pass
    def reset_interaction_timer(self):
        # Stamp "now" as the latest activity time.
        self.last_interaction_time = time.time()
    def __str__(self):
        # "username (fd)" once authorized, else just the socket fd.
        if self.username:
            return self.username + " (" + str(self.sock.fileno()) + ")"
        else:
            return str(self.sock.fileno())
class State(Enum):
    """Protocol handshake stages for a Client connection."""
    NOT_GREETED = auto()   # connection open, no greeting yet
    UNAUTHORIZED = auto()  # greeted but not logged in
    AUTHORIZED = auto()    # greeted and logged in
|
"""Functions for handling the network rules directory files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import glob
import errno
import logging
import os
import time
from treadmill import fs
from treadmill import dirwatch
from treadmill import netutils
from treadmill import sysinfo
from treadmill import zknamespace as z
from treadmill import zkutils
_LOGGER = logging.getLogger(__name__)
# Keeping things portable, '~' is reasonable separator that works on Linux and
# Windows.
#
# Windows does not like neither (:) nor (,)
#
# TODO: consider encoding the file name for safety?
_SEP = '~'
# Interval (seconds) between garbage-collection passes in PortScanner.run().
_GC_INTERVAL = 60
class EndpointsMgr(object):
    """Endpoints rule manager.

    Manages endpoint spec files for the host. Each spec is a symlink whose
    name encodes six ``~``-separated fields:
    ``appname~proto~endpoint~real_port~pid~port``
    and whose target is the owning container's directory.
    NOTE(review): an earlier docstring described a ``container_ip`` field;
    the code below reads/writes a ``pid`` field in that position — confirm.
    """
    __slots__ = (
        '_base_path',
    )
    def __init__(self, base_path):
        # Make sure rules directory exists.
        fs.mkdir_safe(base_path)
        self._base_path = os.path.realpath(base_path)
    def initialize(self):
        """Initialize the network folder by removing every existing spec."""
        for spec in os.listdir(self._base_path):
            os.unlink(os.path.join(self._base_path, spec))
    @property
    def path(self):
        """Currently managed rules directory.
        :returns:
            ``str`` -- Spec directory.
        """
        return self._base_path
    def get_specs(self):
        """Scrapes the spec directory for spec files.
        :returns:
            ``list`` -- List of endpoint spec tuples
            (appname, proto, endpoint, real_port, pid, port).
        """
        specs = []
        for entry in os.listdir(self._base_path):
            # Hidden/temporary files are skipped.
            if entry.startswith('.'):
                continue
            try:
                (appname,
                 proto,
                 endpoint,
                 real_port,
                 pid,
                 port) = entry.split(_SEP)
                specs.append((appname, proto, endpoint, real_port, pid, port))
            except ValueError:
                # Wrong number of fields — log and skip the malformed entry.
                _LOGGER.warning('Incorrect endpoint format: %s', entry)
        return specs
    def create_spec(self, appname, proto, endpoint, real_port, pid,
                    port, owner):
        """Creates a symlink whose name represents the endpoint spec.

        The link target is *owner*. An already-existing spec is tolerated
        only when it belongs to the same app; otherwise EEXIST propagates.
        """
        filename = _namify(
            appname=appname,
            proto=proto,
            endpoint=endpoint,
            real_port=real_port,
            pid=pid,
            port=port
        )
        rule_file = os.path.join(self._base_path, filename)
        try:
            os.symlink(
                owner,
                rule_file
            )
            _LOGGER.info('Created %r for %r', filename, appname)
        except OSError as err:
            if err.errno == errno.EEXIST:
                # Idempotent when the same app re-creates its own spec.
                existing_owner = os.path.basename(os.readlink(rule_file))
                if existing_owner != appname:
                    raise
            else:
                raise
    def unlink_spec(self, appname, proto, endpoint, real_port, pid,
                    port, owner):
        """Unlinks the symlink whose name represents the endpoint spec.

        Refuses (with a critical log) to remove a spec owned by a
        different app; a missing spec is logged and ignored.
        """
        filename = _namify(
            appname=appname,
            proto=proto,
            endpoint=endpoint,
            real_port=real_port,
            pid=pid,
            port=port
        )
        spec_file = os.path.join(self._base_path, filename)
        try:
            existing_owner = os.readlink(spec_file)
            if os.path.basename(existing_owner) != os.path.basename(owner):
                _LOGGER.critical('%r tried to free %r that it does not own',
                                 owner, filename)
                return
            os.unlink(spec_file)
            _LOGGER.debug('Removed %r', filename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                _LOGGER.info('endpoints spec %r does not exist.', spec_file)
            else:
                _LOGGER.exception('Unable to remove endpoint spec: %r',
                                  spec_file)
                raise
    def unlink_all(self, appname, proto=None, endpoint=None, owner=None):
        """Unlink all endpoints that match a given pattern.

        ``proto``/``endpoint`` default to glob wildcards; when *owner* is
        given, specs belonging to other owners are logged and skipped.
        """
        if proto is None:
            proto = '*'
        if endpoint is None:
            endpoint = '*'
        filename = _namify(
            appname=appname,
            proto=proto,
            endpoint=endpoint,
            real_port='*',
            pid='*',
            port='*'
        )
        pattern = os.path.join(self._base_path, filename)
        _LOGGER.info('Unlink endpoints: %s, owner: %s', pattern, owner)
        for filename in glob.glob(pattern):
            try:
                if owner:
                    existing_owner = os.readlink(filename)
                    if os.path.basename(existing_owner) != owner:
                        _LOGGER.critical(
                            '%r tried to free %r that it does not own',
                            owner,
                            filename
                        )
                        continue
                _LOGGER.info('Remove stale endpoint spec: %s', filename)
                os.unlink(filename)
            except OSError as err:
                if err.errno == errno.ENOENT:
                    _LOGGER.info('endpoints spec %r does not exist.', filename)
                else:
                    _LOGGER.exception('Unable to remove endpoint spec: %s',
                                      filename)
                    raise
def _namify(appname, proto, endpoint, real_port, pid, port):
    """Build the endpoint spec filename from its six components."""
    fields = (appname, proto, endpoint, str(real_port), str(pid), str(port))
    return _SEP.join(fields)
class PortScanner(object):
    """Scan and publish local discovery and port status info."""
    def __init__(self, endpoints_dir, zkclient, scan_interval, instance=None):
        self.endpoints_dir = endpoints_dir
        self.zkclient = zkclient
        self.scan_interval = scan_interval  # seconds between scans
        self.hostname = sysinfo.hostname()
        self.state = collections.defaultdict(dict)
        # ACL granting this host read/write/create/delete on published nodes.
        self.node_acl = zkutils.make_host_acl(self.hostname, 'rwcd')
        self.instance = instance
    def _publish(self, result):
        """Publish network info to Zookeeper as an ephemeral node."""
        # Multiple scanner instances per host are disambiguated with "#".
        if self.instance:
            instance = '#'.join([self.hostname, self.instance])
        else:
            instance = self.hostname
        zkutils.put(
            self.zkclient,
            z.path.discovery_state(instance),
            result,
            ephemeral=True,
            acl=[self.node_acl]
        )
    def run(self, watchdog_lease=None):
        """Scan the running directory in an endless loop, publishing results.

        Garbage-collects stale specs every _GC_INTERVAL seconds and
        heartbeats the optional watchdog lease on every iteration.
        """
        garbage_collect(self.endpoints_dir)
        last_gc = time.time()
        while True:
            result = self._scan()
            self._publish(result)
            if watchdog_lease:
                watchdog_lease.heartbeat()
            if time.time() - last_gc > _GC_INTERVAL:
                garbage_collect(self.endpoints_dir)
                last_gc = time.time()
            time.sleep(self.scan_interval)
        # NOTE(review): the statements below are unreachable — the loop
        # above never breaks; confirm whether a shutdown path was intended.
        _LOGGER.info('service shutdown.')
        if watchdog_lease:
            watchdog_lease.remove()
    def _scan(self):
        """Scan all container ports; return {real_port: 1|0} (1 = listening)."""
        container_ports = collections.defaultdict(dict)
        container_pids = dict()
        for entry in os.listdir(self.endpoints_dir):
            if entry.startswith('.'):
                continue
            _LOGGER.debug('Entry: %s', entry)
            # NOTE(review): this unpack order (endpoint before proto) differs
            # from _namify's write order (proto before endpoint). Only
            # appname/real_port/pid/port are used, so the swap is harmless,
            # but the local names are misleading — confirm and align.
            appname, endpoint, proto, real_port, pid, port = entry.split(_SEP)
            container_pids[appname] = pid
            port = int(port)
            real_port = int(real_port)
            container_ports[appname][port] = real_port
        real_port_status = dict()
        for appname, pid in container_pids.items():
            # Ports the container process actually listens on.
            open_ports = netutils.netstat(pid)
            _LOGGER.debug(
                'Container %s listens on %r',
                appname, list(open_ports)
            )
            for port, real_port in container_ports[appname].items():
                if port in open_ports:
                    real_port_status[real_port] = 1
                else:
                    real_port_status[real_port] = 0
        return real_port_status
class EndpointPublisher(object):
    """Manages publishing endpoints to Zookeeper.

    Watches the endpoints directory and republishes the full sorted set of
    endpoint info strings whenever specs are added or removed.
    """
    # Cap on dirwatch events processed per loop iteration.
    _MAX_REQUEST_PER_CYCLE = 10
    def __init__(self, endpoints_dir, zkclient, instance):
        self.endpoints_dir = endpoints_dir
        self.zkclient = zkclient
        self.up_to_date = True
        self.state = set()
        self.hostname = sysinfo.hostname()
        self.node_acl = zkutils.make_host_acl(self.hostname, 'rwcd')
        self.instance = instance
    def _on_created(self, path):
        """Add entry to the discovery set and mark set as not up to date."""
        # Ignore hidden/temporary files.
        if os.path.basename(path).startswith('.'):
            return
        entry = self._endpoint_info(path)
        _LOGGER.info('Added rule: %s', entry)
        self.state.add(entry)
        self.up_to_date = False
    def _on_deleted(self, path):
        """Remove entry from the discovery set and mark set as not up to date."""
        entry = self._endpoint_info(path)
        _LOGGER.info('Removed rule: %s', entry)
        self.state.discard(entry)
        self.up_to_date = False
    def _publish(self):
        """Publish updated discovery info to Zookeeper (ephemeral node)."""
        _LOGGER.info('Publishing discovery info')
        state = list(sorted(self.state))
        if self.instance:
            instance = '#'.join([self.hostname, self.instance])
        else:
            instance = self.hostname
        zkutils.put(self.zkclient, z.path.discovery(instance),
                    state,
                    ephemeral=True, acl=[self.node_acl])
    def _endpoint_info(self, path):
        """Create the published endpoint info string from a spec file path."""
        filename = os.path.basename(path)
        # NOTE(review): _namify writes appname~proto~endpoint~real_port~...,
        # so these local names appear swapped (endpoint<->proto) and "port"
        # is the real_port field. The published string format depends on
        # this — confirm against discovery consumers before changing.
        appname, endpoint, proto, port, _ = filename.split(_SEP, 4)
        return ':'.join([appname, endpoint, proto, port])
    def run(self):
        """Load and publish initial state, then watch the dir forever."""
        watch_dir = self.endpoints_dir
        _LOGGER.info('Starting endpoint publisher: %s', watch_dir)
        watcher = dirwatch.DirWatcher(watch_dir)
        watcher.on_created = self._on_created
        watcher.on_deleted = self._on_deleted
        # Seed the state from specs that already exist.
        for fname in os.listdir(watch_dir):
            self._on_created(fname)
        self._publish()
        self.up_to_date = True
        while True:
            if watcher.wait_for_events(timeout=1):
                watcher.process_events(max_events=self._MAX_REQUEST_PER_CYCLE)
            # Batch republish once per cycle rather than per event.
            if not self.up_to_date:
                self._publish()
                self.up_to_date = True
def garbage_collect(endpoints_dir):
    """Remove every dangling endpoint symlink (one whose owner is gone)."""
    for name in os.listdir(endpoints_dir):
        link = os.path.join(endpoints_dir, name)
        try:
            # stat() follows the symlink; ENOENT means the target vanished.
            os.stat(link)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            _LOGGER.warning('Reclaimed: %r', name)
            try:
                os.unlink(link)
            except OSError as err:
                # A concurrent unlink is fine; anything else propagates.
                if err.errno != errno.ENOENT:
                    raise
|
# http://www.cs.nthu.edu.tw/~wkhon/ds/ds10/tutorial/tutorial2.pdf
# https://youtu.be/vXPL6UavUeA
# https://youtu.be/QCnANUfgC-w
import string
import collections
def parse_command(user_input):
    """Handle a "/" command: /exit quits, /help prints usage, else complain."""
    if user_input[1:] == "exit":
        print("Bye!")
        exit()
    elif user_input[1:] == "help":
        print("""This calculator evaluates expressions with addition, subtraction,
    multiplication, division, power, unary operators and parenthesis.
    It can also assign and use variables. Variable names can contain only
    Latin letters. Type \"/exit\" to quit.""")
    else:
        print("Unknown command")
def parse_assignment(user_input):
    """Process "name = value": value is an int literal or a known variable.

    Invalid targets print "Invalid identifier"; unresolvable values print
    "Invalid assignment". Results go into the module-level `variables` dict.
    """
    # maxsplit=1 keeps chained "=" in the right-hand side, which then
    # fails the numeric/known-variable checks below.
    target, source = (part.strip() for part in user_input.split("=", 1))
    if not target.isalpha():
        print("Invalid identifier")
        return
    if source.isnumeric():
        variables[target] = int(source)
    elif source in variables:
        variables[target] = variables[source]
    else:
        print("Invalid assignment")
def parse_expression(infix):
    """Evaluate an infix expression string; return its value or an error message."""
    postfix = infix_to_postfix(infix)
    if postfix == "Error":
        return "Invalid expression"
    return postfix_to_result(postfix)
def infix_to_postfix(infix):
    """Convert an infix expression string into a postfix token list.

    Shunting-yard variant: integers and variable names go straight to the
    output; operators are staged on a stack by precedence. Runs of unary
    "+" collapse to one; runs of "-" collapse by sign parity. Returns the
    string "Error" for malformed input (illegal characters, doubled "*"/"/"
    or "^", unbalanced parentheses).
    """
    result = []
    stack = collections.deque()
    i = 0
    while i < len(infix):
        # Check for integer or variable
        if i < len(infix) and (infix[i] in string.digits or infix[i] in string.ascii_letters):
            # Find end of integer and append it to result, reset sign to positive
            if infix[i] in string.digits:
                integer = 0
                while i < len(infix) and infix[i] in string.digits:
                    integer = integer * 10 + int(infix[i])
                    i += 1
                result.append(integer)
            # Find end of variable name and append it to result
            else:
                var_name = ""
                while i < len(infix) and infix[i] in string.ascii_letters:
                    var_name += infix[i]
                    i += 1
                result.append(var_name)
        # Check for unacceptable symbols
        elif i < len(infix) and infix[i] not in "+-*/^()":
            return "Error"
        # Check for operator or parentheses
        if i < len(infix) and infix[i] in "+-*/^()":
            # Push left parenthesis onto stack
            if infix[i] == "(":
                stack.append("(")
                i += 1
            # Resolve addition operator, ignore multiple pluses
            elif infix[i] == "+":
                # "+" is lowest precedence: flush every staged operator first.
                while stack and stack[-1] in "+-*/^":
                    result.append(stack.pop())
                stack.append("+")
                while i < len(infix) and infix[i] == "+":
                    i += 1
            # Resolve subtraction operator, depending on number of minuses
            elif infix[i] == "-":
                while stack and stack[-1] in "+-*/^":
                    result.append(stack.pop())
                minuses = 0
                while i < len(infix) and infix[i] == "-":
                    minuses += 1
                    i += 1
                # Odd run of "-" is a subtraction; even run cancels to "+".
                if minuses % 2:
                    stack.append("-")
                else:
                    stack.append("+")
            # Resolve multiplication or division operator
            elif infix[i] in "*/":
                if i < len(infix) - 1 and infix[i + 1] == infix[i]:
                    return "Error"
                while stack and stack[-1] in "*/^":
                    result.append(stack.pop())
                stack.append(infix[i])
                i += 1
            # Resolve power operator
            elif infix[i] == "^":
                if i < len(infix) - 1 and infix[i + 1] == infix[i]:
                    return "Error"
                # NOTE(review): popping equal-precedence "^" makes power
                # left-associative; math convention is right-associative —
                # confirm which the spec expects.
                while stack and stack[-1] == "^":
                    result.append(stack.pop())
                stack.append(infix[i])
                i += 1
            # Resolve right parenthesis
            elif infix[i] == ")":
                while stack and stack[-1] != "(":
                    result.append(stack.pop())
                if not stack:
                    return "Error"
                else:
                    stack.pop()
                i += 1
    # Append all remaining operators from stack to result
    while stack:
        if stack[-1] == "(":
            return "Error"
        result.append(stack.pop())
    return result
def postfix_to_result(postfix_list):
    """Evaluate a postfix token list of ints, "+-*/^" operators and variable names.

    Returns the numeric result, "" for an empty expression, or an error
    message string for malformed input, an unknown variable, or division
    by zero. Division is integer floor division, matching the parser.
    """
    stack = collections.deque()
    for token in postfix_list:
        if isinstance(token, int):
            stack.append(token)
        elif token in "+-*/^":
            if len(stack) < 2:
                return "Invalid expression"
            right = stack.pop()
            left = stack.pop()
            if token == "+":
                stack.append(left + right)
            elif token == "-":
                stack.append(left - right)
            elif token == "*":
                stack.append(left * right)
            elif token == "^":
                stack.append(left ** right)
            else:  # "/"
                if right == 0:
                    return "Invalid expression"
                stack.append(left // right)
        else:
            # Anything else is a variable name; look it up.
            try:
                stack.append(variables[token])
            except KeyError:
                return "Unknown variable"
    if len(stack) > 1:
        return "Invalid expression"
    if not stack:
        return ""
    return stack.pop()
variables = {}  # dictionary to hold variables
# Read-eval-print loop: "/..." is a command, lines with "=" are assignments,
# anything else is evaluated as an expression.
while True:
    # remove all whitespace from expression
    user_input = "".join(input().split())
    if user_input:
        if user_input.startswith("/"):
            parse_command(user_input)
        elif "=" in user_input:
            parse_assignment(user_input)
        else:
            result = parse_expression(user_input)
            print(result, end="\n")
|
#! /usr/bin/env python
#!/usr/bin/env python
from __future__ import print_function
from beginner_tutorials.srv import AddTwoInts,AddTwoIntsResponse
import rospy
def calculate_joint_angles(req):
    """Service callback stub: intended inverse kinematics for a small arm.

    Only geometric constants are assigned; the IK computation below is
    commented out, so this currently has no effect and returns None.
    Constant semantics follow the commented math — TODO confirm.
    """
    xc = 3   # target x-coordinate (placeholder)
    yc = 1   # target y-coordinate (placeholder)
    zc = 5   # target z-coordinate (placeholder)
    a1 = 2   # link-1 length — presumably; see commented IK below
    a2 = 2   # link-2 length — presumably
    d3 = 8   # prismatic joint offset — presumably
    # r = (pow(xc, 2) + pow(yc, 2) - pow(a1, 2) - pow(a2, 2)) / 2*a1
    # # print(r)
    # D1_b = -(2 * r * xc)
    # D1_a = pow(xc, 2)+pow(yc, 2)
    # D1_c = pow(r, 2)-pow(yc, 2)
    # # print(D1_a, D1_b, D1_c)
    # D1_numerator = -D1_b + sqrt(pow(D1_b, 2) - (4* D1_a * D1_c))
    # D1_denominator = 2 * D1_a
    # D1 = D1_numerator / D1_denominator
    # D12 = (xc - (a1*D1)) / a2
    # theta1 = atan2(sqrt(1 - pow(D1, 2)), D1)
    # theta12 = atan2(sqrt(1 - pow(D12, 2)), D12)
    # theta2 = theta12 - theta1
    # d3 = -zc
    # print('Theta1, theta2 and d3 are:', theta1, theta2, d3)
import numpy as np
from math import pow, sqrt, atan2
# import rospy
# from std_msgs.msg import String
# def callback(data):
# rospy.loginfo("I heard %s",data.data)
# def listener():
# rospy.init_node('node_name')
# rospy.Subscriber("chatter", String, callback)
# # spin() simply keeps python from exiting until this node is stopped
# rospy.spin()
|
# Generated by Django 2.0.7 on 2020-08-13 10:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add Drug.company, Drug.symptom and Symptom.aid foreign keys."""
    dependencies = [
        ('aid', '0002_auto_20200812_1604'),
    ]
    operations = [
        migrations.AddField(
            model_name='drug',
            name='company',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='aid.Company', verbose_name='对应制药公司'),
        ),
        migrations.AddField(
            model_name='drug',
            name='symptom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='aid.Symptom', verbose_name='对应症状'),
        ),
        migrations.AddField(
            model_name='symptom',
            name='aid',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='aid.Aid', verbose_name='对应疾病类型'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the Books.users many-to-many relation."""
    dependencies = [
        ('account', '0010_auto_20170609_1427'),
    ]
    operations = [
        migrations.AddField(
            model_name='books',
            name='users',
            field=models.ManyToManyField(to='account.UserProfile'),
        ),
    ]
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
4. Median of Two Sorted Arrays
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
    """LeetCode 4: median of two sorted arrays in O(log(m+n)).

    Two strategies are provided: method 'a' halves both arrays around
    their medians; method 'b' binary-searches the k-th smallest element
    by discarding prefixes.
    """
    def findMedianSortedArrays(self, nums1, nums2, method='a'):
        """Dispatch to implementation 'a' or 'b' by name."""
        # NOTE(review): getattr without a default raises AttributeError for
        # an unknown name, so the `if not _method` guard below is
        # effectively dead — confirm whether getattr(..., None) was meant.
        _method = getattr(self, method)
        if not _method:
            raise Exception('Method `{}` not found.'.format(method))
        return _method(nums1=nums1, nums2=nums2)
    def a(self, nums1, nums2):
        """
        :param nums1: list[int]
        :param nums2: list[int]
        :return: float
        """
        _len = len(nums1) + len(nums2)
        # Odd total: the single middle element (0-based index _len // 2).
        if _len % 2 == 1:
            return self._kth(nums1, nums2, _len // 2)
        else:
            return (self._kth(nums1, nums2, _len // 2 - 1) + self._kth(nums1, nums2, _len // 2)) / 2.0
    def b(self, nums1, nums2):
        """
        :param nums1: list[int]
        :param nums2: list[int]
        :return: float
        """
        len_a, len_b = len(nums1), len(nums2)
        total = len_a + len_b
        # total & 0x01 tests odd length; _binary_search takes a 1-based rank.
        if total & 0x01:
            return self._binary_search(nums1, len_a, nums2, len_b, total // 2 + 1)
        else:
            return float(
                self._binary_search(nums1, len_a, nums2, len_b, total // 2) +
                self._binary_search(nums1, len_a, nums2, len_b, total // 2 + 1)
            ) / 2
    def _kth(self, nums1, nums2, k):
        """Return the k-th (0-based) smallest of the merged arrays.

        :param nums1: list[int]
        :param nums2: list[int]
        :param k: int
        :return: float
        """
        if not nums1:
            return nums2[k]
        if not nums2:
            return nums1[k]
        index1, index2 = len(nums1) // 2, len(nums2) // 2
        num1, num2 = nums1[index1], nums2[index2]
        # when k is bigger than the sum of nums1 and nums2's median indices
        if index1 + index2 < k:
            # if nums1's median is bigger than nums2's, nums2's first half doesn't include k
            if num1 > num2:
                return self._kth(nums1, nums2[index2 + 1:], k - index2 - 1)
            else:
                return self._kth(nums1[index1 + 1:], nums2, k - index1 - 1)
        # when k is smaller than the sum of nums1 and nums2's indices
        else:
            # if nums1's median is bigger than nums2's, nums1's second half doesn't include k
            if num1 > num2:
                return self._kth(nums1[:index1], nums2, k)
            else:
                return self._kth(nums1, nums2[:index2], k)
    def _binary_search(self, nums1, len_a, nums2, len_b, target):
        """Return the target-th (1-based) smallest of the merged arrays.

        :param nums1: list[int]
        :param len_a: int
        :param nums2: list[int]
        :param len_b: int
        :param target: int
        :return: float
        """
        # Keep nums1 as the shorter array so len_a can reach 0 first.
        if len_a > len_b:
            return self._binary_search(nums2, len_b, nums1, len_a, target)
        if len_a == 0:
            return nums2[target - 1]
        if target == 1:
            return min(nums1[0], nums2[0])
        # Discard up to target//2 elements from whichever prefix is smaller.
        _len_a = min(target // 2, len_a)
        _len_b = target - _len_a
        if nums1[_len_a - 1] > nums2[_len_b - 1]:
            return self._binary_search(nums1, len_a, nums2[_len_b:], len_b - _len_b, target - _len_b)
        elif nums1[_len_a - 1] < nums2[_len_b - 1]:
            return self._binary_search(nums1[_len_a:], len_a - _len_a, nums2, len_b, target - _len_a)
        else:
            return nums1[_len_a - 1]
if __name__ == '__main__':
    solution = Solution()
    # Odd total length: single middle element -> 2.
    ret = solution.findMedianSortedArrays(nums1=[1, 3], nums2=[2])
    print(ret)
    # Even total length: mean of the two middle elements -> 2.5.
    ret = solution.findMedianSortedArrays(nums1=[1, 2], nums2=[3, 4])
    print(ret)
|
#I pledge that I have abided by the Stevens Honors System-Yash Jalan
def main():
    """Copy Before.txt to After.txt with every line upper-cased.

    Note: print() appends a newline to each already-newline-terminated
    line, so the output is double-spaced (original behavior preserved).
    """
    # `with` guarantees both files are closed even if reading/writing
    # fails; the original closed them manually and leaked on error.
    with open("Before.txt", "r") as first, open("After.txt", "w") as revised:
        for line in first:
            print(line.upper(), file=revised)
main()
|
def leiadinheiro(txt):
    """Prompt with *txt* until a price-like value is entered; return it as float.

    Accepts plain digits, or values using "." or "," as the decimal
    separator (e.g. "3.50" or "3,50"): separators are stripped and the
    integer is rescaled by the digits after the first separator.
    NOTE(review): input with more than one separator (e.g. "1.2.3") is
    accepted and silently mis-scaled — confirm whether it should reprompt.
    """
    while True:
        val = input(txt)
        if val.isnumeric():
            # Pure integer input: accept as-is.
            ret = val
            break
        else:
            if val.find('.') != -1:
                # Drop the dots, then scale back by the post-separator digits.
                ret = val.replace('.', '')
                if ret.isnumeric():
                    ret = int(ret) / (10**(len(ret) - val.find('.')))
                    break
            elif val.find(',') != -1:
                ret = val.replace(',', '')
                if ret.isnumeric():
                    ret = int(ret) / (10**(len(ret) - val.find(',')))
                    break
            print(f'\033[0;31mERRO: "{val}" é um preço inválido\033[m')
    return float(ret)
# SIMPLIFIED VERSION
def leiamoney(txt):
    """Prompt with *txt* until the user types a valid price; return it as float.

    "," is accepted as the decimal separator (converted to ".").
    BUG FIX: the original guard (isalpha/isalnum/empty checks) let inputs
    such as "abc." or "1.2.3" reach float() and raise ValueError; parse
    with try/except instead so any unparseable input just reprompts.
    """
    while True:
        val = input(txt).replace(',', '.').strip()
        try:
            return float(val)
        except ValueError:
            print(f'\033[0;31mERRO: "{val}" é um preço inválido\033[m')
|
import os
import pathlib
import sys
import tempfile
import unittest
from unittest import TestCase
# Make the package under test importable from this tests directory.
sys.path.append("..")
# Shared fixture locations for the tests below.
DATA_DIR = "%s/../../data/test" % pathlib.Path(__file__).parent.absolute()
WORK_DIR = "/tmp/semeval-tests"
def download_squad():
    """Download and unpack the pretrained SQuAD models into WORK_DIR.

    No-op if the models are already present, so it is safe to call from
    every test.
    """
    import zipfile

    import requests
    from tqdm import tqdm

    if not os.path.exists(os.path.join(WORK_DIR, "save/out/squad/basic/00/save")):
        # stream=True so iter_content() actually streams instead of first
        # loading the whole archive into memory.  The old URL string also
        # carried a trailing space, corrupting the request target.
        r_squad = requests.get(
            "http://github.com/sciling/qatransfer/releases/download/v0.1/save.zip",
            stream=True,
        )
        total_size_in_bytes = int(r_squad.headers.get("content-length", 0))
        progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
        with tempfile.TemporaryFile() as tf:
            for chunk in r_squad.iter_content(chunk_size=1024):
                progress_bar.update(len(chunk))
                tf.write(chunk)
            with zipfile.ZipFile(tf, "r") as f:
                f.extractall(WORK_DIR)
        progress_bar.close()
        if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
            print("ERROR, something went wrong")
    else:
        print("SQUAD models already downloaded")
class TestAll(TestCase):
    """End-to-end smoke test for semeval training."""
    def test_train(self):
        # Local import: only needed by this test.
        from src.semeval.semeval_train import semeval_train
        download_squad()
        load_path = "/save/out/squad/basic/00/save/basic-2000"
        shared_path = "/save/out/squad/basic/00/shared.json"
        run_id = "00"
        sent_size_th = "10"
        ques_size_th = "10"
        num_epochs = "1"
        num_steps = "1"
        eval_period = "1"
        save_period = "1"
        model_path = WORK_DIR
        device = "/cpu:0"
        device_type = "gpu"
        num_gpus = "1"
        try:
            from multiprocessing import Process
            args = (
                WORK_DIR,
                WORK_DIR,
                load_path,
                shared_path,
                run_id,
                sent_size_th,
                ques_size_th,
                num_epochs,
                num_steps,
                eval_period,
                save_period,
                device,
                device_type,
                num_gpus,
                model_path,
            )
            # Run training in a subprocess so a sys.exit() inside the
            # trainer cannot kill the test runner.
            p = Process(target=semeval_train, args=args)
            p.start()
            p.join()
        except SystemExit:
            # NOTE(review): SystemExit raised inside the child process does
            # not propagate to this parent, so this handler probably never
            # fires -- confirm the intent.
            print("Finished successfully!")
        # Check model directory has all files
        self.assertIn("out", os.listdir(model_path))
        self.assertIn("semeval", os.listdir(model_path + "/out"))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
# path visualize browser
import os
from tkinter import *
class CameraVisualizer:
    """Placeholder for a camera-path visualization browser (not yet implemented)."""
    def __init__(self):
        # No state yet; see module comment ("path visualize browser").
        pass
|
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.integrate import odeint
def f(p, v, T, m):
    """Maxwell-Boltzmann speed distribution f(v) for mass m at temperature T.

    p is unused (kept for the odeint-style signature referenced below).
    The previous version dropped both factors of 2; the correct density is
    4*pi*(m/(2*pi*k*T))**(3/2) * v**2 * exp(-m*v**2/(2*k*T)),
    which integrates to 1 over v in [0, inf).
    """
    k = 1.380649*(10**(-23))  # Boltzmann constant [J/K]
    pi = math.pi
    p = 4*pi*((m/(2*pi*k*T))**(3/2))*(v**2)*np.exp(-(m*(v**2))/(2*k*T))
    return p
v = np.arange(0, 10000, 1)  # speed grid [m/s]
m = 1.67*(10**(-27))  # proton/hydrogen-atom mass [kg]
T = np.arange(73, 1074, 200)  # temperatures to plot [K]
# y = [odeint(f, 0, v, args=(T[i], m)) for i in range(len(T))]
plt.title("Maxwell-Boltzmann Distribution")
for i in range(len(T)):
    # Evaluate the distribution point-wise for each temperature curve.
    plt.plot(v, [f(0, v1, T[i], m) for v1 in v], linewidth=2, label=str(T[i]) + " K")
    # plt.plot(v, y[i], linewidth=2, label=str(T[i]) + " K")
plt.xlabel("v (m/s)")
plt.ylabel("relative proportion")
plt.legend()
plt.show()
|
#!/usr/bin/env /data/mta/Script/Python3.6/envs/ska3/bin/python
#############################################################################################
# #
# plot_acis_focal_temp.py: plot acis focal temperature trend #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Apr 02, 2020 #
# #
#############################################################################################
import os
import sys
import re
import string
import random
import time
import math
import numpy
import astropy.io.fits as pyfits
import Ska.engarchive.fetch as fetch
from datetime import datetime
import Chandra.Time
import unittest
#
#--- from ska
#
from Ska.Shell import getenv, bash
ascdsenv = getenv('source /home/ascds/.ascrc -r release; punlearn dataseeker', shell='tcsh')
#
#--- plotting routine
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
base_dir = '/data/mta/Script/Weekly/'
mta_dir = '/data/mta/Script/Python3.6/MTA/'
sys.path.append(base_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf  #---- contains other functions commonly used in MTA scripts
#
#--- temp writing file name
#
import random  # NOTE(review): duplicate of the top-of-file import; harmless but redundant
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- set column names and header
#
orb_col_list = ['time', 'x', 'y', 'z']
ang_col_list = ['time','point_suncentang']
lfile = base_dir + 'Scripts/house_keeping/loginfile'  # dataseeker login file
#-----------------------------------------------------------------------------------------------
#-- plot_acis_focal_temp: plot acis focal temperature ---
#-----------------------------------------------------------------------------------------------
def plot_acis_focal_temp(tyear='', yday=''):
    """
    plot acis focal temperature; the plotting range is the last 7 days
    input:  tyear --- year of the end of the window ('' = use today)
            yday  --- day of year of the end of the window ('' = today)
            (also reads from several databases)
    output: ./acis_focal_temp.png
    """
#
#--- default to today's date when no date is given
#
    if tyear == '':
        tyear = int(float(time.strftime('%Y', time.gmtime())))
        yday = int(float(time.strftime('%j', time.gmtime())))
        today = time.strftime('%Y:%j:00:00:00', time.gmtime())
    else:
        today = str(tyear) + ':' + mcf.add_leading_zero(yday, 3) + ':00:00:00'
    cdate = Chandra.Time.DateTime(today).secs
    cstart = cdate - 86400.0 * 7.0      #--- 7 days before, in seconds
#
#--- extract focal temp data
#
    [ftime, focal] = read_focal_temp(tyear, yday, cstart, cdate)
#
#--- convert time format to yday
#
    [ftime, byear] = convert_time_format(ftime)
#
#--- extract altitude data and sun angle data
#
    [atime, alt, sang] = read_orbit_data(cstart, cdate)
    [atime, byear] = convert_time_format(atime)
#
#--- convert alttude to normalized to sun angle (range between 0 and 180)
#
    alt = compute_norm_alt(alt)
#
#--- plot data
#
    xlabel = 'Day of Year (' + str(byear) + ')'
    [ltime, byear] = convert_time_format([cstart, cdate])
    plot_data(ftime, focal, atime, alt, sang, ltime[0], ltime[1], xlabel)
#-----------------------------------------------------------------------------------------------
#-- read_focal_temp: read focal plane temperature data --
#-----------------------------------------------------------------------------------------------
def read_focal_temp(tyear, yday, tstart, tstop):
    """
    read focal plane temperature data
    input:  tyear   --- this year
            yday    --- today's y date
            tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
    output: ftime   --- a list of time
            focal   --- a list of focal temp
    """
#
#--- if y date is less than 8, read the data from the last year as well, so
#--- that the 7-day window is covered across the year boundary
#
    if yday < 8:
        ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear-1)
        data = read_data_file(ifile, sep='\s+', c_len=2)
        ftime = data[0]
        focal = data[1]
    else:
        ftime = []
        focal = []
#
#--- otherwise, just read this year
#
    ifile = '/data/mta/Script/ACIS/Focal/Data/focal_plane_data_5min_avg_' + str(tyear)
    data = read_data_file(ifile, sep='\s+', c_len=2)
    ftime = ftime + data[0]
    focal = focal + data[1]
#
#--- select out the data for the last 7 days
#
    [ftime, focal] = select_data_by_date(ftime, focal, tstart, tstop)
    return [ftime, focal]
#-----------------------------------------------------------------------------------------------
#-- read_orbit_data: read altitude and sun angle data ---
#-----------------------------------------------------------------------------------------------
def read_orbit_data(tstart, tstop):
    """
    read altitude and sun angle data
    input:  tstart  --- starting time in seconds from 1998.1.1
            tstop   --- stopping time in seconds from 1998.1.1
    output: data    --- a list of lists of [time, alt, sun_angle]
    """
#
#--- set up the input for dataseeker and extract the data; dataseeker
#--- requires an (empty) input file to exist, hence the 'touch test'
#
    fits = 'dataseek_avg.fits'
    cmd = 'touch test'
    os.system(cmd)
    cmd1 = '/usr/bin/env PERL5LIB= '
    cmd2 = " dataseeker.pl infile=test outfile=" + fits + " "
    cmd2 = cmd2 + "search_crit='columns=pt_suncent_ang,sc_altitude timestart=" + str(tstart)
    cmd2 = cmd2 + " timestop=" + str(tstop) + "' loginFile=" + lfile
    cmd = cmd1 + cmd2
    bash(cmd, env=ascdsenv)        #--- run under the ascds environment
#
#--- read fits file and extract the data
#
    cols = ['time', 'sc_altitude', 'pt_suncent_ang']
    data = read_fits_data(fits, cols)
#
#--- clean up the temporary files
#
    mcf.rm_file(fits)
    mcf.rm_file('test')
    return data
#-----------------------------------------------------------------------------------------------
#-- select_data_by_date: selet out the potion of the data by time --
#-----------------------------------------------------------------------------------------------
def select_data_by_date(x, y, tstart, tstop):
    """
    select out the portion of paired data whose time falls in (tstart, tstop)
    input:  x       --- a list of time data
            y       --- a list of data
            tstart  --- a starting time in seconds from 1998.1.1
            tstop   --- a stopping time in seconds from 1998.1.1
    output: x       --- a list of time data selected
            y       --- a list of data selected
    """
    x = numpy.array(x)
    y = numpy.array(y)
    # Use the boolean mask directly; wrapping it in a one-element list (as
    # before) relies on a deprecated/removed NumPy indexing path.
    ind = (x > tstart) & (x < tstop)
    x = list(x[ind])
    y = list(y[ind])
    return [x, y]
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
def compute_norm_alt(v, nval=180.):
    """
    linearly rescale the data so it spans the range [0, nval]
    input:  v       --- a list or numpy array of the data
            nval    --- the max value; default = 180
    output: v       --- a list of the data normalized to [0, nval]
    """
    # asarray lets this accept plain Python lists as well as numpy arrays
    # (the old element-wise arithmetic crashed on lists).
    v = numpy.asarray(v, dtype=float)
    vmin = v.min()
    vmax = v.max()
    v = (v - vmin) / (vmax - vmin) * nval
    return list(v)
#-----------------------------------------------------------------------------------------------
#-- convert_time_format: convert a list of the time data into ydate --
#-----------------------------------------------------------------------------------------------
def convert_time_format(otime):
    """
    convert a list of the time data into ydate
    input:  otime   --- a list of time in seconds from 1998.1.1
    output: save    --- a list of time in y date (fractional day of year,
                        counted from the year of the first sample)
            prev    --- the year of the first sample
    """
    save = []
    prev = 0
    for ent in otime:
        out = Chandra.Time.DateTime(ent).date
        atemp = re.split(':', out)
        year = int(atemp[0])
        yday = float(atemp[1])
        hh = float(atemp[2])
        mm = float(atemp[3])
        ss = float(atemp[4])
        # fold hours/minutes/seconds into a fractional day
        yday += hh /24.0 + mm / 1440.0 + ss / 86400.0
        if prev == 0:
            # first sample fixes the base year and its length
            prev = year
            save.append(yday)
            if mcf.is_leapyear(year):
                base = 366
            else:
                base = 365
        else:
            # NOTE(review): only a single year rollover is handled; spans
            # longer than two calendar years would need base accumulation.
            if year != prev:
                save.append(yday + base)
            else:
                save.append(yday)
    return [save, prev]
#-----------------------------------------------------------------------------------------------
#-- read_data_file: read ascii data file --
#-----------------------------------------------------------------------------------------------
def read_data_file(ifile, sep='', remove=0, c_len=0):
    """
    read ascii data file
    input:  ifile   --- file name
            sep     --- split regex: default: '' --- no splitting
            remove  --- whether to remove the file after reading: default: 0 --- no
            c_len   --- numbers of columns to be read. col=0 to col=c_len.
                        default: 0 --- read all columns of the first line
    output: data    --- a list of lines (sep='') or a list of column lists,
                        with numeric fields converted to float
    """
    data = mcf.read_data_file(ifile)
    if remove > 0:
        mcf.rm_file(ifile)
    if sep != '':
        # first line determines the column count when c_len is not given
        atemp = re.split(sep, data[0])
        if c_len == 0:
            c_len = len(atemp)
        save = []
        for k in range(0, c_len):
            save.append([])
        for ent in data:
            atemp = re.split(sep, ent)
            for k in range(0, c_len):
                try:
                    save[k].append(float(atemp[k]))
                except ValueError:
                    # non-numeric field: keep the raw string (the previous
                    # bare `except` hid unrelated errors as well)
                    save[k].append(atemp[k])
        return save
    else:
        return data
#-----------------------------------------------------------------------------------------------
#-- plot_data: plot data --
#-----------------------------------------------------------------------------------------------
def plot_data(ftime, ftemp, stime, alt, sang, xmin, xmax, xlabel):
    """
    plot data
    input:  ftime   --- a list of time for focal temp
            ftemp   --- a list of focal temp data
            stime   --- a list of time for altitude and sun angle
            alt     --- a list of altitude data (already normalized to 0-180)
            sang    --- a list of sun agnle
            xmin    --- min of x plotting range
            xmax    --- max of x plotting range
            xlabel  --- the label for x axis
    output: acis_focal_temp.png
    """
#
#--- set sizes
#
    fsize = 16
    color = 'blue'
    color2 = 'red'
    color3 = 'green'
    marker = '.'
    psize = 8
    lw = 3
    alpha = 0.3
    width = 10.0
    height = 5.0
    resolution = 200
#
#-- close everything opened before
#
    plt.close('all')
#
#--- set font size
#
    mpl.rcParams['font.size'] = fsize
    props = font_manager.FontProperties(size=fsize)
    plt.subplots_adjust(hspace=0.08)
#
#--- set plotting range focal temp
#
    [ymin, ymax] = set_focal_temp_range(ftemp)
    fig, ax1 = plt.subplots()
    ax1.set_autoscale_on(False)
    ax1.set_xbound(xmin,xmax)
    ax1.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax1.set_ylim(ymin=ymin, ymax=ymax, auto=False)
    temp, = ax1.plot(ftime, ftemp, color=color, label="Focal Temp", lw=lw)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel('Focal Plane Temp (degC)')
    ax1.tick_params(axis='y', labelcolor=color)
#
#--- set plotting range sun angle
#
    ax2 = ax1.twinx()           #--- setting the second axis
    ax2.set_autoscale_on(False)
    ax2.set_xbound(xmin,xmax)
    ax2.set_xlim(xmin=xmin, xmax=xmax, auto=False)
    ax2.set_ylim(ymin=0, ymax=180, auto=False)
    sun, = ax2.plot(stime, sang, color=color2, label="Sun Angle", alpha=0.8)
    ax2.set_ylabel('Sun Angle (degree)')
    ax2.tick_params(axis='y', labelcolor=color2)
#
#--- plot altitude (rebinds the `alt` parameter to the Line2D legend handle)
#
    alt, = ax2.plot(stime, alt, color=color3, label="Altitude", alpha=0.8)
#
#--- adding legend
#
    fontP = font_manager.FontProperties()
    fontP.set_size(8)
    plt.legend(loc='upper right', bbox_to_anchor=(1.0, -0.06), handles=[temp, sun, alt],\
               fancybox=False, ncol=1, prop=fontP)
#
#--- save the plot
#
    outfile = base_dir + 'Data/Focal/acis_focal_temp.png'
    # NOTE(review): `matplotlib` is imported above as `mpl`; this bare name
    # relies on `from pylab import *` exposing it -- confirm, or reuse the
    # `fig` already returned by plt.subplots().
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(width, height)
    plt.tight_layout()
    plt.savefig(outfile, format='png', dpi=resolution)
    plt.close('all')
#-----------------------------------------------------------------------------------------------
#-- set_focal_temp_range: setting the focal temp plotting range --
#-----------------------------------------------------------------------------------------------
def set_focal_temp_range(v):
    """
    setting the focal temp plotting range
    input:  v       --- focal temp
    output: vmin    --- min of the plotting range
            vmax    --- max of the plotting range
    """
    vmin = min(v)
    vmax = max(v)
    diff = vmax - vmin
    # NOTE(review): focal plane temps are negative degC, so `vmin > 122`
    # can never trigger for such data -- possibly meant -122; confirm.
    if vmin > 122:
        vmin = 122
    else:
        vmin = int(vmin) -1
    # pad the top of the range by 2% of the data spread
    vmax = int(vmax + 0.02 * diff)
    return [vmin, vmax]
#-------------------------------------------------------------------------------------------------
#-- read_fits_data: read fits data --
#-------------------------------------------------------------------------------------------------
def read_fits_data(fits, cols):
    """
    read fits data
    input:  fits    --- fits file name
            cols    --- a list of col names to be extracted
    output: save    --- a list of column arrays extracted from HDU 1
    """
    save = []
    # Extract the columns while the file is still open: with memory-mapped
    # FITS files the column data may not be readable after close().
    with pyfits.open(fits) as hout:
        data = hout[1].data
        for col in cols:
            save.append(data[col])
    return save
#-----------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#-----------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
    """
    testing functions (these hit live MTA data files; they are smoke tests
    that print sizes rather than asserting values)
    """
#------------------------------------------------------------
    def test_read_focal_temp(self):
        # 7-day window ending 2018:005
        year = 2018
        yday = 5
        cdate = Chandra.Time.DateTime('2018:005:00:00:00').secs
        cstart = cdate - 86400.0 * 7.0
        [x, y] = read_focal_temp(year, yday, cstart, cdate)
        print('Focal: ' + str(len(x)) + '<-->' + str(x[:10]) + '<-->' + str(y[:10]))
#------------------------------------------------------------
    def test_read_orbit_data(self):
        # same window; exercises the dataseeker pipeline
        year = 2018
        yday = 5
        cdate = Chandra.Time.DateTime('2018:005:00:00:00').secs
        cstart = cdate - 86400.0 * 7.0
        [x, y, y2] = read_orbit_data(cstart, cdate)
        print('Alt: ' + str(len(x)) + '<-->' + str(x[:10]) + '<-->' + str(y[:10]))
#------------------------------------------------------------
#
#    def test_read_sunangle(self):
#
#        year  = 2018
#        yday  = 5
#        cdate  = Chandra.Time.DateTime('2018:005:00:00:00').secs
#        cstart = cdate - 86400.0 * 7.0
#
#        [x, y] = read_sunangle(cstart, cdate)
#
#        print('Sun Angle: ' + str(len(x)) + '<-->' + str(x[:10]) + '<-->' + str(y[:10]))
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Optional CLI: plot_acis_focal_temp.py <year> <yday>; with no
    # arguments the function defaults to today.
    year = ''
    yday = ''
    if len(sys.argv) == 3:
        year = int(float(sys.argv[1]))
        yday = int(float(sys.argv[2]))
    plot_acis_focal_temp(year, yday)
|
from rest_framework.routers import DefaultRouter
from api.customers.views import CustomerViewSet
# Router auto-generates the standard list/detail routes for customers.
# NOTE(review): DefaultRouter prefixes are plain strings, not regexes, so
# the r"^" prefix is unusual -- confirm the intended URL layout.
router = DefaultRouter()
router.register(r"^", CustomerViewSet)
urlpatterns = router.urls
|
# Modules
import os
import csv

# Path to collect data from the Resources folder
infile = os.path.join('Resources', 'budget_data.csv')

# Define Variables
totalMonths = 0
netTotal = 0
months = []
profitLoss = []

# Read the CSV once; 'with' closes the handle (the old code leaked it).
with open(infile) as csvfile:
    budgetDataCsv = csv.reader(csvfile)
    header = next(budgetDataCsv)
    for row in budgetDataCsv:
        # Find total months in spreadsheet
        totalMonths = totalMonths + 1
        # Calculating total profit
        netTotal = netTotal + int(row[1])
        months.append(row[0])
        profitLoss.append(int(row[1]))

# Month-over-month changes: value[i+1] - value[i] (replaces the old
# dual-list del/zip construction, which computed the same thing).
change = [later - earlier for earlier, later in zip(profitLoss, profitLoss[1:])]
totalChange = sum(change)
aveChange = round(totalChange / len(change), 2)
greatestIncrease = max(change)
greatestDecrease = min(change)

# Build the report once so terminal and file output cannot drift apart.
reportLines = [
    'Financial Analysis',
    '--------------------------------------',
    f'Total Months: {totalMonths}',
    f'Net Total: ${netTotal}',
    f'Average Change: ${aveChange}',
    f'Greatest Increase: ${greatestIncrease}',
    f'Greatest Decrease: ${greatestDecrease}',
    '--------------------------------------',
]

# Print to terminal
for line in reportLines:
    print(line)

# Print to file: append mode as before, but one managed handle instead of
# eight leaked open(..., "a") calls.
with open("PyBankResults.txt", "a") as outfile:
    for line in reportLines:
        print(line, file=outfile)
|
import logging
import sys, os
from abc import abstractmethod
import eons
from .DataFunctor import DataFunctor
from ..SampleSet import SampleSet
#AnalysisFunctors are used in data manipulation.
#They take a configuration of known values (config) in addition to sample data, which contains unknown values and/or values of interest.
class AnalysisFunctor(DataFunctor):
    """Base class for data-analysis steps; see module comment above."""
    def __init__(self, name=eons.INVALID_NAME()):
        super().__init__(name)
        # Callers must supply both a config SampleSet and a standard name.
        self.requiredKWArgs.append("config")
        self.requiredKWArgs.append("standard")
    #AnalysisFunctor will take self.data, mutate it, and then return it.
    #Populating self.data, returning it, and then resetting it are handled here or by parents of *this.
    #All you have to do is override the Analyze method to manipulate self.data as you'd like.
    #This is done to help enforce consistency.
    @abstractmethod
    def Analyze(self):
        raise NotImplementedError
    def UserFunction(self, **kwargs):
        # Template method: subclasses mutate self.result inside Analyze().
        self.Analyze()
        return self.result.data
    def Clear(self):
        # Reset analysis state in addition to whatever the parent clears.
        super().Clear()
        self.result = SampleSet()
        self.config = SampleSet()
        self.standard = ""
    #Override of UserFunctor method.
    def PreCall(self, **kwargs):
        # Capture the required kwargs before the call proper runs.
        super().PreCall(**kwargs)
        self.config = kwargs.get("config")
        self.standard = kwargs.get("standard")
|
# --------------------------------------------------------------------------------- #
# AQUABUTTON wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 07 October 2008
# Latest Revision: 24 Nov 2011, 22.00 GMT
#
#
# TODO List
#
# 1) Anything to do?
#
#
# For all kind of problems, requests of enhancements and bug reports, please
# write to me at:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, obviously, to the wxPython mailing list!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac.
Description
===========
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac. At the moment this class supports:
* Bubble and shadow effects;
* Customizable background, foreground and hover colours;
* Rounded-corners buttons;
* Text-only or image+text buttons;
* Pulse effect on gaining focus.
And a lot more. Check the demo for an almost complete review of the functionalities.
Usage
=====
Sample usage::
import wx
import wx.lib.agw.aquabutton as AB
app = wx.App(0)
frame = wx.Frame(None, -1, "AquaButton Test")
mainPanel = wx.Panel(frame)
mainPanel.SetBackgroundColour(wx.WHITE)
# Initialize AquaButton 1 (with image)
bitmap = wx.Bitmap("my_button_bitmap.png", wx.BITMAP_TYPE_PNG)
btn1 = AB.AquaButton(mainPanel, -1, bitmap, "AquaButton")
# Initialize AquaButton 2 (no image)
btn2 = AB.AquaButton(mainPanel, -1, None, "Hello World!")
frame.Show()
app.MainLoop()
Supported Platforms
===================
AquaButton has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (10.10).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
This class processes the following events:
================= ==================================================
Event Name Description
================= ==================================================
``wx.EVT_BUTTON`` Process a `wxEVT_COMMAND_BUTTON_CLICKED` event, when the button is clicked.
================= ==================================================
License And Version
===================
:class:`AquaButton` control is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 22 Nov 2011, 22.00 GMT
Version 0.4
"""
# NOTE(review): the statements below reference `x` and `y`, which are not
# defined in this file fragment -- these read like commenting-style example
# snippets; confirm whether they belong here at all.
x = x + 1  # allow for border
BORDER = 1
x = x + BORDER
def allow_for_border(coordinate):
    # Same "allow for border" adjustment, expressed as a named function.
    return coordinate + 1
y = allow_for_border(y)
def calc(num1, num2):
    # return the SUM of the two numbers (the old comment said "product",
    # but the implementation adds; calculate_product below multiplies)
    return num1 + num2
def calculate_product(left, right):
    """Return the product of *left* and *right*."""
    product = left * right
    return product
|
# Type stubs (no runtime bodies) for dotted-name resolution helpers.
class _NoModuleFound(Exception): ...
# Base error for malformed or unresolvable dotted names.
class InvalidName(ValueError): ...
class ModuleNotFound(InvalidName): ...
class ObjectNotFound(InvalidName): ...
# Stub: re-raise `exception` with the supplied traceback.
def reraise(exception, traceback) -> None: ...
# Stub: resolve a dotted name string to the object it names.
def namedAny(name): ...
|
#LEG MODULE
import maya.cmds as mc
import frank
def addAttributes(parentNode):
    # METHOD TO ADD THE ATTRIBUTES NEEDED FOR THIS RIG MODULE
    # presumably frank.addString creates the 'cucu' string attribute on
    # parentNode and returns its path for setAttr -- confirm against frank
    mc.setAttr((frank.addString('cucu', parentNode)), 'CULO', type='string')
def buildRigGuides():
    # Build a 3-joint guide chain.  NOTE(review): this is the LEG module but
    # the joints are named 'L_arm*' -- looks like a copy-paste from an arm
    # module; confirm the intended naming before renaming anything.
    mc.select(cl = 1)
    mc.joint(p = [0,3,0], n = 'L_arm1_rigGuides')
    mc.joint(p = [0,5,-1], n = 'L_arm2_rigGuides')
    mc.joint(p = [0,7,0], n = 'L_arm3_rigGuides')
    mc.select(cl = 1)
    # Python 2 print statement (Maya's legacy interpreter)
    print 'LEG RIG GUIDES BUILT'
def buildAnimRig():
    # Placeholder: the animation rig build is not implemented yet.
    print 'LEG RIG BUILT'
|
# http://www.practicepython.org/exercise/2014/03/12/06-string-lists.html
# Palindrome checker: normalizes case/accents, strips separators, then
# compares the character list against its mirror image.
palabra = input ("Introduce una palabra: ")
sinespacioslista = []
for letra in palabra:
    # lower-case and fold accented vowels to their plain forms
    letra = letra.lower()
    if (letra == "á" or letra == "à" or letra == "ä" or letra == "â"):
        letra = "a"
    if (letra == "é" or letra == "è" or letra == "ë" or letra == "ê"):
        letra = "e"
    if (letra == "í" or letra == "ì" or letra == "ï" or letra == "î"):
        letra = "i"
    if (letra == "ó" or letra == "ò" or letra == "ö" or letra == "ô"):
        letra = "o"
    if (letra == "ú" or letra == "ù" or letra == "ü" or letra == "û"):
        letra = "u"
    # keep only letters: skip spaces and common punctuation
    if (letra != " " and letra != "," and letra != "." and letra != ":" and letra != ";"):
        sinespacioslista.append(letra)
#
print (sinespacioslista)
flag = True
# compare position i against its mirror -(i+1); stop at first mismatch
for i in range(len(sinespacioslista)-1):
    print ("{} con {}".format(sinespacioslista[i], sinespacioslista[-(i+1)]))
    if (sinespacioslista[i] != sinespacioslista[-(i+1)]):
        flag = False
        break
#
if flag == True:
    print ("La cadena \"{}\" es un palíndromo".format(palabra))
else:
    print ("La cadena \"{}\" no es un palíndromo".format(palabra))
|
#program to find the area of a triangle if all 3 sides are given
# NOTE: Python 2 source -- `input` evaluates the typed expression to a
# number and `print` is a statement.  Uses Heron's formula.
import math
a=input ('enter first side:')
b=input ('enter second side:')
c=input ('enter third side:')
s=(a+b+c)/2.0
print 'semi-perimeter=',s
area=math.sqrt(s*(s-a)*(s-b)*(s-c))
print 'area=',area
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from internal_plugins.test_lockfile_fixtures.rules import rules as test_lockfile_fixtures_rules
from pants.backend.python.register import rules as python_rules
from pants.backend.python.register import target_types as python_target_types
from pants.core.goals.test import rules as core_test_rules
from pants.core.util_rules import config_files, source_files
from pants.jvm.resolve import coursier_fetch, coursier_setup
def target_types():
    """Expose the Python backend's target types for these test fixtures."""
    types = python_target_types()
    return types
def rules():
    """Collect every rule set required to run the lockfile-fixture tests."""
    collected = []
    collected.extend(test_lockfile_fixtures_rules())
    collected.extend(python_rules())  # python backend
    collected.extend(core_test_rules())
    collected.extend(config_files.rules())
    collected.extend(coursier_fetch.rules())
    collected.extend(coursier_setup.rules())
    collected.extend(source_files.rules())
    return tuple(collected)
|
import math
import os
import sys

from numpy import *
import numpy as np
import numpy.random
from numpy import linalg as LA

import sklearn.preprocessing
from sklearn.datasets import fetch_mldata

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# NOTE(review): fetch_mldata was removed from scikit-learn 0.22+; this
# script requires an old sklearn (fetch_openml('mnist_784') on newer ones).
mnist = fetch_mldata('MNIST original')
data = mnist['data']
labels = mnist['target']
# Binary task: digit `neg` vs digit `pos`, with labels mapped to -1/+1.
neg, pos = 0, 8
train_idx = numpy.random.RandomState(0).permutation(where((labels[:60000] == neg) | (labels[:60000] == pos))[0])
test_idx = numpy.random.RandomState(0).permutation(where((labels[60000:] == neg) | (labels[60000:] == pos))[0])
train_data_size = 2000
train_data_unscaled = data[train_idx[:train_data_size], :].astype(float)
train_labels = (labels[train_idx[:train_data_size]] == pos) * 2 - 1
# validation_data_unscaled = data[train_idx[6000:], :].astype(float)
# validation_labels = (labels[train_idx[6000:]] == pos)*2-1
test_data_size = 2000
test_data_unscaled = data[60000 + test_idx[:test_data_size], :].astype(float)
test_labels = (labels[60000 + test_idx[:test_data_size]] == pos) * 2 - 1
# Preprocessing: center each pixel (mean removal only, no scaling)
train_data = sklearn.preprocessing.scale(train_data_unscaled, axis=0, with_std=False)
# validation_data = sklearn.preprocessing.scale(validation_data_unscaled, axis=0, with_std=False)
test_data = sklearn.preprocessing.scale(test_data_unscaled, axis=0, with_std=False)
samples_num = train_data.shape[0]
pixels_num = train_data.shape[1]
sorted_pixels = [[1. for i in range(samples_num)] for j in range(pixels_num)]
sorted_pixel_labels = [[1. for i in range(samples_num)] for j in range(pixels_num)]
idx = [[1. for i in range(samples_num)] for j in range(pixels_num)]
# preprocessing phase for weak learners: for every pixel, cache the sample
# values sorted ascending, the matching labels, and the sort order
for pixel in range(pixels_num):
    idx[pixel] = np.argsort([train_data[sample_index][pixel] for sample_index in range(samples_num)])
    sorted_pixels[pixel] = [train_data[j][pixel] for j in idx[pixel]]
    sorted_pixel_labels[pixel] = [train_labels[j] for j in idx[pixel]]
def main(args):
    """Run T rounds of AdaBoost with decision stumps and plot the
    train/test error and exponential-loss curves into `output`."""
    # output path:
    if len(args) == 1:
        output = args[0] + '/'
        if not os.path.exists(output):
            print("Path does not exist!")
            sys.exit(2)
    elif len(args) > 1:
        print("usage: Q5.py <output_path>")
        sys.exit(2)
    else:
        output = ''
    # Section A: start from the uniform distribution over samples
    D = np.array([(1. / samples_num) for i in range(samples_num)])
    T = 50
    H = []
    alphas = []
    train_error = []
    test_error = []
    train_lossFunc = []
    test_lossFunc = []
    t_array = [i for i in range(1, T + 1)]
    for t in range(T):
        # one boosting round: best stump, its weight, and the updated D
        (h, train_error_i, alpha, D) = set_params(D)
        H.append(h)
        alphas.append(alpha)
        train_error.append(test_H(H, alphas, t, True))
        test_error.append(test_H(H, alphas, t, False))
        train_lossFunc.append(calcLossFunc(H, alphas, t, True))
        test_lossFunc.append(calcLossFunc(H, alphas, t, False))
        print("iteration ", t + 1, " train error ", train_error[t], " test error ", test_error[t])
        print("iteration ", t + 1, " train lossFunc ", train_lossFunc[t], " test lossFunc ", test_lossFunc[t])
    # plots
    plt.figure(1)
    plt.plot(t_array, train_error)
    plt.xlabel('t')
    plt.ylabel('error')
    plt.title('Training error by iterations number')
    img_save = output + 'training_error'
    plt.savefig(img_save)
    plt.figure(2)
    plt.plot(t_array, test_error)
    plt.xlabel('t')
    plt.ylabel('error')
    plt.title('Test error by iterations number')
    img_save = output + 'test_error'
    plt.savefig(img_save)
    plt.figure(3)
    plt.plot(t_array, train_lossFunc)
    plt.xlabel('t')
    plt.ylabel('loss function')
    plt.title('Training loss function by iterations number')
    img_save = output + 'training_loss_function'
    plt.savefig(img_save)
    plt.figure(4)
    plt.plot(t_array, test_lossFunc)
    plt.xlabel('t')
    plt.ylabel('loss function')
    plt.title('Test loss function by iterations number')
    img_save = output + 'test_loss_function'
    plt.savefig(img_save)
def set_params(D):
    """One AdaBoost round: find the lowest-weighted-error decision stump,
    compute its weight alpha, and return the re-weighted distribution.

    Stumps h are encoded as (threshold, pixel, option); option 0 predicts
    +1 for pixel <= threshold, option 1 predicts -1 for pixel <= threshold.
    Uses the presorted per-pixel caches so each candidate threshold updates
    the running error in O(1) instead of rescoring all samples.
    """
    # h are saved as (threshold,pixel,option)
    best_h = (0, 0, 0)
    best_error = 1
    for pixel in range(pixels_num):
        curr_threshold = sorted_pixels[pixel][0]
        curr_estimator_plus = (curr_threshold, pixel, 0)  # if pixel <= curr_threshold predict 1 (if > 0 predict -1)
        curr_estimator_minus = (curr_threshold, pixel, 1)  # if pixel <= curr_threshold predict -1 (if > 0 predict 1)
        curr_error_plus = test_h((curr_estimator_plus), D)
        curr_error_minus = test_h((curr_estimator_minus), D)
        thresh_idx = 1
        # skip duplicates of the first value
        while ((thresh_idx < samples_num) and (sorted_pixels[pixel][thresh_idx] == curr_threshold)):
            thresh_idx += 1
        while thresh_idx < samples_num:  # the sample pixels are the threholds
            curr_threshold = sorted_pixels[pixel][thresh_idx]
            curr_estimator_plus = (curr_threshold, pixel, 0)
            curr_estimator_minus = (curr_threshold, pixel, 1)
            # moving the threshold past one sample flips only that sample's
            # prediction, so the error changes by +/- its weighted label
            curr_error_plus -= sorted_pixel_labels[pixel][thresh_idx] * D[idx[pixel][thresh_idx]]
            curr_error_minus += sorted_pixel_labels[pixel][thresh_idx] * D[idx[pixel][thresh_idx]]
            thresh_idx += 1
            # deal with consecutive sample pixels with same value
            while ((thresh_idx < samples_num) and not (sorted_pixels[pixel][thresh_idx] > curr_threshold)):
                curr_error_plus -= sorted_pixel_labels[pixel][thresh_idx] * D[idx[pixel][thresh_idx]]
                curr_error_minus += sorted_pixel_labels[pixel][thresh_idx] * D[idx[pixel][thresh_idx]]
                thresh_idx += 1
            # Peek best h
            if best_error > curr_error_plus:
                best_error = curr_error_plus
                best_h = curr_estimator_plus
            if best_error > curr_error_minus:
                best_error = curr_error_minus
                best_h = curr_estimator_minus
    # Update D with the standard AdaBoost exponential re-weighting
    alpha = 0.5 * math.log((1. - best_error) / best_error)
    D = np.array([D[i] * math.exp(-alpha) if train_labels[i] == hypothesys(train_data[i], best_h)
                  else D[i] * math.exp(alpha) for i in range(samples_num)])
    D = np.array([d / D.sum() for d in D])
    return (best_h, best_error, alpha, D)
def test_h(h, D):
    # Weighted 0-1 error of stump h on the training set under distribution D.
    return (np.dot(np.array([hypothesys(train_data[i], h) != train_labels[i] for i in range(samples_num)]), D))
# returns the error
# the probability of predicting 0 is 0 so we can use the sign function
def test_H(H, alphas, T, is_train):
    """Return the 0-1 error of the weighted majority vote of stumps H[0..T].

    H        --- list of stumps (threshold, pixel, option)
    alphas   --- AdaBoost weights for the stumps
    T        --- index of the last stump to include (inclusive)
    is_train --- evaluate on the training set if True, else the test set
    """
    data = train_data if is_train else test_data
    labels = train_labels if is_train else test_labels
    # renamed from `len`: do not shadow the builtin
    n_samples = train_data.shape[0] if is_train else test_data.shape[0]
    y_hat = np.array([])
    for i in range(n_samples):
        coef = 0
        for t in range(T + 1):
            coef += alphas[t] * hypothesys(data[i], H[t])
        # sign() is safe here: an exactly-zero vote has probability zero
        y_hat = np.append(y_hat, sign(coef))
    return (np.array([y_hat[i] != labels[i] for i in range(n_samples)]).mean())
def calcLossFunc(H, alphas, T, is_train):
    """Return the mean exponential loss exp(-y * F(x)) of the ensemble
    H[0..T] on the training set (is_train=True) or the test set."""
    data = train_data if is_train else test_data
    labels = train_labels if is_train else test_labels
    # renamed from `len`: do not shadow the builtin
    n_samples = train_data.shape[0] if is_train else test_data.shape[0]
    losses = []
    for i in range(n_samples):
        coef = 0
        for t in range(T + 1):
            coef += alphas[t] * hypothesys(data[i], H[t])
        losses.append(math.exp(-labels[i] * coef))
    return np.array(losses).mean()
def hypothesys(x, h):
    """Evaluate decision stump h = (threshold, pixel, option) on sample x."""
    if h[2] == 0:
        return pos_under(x, h)
    return pos_over(x, h)
def pos_under(x, h):
    """Predict +1 when the pixel is at or below the threshold, else -1."""
    return 1. if x[h[1]] <= h[0] else -1.
def pos_over(x, h):
    """Predict -1 when the pixel is at or below the threshold, else +1."""
    return -1. if x[h[1]] <= h[0] else 1.
if __name__ == '__main__':
    # CLI: Q5.py [output_path]
    main(sys.argv[1:])
|
# Poll every configured 1-Wire sensor once per second and print its reading.
# NOTE: Python 2 source (print statement below).
from onewire.device import Onewire
from onewire.config import load_cfg
import time
if __name__ == '__main__':
    cfg = load_cfg()
    base_dir = cfg.get('general', 'base_dir')
    onewire = Onewire(base_dir=base_dir)
    onewire.load_device()
    while True:
        for i in onewire.device_list:
            # i.read() presumably performs the (blocking) sensor read -- confirm
            print i.name, i.device_id, i.read()
        time.sleep(1)
|
import datetime
from django.test import TestCase
from django.contrib.auth import get_user_model
from .. import models
from unittest.mock import patch
def sample_user(email='test.random@mail.com', password='11111'):
    """Create and return a throwaway user for use in model tests."""
    user_model = get_user_model()
    return user_model.objects.create_user(email=email, password=password)
class ModelTests(TestCase):
    """Unit tests for the app's models and model helper functions."""

    def test_create_user_with_email_succ(self):
        """Test creating a new user with an email successfully"""
        email = 'holest.test@gmail.com'
        password = 'testpass12'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """Test the email for a new user is normalized"""
        email = "test@GMAIL.com"
        user = get_user_model().objects.create_user(email, "123141231")
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, '1243user')

    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        user = get_user_model().objects.create_superuser("haron.dev@gmail1.2com", "1234")
        # Fix: assertTrue's second positional argument is a failure *message*,
        # not an expected value, so pass only the flag being checked.
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_device_str(self):
        """Test device string representation"""
        device = models.Device.objects.create(
            user=sample_user(),
            name='test.random.name'
        )
        self.assertEqual(str(device), device.name)

    def test_audience_str(self):
        """Test audience string representation"""
        audience = models.Audience.objects.create(
            name='test.random.name'
        )
        self.assertEqual(str(audience), audience.name)

    def test_advertising_str(self):
        """Test advertising string representation"""
        advertising = models.Advertising.objects.create(
            user=sample_user(),
            name='test.random.name',
            seconds=10,
            fromDate=datetime.datetime.now(),
            # Fix: `datetime.datetime.now().day + 3` evaluated to an int
            # (day-of-month plus three), not a date; use a timedelta to
            # express "three days from now".
            toDate=datetime.datetime.now() + datetime.timedelta(days=3)
        )
        self.assertEqual(str(advertising), advertising.name)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Test that image is saved in the correct location"""
        uuid = 'test-uuid'
        mock_uuid.return_value = uuid
        filepath = models.advertising_image_file_path(None, 'test.random.name.jpg')
        exp_path = f'uploads/advertising/{uuid}.jpg'
        self.assertEqual(filepath, exp_path)
|
"""
=================
Testing Utilities
=================
This module contains data generation tools for testing vivarium_public_health
components.
"""
from itertools import product
import pandas as pd
def make_uniform_pop_data(age_bin_midpoint=False):
    """Build a uniform fake population table for tests.

    One row per (5-year age bin, sex, year, location) combination, each with
    a population ``value`` of 100. When ``age_bin_midpoint`` is True an
    ``age`` column holding the bin midpoint is added (used for population
    tests).
    """
    combos = product(
        [(start, start + 5) for start in range(0, 100, 5)],  # 5-year age bins, 0-100
        ('Male', 'Female'),
        list(zip(range(1990, 2018), range(1991, 2019))),     # (year_start, year_end) pairs
        (1, 2),                                              # location ids
    )
    records = [{'age_start': age_lo,
                'age_end': age_hi,
                'sex': sex,
                'year_start': year_lo,
                'year_end': year_hi,
                'location': location,
                'value': 100}
               for (age_lo, age_hi), sex, (year_lo, year_hi), location in combos]
    pop = pd.DataFrame(records)
    if age_bin_midpoint:  # used for population tests
        pop['age'] = (pop['age_start'] + pop['age_end']) / 2
    return pop
|
import os
import itertools
import numpy as np
import cv2
# Run relative to this script's directory so the hard-coded image filenames
# below resolve regardless of the caller's current working directory.
os.chdir(os.path.dirname(__file__))
# When True, paint the sampled colors and grid lines onto the image and show
# an interactive preview window for each processed file.
DEBUG = False
def pick_color(filename):
    """Sample the border color of each cell in an 8x8 grid from an image.

    Returns a list of 64 "#xxxxxx" hex strings in row-major order
    (top-left cell first).
    NOTE(review): channels come from cv2.imread, which loads images in BGR
    order, so the hex digits are presumably blue-green-red — confirm
    downstream consumers expect that.
    """
    colors = []
    im0 = cv2.imread(filename)
    # Normalize to a fixed 960x960 canvas so the per-cell pixel offsets
    # below are valid regardless of the input resolution.
    im0 = cv2.resize(im0, (960, 960))
    for y, x in itertools.product(range(8), range(8)):
        # Cell bounding box: 86 px pitch with a 146 px outer margin.
        x1, x2 = [ x * 86 + 146, x * 86 + 210 ]
        y1, y2 = [ y * 86 + 146, y * 86 + 210 ]
        # Average each of the four 12 px border strips (left, right, top,
        # bottom) down to a single pixel via cv2.resize to 1x1, then average
        # those four pixels again to get one representative color per cell.
        color = cv2.resize(np.array([ [
            cv2.resize(im0[y1+2:y2-2, x1+2:x1+14], (1, 1))[0, 0],
            cv2.resize(im0[y1+2:y2-2, x2-14:x2-2], (1, 1))[0, 0],
            cv2.resize(im0[y1+2:y1+14, x1+2:x2-2], (1, 1))[0, 0],
            cv2.resize(im0[y2-14:y2-2, x1+2:x2-2], (1, 1))[0, 0],
        ] ], np.uint8), (1, 1))[0, 0]
        # Apply a square-law tone curve to each channel ((x/256)**2 * 256)
        # and pack the three bytes into a "#xxxxxx" hex string.
        # NOTE(review): the generator's `x` shadows the grid-column `x` of
        # the outer loop; it works, but is confusing — consider renaming.
        colors.append("#" + format(sum(int(((x/256)**2)*256) << (i * 8) for i, x in enumerate(color)), '06x'))
        if DEBUG:
            # Paint the sampled color into the cell and outline the grid
            # in green for visual verification.
            im0[y1+16:y2-16, x1+16:x2-16] = color
            im0[:, x1] = [64, 255, 64]
            im0[:, x2] = [64, 255, 64]
            im0[y1, :] = [64, 255, 64]
            im0[y2, :] = [64, 255, 64]
    if DEBUG:
        cv2.imshow('preview', im0)
        cv2.waitKey(0)
    return colors
# Concatenate the 64-cell color lists of the two board images and print them.
print(pick_color('001-063.jpg') + pick_color('064-127.jpg'))
|
# A list is a value that contains other values.
# It contains multiple values in an ordered sequence.
# Lists start at index 0.
# They are denoted by [] with each item separated by a comma ','
newList = ["One", "Two", "Three"]
# To access a value within a list we use an integer index.
newList[0]
# = "One"
# It is possible to have lists of lists.
multiList = [["One", "Two", "Three"], [1, 2, 3]]
multiList[0]  # Returns the entire first list
multiList[1]  # Returns the entire second list
multiList[0][0]  # Returns the first item in the first list : "One"
multiList[1][2]  # Returns the third item in the second list : 3
# Using a negative index
# A negative index counts from the end of the list (-1 is the last item).
newList[-1]  # Returns the last item which is : "Three"
newList[-2]  # Returns the second-to-last item which is : "Two"
# Using a slice
# Sometimes you might want to get a range of values from within a list.
# Using a slice will return a list of values.
# A slice includes the value at the first index but excludes the value at
# the second index. So 0:5 returns the values at indices 0 through 4 (five
# values); the value at index 5 itself is not included.
sliceList = ["My", "Name", "Is", "Jeff"]
sliceList[1:3]
# When using a slice you can leave out a value.
# If you omit the first index the slice starts at the beginning:
#sliceList[:3]
# If you omit the second index the slice goes to the end:
#sliceList[2:]
# Changing values
listX = [1, 2, 3, 4, 5, 6, 7, 8]
print(listX)
listX[5] = listX[5] * 100
print(listX)
listX[0:4] = [100, 200, 300, 400]
print(listX)
# Deleting from a list
# To remove an item from a list we use the del statement.
# It can be considered an "unassignment statement".
delList = [1, 2, 3, 4, 5]
del delList[1]  # Removes the value at the second index which is : 2
print(delList)
# Converting to a list
# If we have a string and we want to convert it to a list we can use the list function.
x = list("Hello")  # Result : ['H', 'e', 'l', 'l', 'o']
print(x)
# Evaluating if a value IS IN a list
'h' in ['a', 'b', 'c', 'd', 'e']  # Returns : False
'a' in ['a', 'b', 'c', 'd', 'e']  # Returns : True
# Evaluating if a value IS NOT IN a list
'h' not in ['a', 'b', 'c', 'd', 'e']  # Returns : True
'a' not in ['a', 'b', 'c', 'd', 'e']  # Returns : False
|
from django.forms import ModelForm
from .models import Employee, Passport, Statement
from django import forms
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> so browsers show a date picker.
    input_type = 'date'
class PassportForm(ModelForm):
    """Form for creating and editing a Passport record."""
    class Meta:
        model = Passport
        # NOTE(review): 'issed' / 'date_of_issed' look like typos of 'issued',
        # but these names must match the Passport model's field names —
        # verify against the model before renaming.
        fields = ['fullname', 'serial_number', 'address', 'issed', 'code_subdivision', 'date_of_issed']
        # Use the HTML5 date-picker widget for the issue date.
        widgets = {'date_of_issed' : DateInput()}
class EmployeeForm(ModelForm):
    """Form for editing an Employee's profile details."""
    class Meta:
        model = Employee
        fields = ['url_photo', 'phone', 'position', 'unit']
class StatementForm(ModelForm):
    """Form for creating and editing a work Statement record."""
    # Consistency: subclass the directly-imported ModelForm like PassportForm
    # and EmployeeForm above (forms.ModelForm is the same class).
    class Meta:
        model = Statement
        fields = ['date_work', 'isAttended', 'salary']
        # Use the HTML5 date-picker widget for the work date.
        widgets = {'date_work' : DateInput()}
|
import sys
import requests
from fastapi import FastAPI
from fastapi.responses import Response
version = f"{sys.version_info.major}.{sys.version_info.minor}"
app = FastAPI()
@app.get("/")
async def read_root():
    """Root endpoint: return a greeting that reports the running Python version."""
    return {
        "message": (
            "Hello world! From FastAPI running on Uvicorn with Gunicorn. "
            f"Using Python {version}"
        )
    }
@app.get("/cat")
async def read_cat():
    """Fetch a random cat image and relay it with its original content type."""
    cat_api = "http://thecatapi.com/api/images/get?format=src"
    try:
        # Timeout added: requests has no default timeout, so a stalled
        # upstream would previously hang this endpoint indefinitely.
        # NOTE(review): requests is blocking inside an async handler;
        # consider httpx.AsyncClient if this route sees real traffic.
        response = requests.get(cat_api, timeout=10)
        return Response(content=response.content, media_type=response.headers["content-type"])
    except (requests.RequestException, KeyError):
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. KeyError covers a missing content-type header.
        return {"message": "Error in fetching cat, Network request failed"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.