text stringlengths 8 6.05M |
|---|
import subprocess
import shlex


def run_cmd(cmd):
    """Run *cmd* and return its captured stdout as bytes.

    :param cmd: either an argv list or a single command-line string
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    print(cmd)  # echo the command for traceability
    if isinstance(cmd, list):
        return subprocess.check_output(cmd)
    # shlex.split handles quoted arguments correctly, unlike the original
    # split(" ") which also produced empty tokens on consecutive spaces.
    return subprocess.check_output(shlex.split(cmd))
|
import pytest
from prereise.gather.helpers import get_monthly_net_generation
from prereise.gather.tests.mock_generation import create_mock_generation_data_frame
def test_get_monthly_net_generation_argument_type():
    """Non-(str, DataFrame, str) argument combinations must raise TypeError."""
    bad_args = [
        (1, create_mock_generation_data_frame(), "hydro"),
        ("CA", 1, "hydro"),
        ("WA", create_mock_generation_data_frame(), 1),
    ]
    for state, table, fuel in bad_args:
        with pytest.raises(TypeError):
            get_monthly_net_generation(state, table, fuel)
def test_get_monthly_net_generation_argument_value():
    """Unknown state or fuel type values must raise ValueError."""
    bad_args = [
        ("Germany", create_mock_generation_data_frame(), "hydro"),
        ("WA", create_mock_generation_data_frame(), "uranium"),
    ]
    for state, table, fuel in bad_args:
        with pytest.raises(ValueError):
            get_monthly_net_generation(state, table, fuel)
def test_get_monthly_net_generation():
    """Fuel type at index i (0-based) yields twelve months of value i + 1."""
    table = create_mock_generation_data_frame()
    fuel_types = [
        "wind",
        "solar",
        "ng",
        "dfo",
        "hydro",
        "geothermal",
        "nuclear",
        "coal",
    ]
    for i, fuel_type in enumerate(fuel_types):
        assert get_monthly_net_generation("CA", table, fuel_type) == [i + 1] * 12
|
import unittest
from zoomus import components, util
import responses
def suite():
    """Define all the tests of the module."""
    tests = unittest.TestSuite()
    for case in (ListV1TestCase, ListV2TestCase):
        tests.addTest(unittest.makeSuite(case))
    return tests
class ListV1TestCase(unittest.TestCase):
    """Exercises UserComponent.list() against the v1 REST endpoint."""

    def setUp(self):
        # Component under test, configured for API version 1.
        self.component = components.user.UserComponent(
            base_uri="http://foo.com",
            config={
                "api_key": "KEY",
                "api_secret": "SECRET",
                "version": util.API_VERSION_1,
            },
        )

    @responses.activate
    def test_can_list(self):
        # v1 sends credentials as query parameters on a POST request.
        responses.add(
            responses.POST, "http://foo.com/user/list?api_key=KEY&api_secret=SECRET"
        )
        self.component.list()
class ListV2TestCase(unittest.TestCase):
    """Exercises UserComponentV2.list() against the v2 REST endpoint."""

    def setUp(self):
        # Component under test, configured for API version 2.
        self.component = components.user.UserComponentV2(
            base_uri="http://foo.com",
            config={
                "api_key": "KEY",
                "api_secret": "SECRET",
                "version": util.API_VERSION_2,
            },
        )

    @responses.activate
    def test_can_list(self):
        # v2 uses a plain GET on the /users collection.
        responses.add(responses.GET, "http://foo.com/users")
        self.component.list()


if __name__ == "__main__":
    unittest.main()
|
#-- encoding=utf8 --
from django.db import models
# Create your models here.
from proj.accounts.models import UserProfile
from django.contrib.auth.models import User
def PARENT_CAT_CHOICES():
    """Return (id, name) tuples for every top-level category (no parent)."""
    from django.db import connection, transaction
    db_cursor = connection.cursor()
    db_cursor.execute("SELECT id,name from catalog_category where parent_id is NULL")
    return db_cursor.fetchall()
class Category(models.Model):
    """Product category; categories form a tree via the self-referencing parent FK."""
    name = models.CharField(max_length=255, verbose_name=u'Name', help_text=u'i.e. Carabiners')
    # Russian display name.
    name_rus = models.CharField(max_length=255, verbose_name=u'Наименование', help_text=u'i.e. Карабины')
    slug = models.SlugField(verbose_name=u'URL', unique=True)
    # NOTE(review): PARENT_CAT_CHOICES() is called once at import time and runs
    # a raw SQL query before the app is fully set up — likely a latent bug
    # (stale choices, crashes on fresh databases); consider limit_choices_to
    # or a form-level queryset instead. Confirm before changing.
    parent = models.ForeignKey('self', verbose_name=u'Parent Category', choices=PARENT_CAT_CHOICES(), null=True, blank=True)

    def get_children(self):
        # Direct children of this category (one tree level down).
        return Category.objects.filter(parent=self.id)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = u'Category'
        verbose_name_plural = u'Categories'
class Brand(models.Model):
    """Manufacturer brand with an optional logo image."""
    name = models.CharField(max_length=255, verbose_name=u'Name', help_text=u'i.e. Marmot')
    # Russian display name.
    name_rus = models.CharField(max_length=255, verbose_name=u'Наименование', help_text=u'i.e. Sivera')
    img = models.ImageField(upload_to='img/brands/', height_field=None, width_field=None, verbose_name=u'Image', blank=True, null=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = u'Brand'
        verbose_name_plural = u'Brands'
class Item(models.Model):
    """Catalog item, belonging to one category and one brand."""
    name = models.CharField(max_length=255, verbose_name=u'Name', help_text=u'i.e. Petzl Am\'d ScrewLock')
    # Russian display name.
    name_rus = models.CharField(max_length=255, verbose_name=u'Наименование', help_text=u'i.e. Petzl Am\'d ScrewLock')
    category = models.ForeignKey(Category, verbose_name=u'Category')
    descr = models.TextField(verbose_name=u'Description')
    # Russian description.
    descr_rus = models.TextField(verbose_name=u'Описание')
    brand = models.ForeignKey(Brand, verbose_name=u'Brand')

    def __unicode__(self):
        return self.name;

    class Meta:
        verbose_name = u'Item'
        verbose_name_plural = u'Items'
class Specification(models.Model):
    """Concrete variant of an Item (size/color) with price, image and stock."""
    item = models.ForeignKey(Item, verbose_name=u'Item')
    img = models.ImageField(upload_to='img/catalog/', height_field=None, width_field=None, verbose_name=u'Image')
    size = models.CharField(max_length=255, verbose_name=u'Size')
    color = models.CharField(max_length=255, verbose_name=u'Color')
    # Russian size/color labels.
    size_rus = models.CharField(max_length=255, verbose_name=u'Размер')
    color_rus = models.CharField(max_length=255, verbose_name=u'Цвет')
    price = models.FloatField(verbose_name=u'Price in USD')
    quantity = models.IntegerField(verbose_name=u'Available quantity')

    def __unicode__(self):
        # e.g. "Petzl Am'd :: M red"
        return self.item.name + " :: " + self.size+' '+self.color

    class Meta:
        verbose_name = u'Specification'
        verbose_name_plural = u'Specifications'
class UserReview(models.Model):
    """User-submitted review of an Item, with a 0-5 integer rating."""
    # Integer rating choices (stored value, display value).
    rate_choices = (
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (4, 4),
        (5, 5)
    )
    dt = models.DateTimeField(auto_now_add=True, verbose_name=u'Date and Time')
    period = models.CharField(max_length=255, verbose_name=u'Usage period', blank=True)
    # Russian-language counterparts of the free-text fields.
    period_rus = models.CharField(max_length=255, verbose_name=u'Период использования', blank=True)
    conditions = models.TextField(verbose_name=u'Usage conditions', blank=True)
    conditions_rus = models.TextField(verbose_name=u'Условия использования', blank=True)
    review = models.TextField(verbose_name=u'Review', blank=True)
    review_rus = models.TextField(verbose_name=u'Отзыв', blank=True)
    item = models.ForeignKey(Item, verbose_name=u'Item')
    user = models.ForeignKey(User, verbose_name=u'User')
    rate = models.IntegerField(verbose_name=u'Rate', choices=rate_choices)

    def __unicode__(self):
        # "dd/mm/YYYY HH:MM, First Last, item name"
        return self.dt.strftime('%d/%m/%Y %H:%M') + ', '+ self.user.first_name + " " + self.user.last_name + ", " + self.item.name

    class Meta:
        verbose_name = u'User Review'
        verbose_name_plural = u'User Reviews'
|
"""
必须参数
DL2 = 2.6 m
QS3 的 QS 参数需要改变,因为比例问题
前段需要简化
中间部分不需要增加磁铁
长度控制更简单一些
"""
# 因为要使用父目录的 cctpy 所以加入
from os import error, path, system
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from work.A01run import *
from cctpy import *
GAP1 = 0.45
GAP2 = 0.45
DL2 = 2.6
QS1_LEN = 0.27
QS2_LEN = 0.27
# Q1_LEN = 0.27
# Q2_LEN = 0.27
# GAP_Q12 = 0.45
DL1 = 1.633452377915599
print(DL1)
traj = (
Trajectory.set_start_point().first_line(length=DL1)
.add_arc_line(radius=0.95,clockwise=False,angle_deg=22.5)
.add_strait_line(GAP1)
.add_strait_line(QS1_LEN)
.add_strait_line(GAP1)
.add_arc_line(radius=0.95,clockwise=False,angle_deg=22.5)
.add_strait_line(DL1)
.add_strait_line(DL2)
.add_arc_line(radius=0.95,clockwise=True,angle_deg=67.5)
.add_strait_line(GAP2)
.add_strait_line(QS2_LEN)
.add_strait_line(GAP2)
.add_arc_line(radius=0.95,clockwise=True,angle_deg=67.5)
.add_strait_line(0.45)
.add_strait_line(0.27)
.add_strait_line(DL2-0.45-0.27)
)
print(traj.point_at_end())
Plot2.plot(traj)
Plot2.equal()
Plot2.show() |
from django.urls import path
from .views import *
app_name = 'users'
urlpatterns = [
    # Registration page and AJAX username availability check.
    path('register', register, name= 'register'),
    path('validate-username', validate_username, name='validate'),
    # User listing: JSON data endpoint and the HTML page.
    path('user-list-data', user_list_data, name='user_list_data'),
    path('user-list',user_list,name='user_list'),
    # Update / delete a user, keyed by username.
    path('update/<str:username>',update,name='update'),
    path('delete/<str:username>',delete,name='delete')
]
|
import tkinter as tk # GUI
import field as fld
import sys
# Prevent the user from resizing the window.
# NOTE(review): this creates the default Tk root window and marks it
# non-resizable; Application() below attaches to this same default root.
tk.Tk().resizable(width=False, height=False)
"""Used to validate the entry fields, which can only contain numeric characters.
Zero length strings are also permitted."""
def validateEntryField(newText) :
return newText.isdigit() or len(newText)==0
"""This class represents the main application."""
class Application(tk.Frame) :
def __init__(self, master=None) :
tk.Frame.__init__(self, master)
self.master.title("Python - The Travelling Salesman Problem")
self.build()
self.grid()
def build(self) :
# Has the user specified their own dimensions for the field?
if ( (len(sys.argv) >= 2) and (min(int(sys.argv[1]), int(sys.argv[2])) > 0) ) : # If they have, and if the values are legal...
self.field = fld.Field(int(sys.argv[1]), int(sys.argv[2])) # ...use those dimensions.
else : # Otherwise...
self.field = fld.Field() # ...use the default dimensions.
# The field (canvas) packs itself - see field.py
self.validateFunc = self.register(validateEntryField)
tk.Label(self,text='Number of cities\n(minimum of 4):',justify=tk.CENTER).grid(row=1,column=0)
self.nodeCount = tk.StringVar(value=20)
self.nodeCountEntry = tk.Entry(self,justify=tk.CENTER,textvariable=self.nodeCount,validate='key',validatecommand=(self.validateFunc,'%P'))
self.nodeCountEntry.grid(row=2,column=0)
self.genBtn = tk.Button(self, text='Generate', command=self.genClick)
self.genBtn.grid(row=3, column=0)
tk.Label(self,text='Milliseconds between swaps:\n',justify=tk.CENTER).grid(row=1,column=1)
self.swapInterval = tk.StringVar(value=100)
self.swapIntervalEntry = tk.Entry(self,justify=tk.CENTER,textvariable=self.swapInterval,validate='key',validatecommand=(self.validateFunc,'%P'))
self.swapIntervalEntry.grid(row=2,column=1)
self.isSwapping = tk.IntVar()
self.stepBtn = tk.Checkbutton(self, text='Toggle swapping', variable=self.isSwapping)
self.stepBtn.grid(row=3, column=1)
def genClick(self) :
self.nodeCount.set(max(int(self.nodeCount.get()), 4)) # Permit a minimum node count of four.
self.field.generateNodes(int(self.nodeCount.get()) )
def tick(self) :
if (self.isSwapping.get()) :
self.isSwapping.set( self.field.randomSwap() ) # If no shorter distance could be found after a number of attempts, disable the swapping automatically. This prevents infinite loops.
interval = self.swapInterval.get()
if len(interval) > 0 and int(interval) > 0 :
self.after(int(interval),self.tick)
else : # If the interval is set to zero or the entry area is empty...
self.after(1,self.tick) # ...just update once every millisecond.
if __name__ == '__main__' :
    # Start the GUI and the periodic swap loop.
    app = Application()
    app.after(100,app.tick) # tick() will continuously invoke itself as part of the main loop.
    app.mainloop()
# Small list-comprehension demo.
my_list = [0, 1, 2, 3, 4]
an_equal_list = list(range(5))  # [0,1,2,3,4]
multiply_list = [3 * value for value in range(5)]
# print(multiply_list)
# print (8 % 3)
# print([n for n in range(10) if n % 2 == 0])
people_you_now = ["Rolf", " Jhon", "anna", "GREG"]
# Normalise: trim surrounding whitespace and lowercase every name.
normalised_people = [person.strip().lower() for person in people_you_now]
print(normalised_people)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 07:47:09 2019
@author: Osama
"""
import os
import numpy as np
def createLists(imdbDir):
    """Collect review texts and 0/1 labels from an aclImdb-style directory.

    :param imdbDir: directory containing 'neg' and 'pos' subdirectories
                    of .txt review files
    :return: (texts, labels) parallel lists; label 0 for 'neg', 1 for 'pos'
    """
    labels = []
    texts = []
    for label_type in ['neg', 'pos']:
        dir_name = os.path.join(imdbDir, label_type)
        for fname in os.listdir(dir_name):
            if not fname.endswith('.txt'):
                continue
            try:
                # utf-8: review files may contain non-ASCII characters.
                # Context manager replaces the original open/close pair.
                with open(os.path.join(dir_name, fname), encoding="utf8") as f:
                    texts.append(f.read())
            except OSError:
                # Skip unreadable files instead of aborting the whole scan
                # (the original bare `except` also swallowed real bugs).
                continue
            labels.append(0 if label_type == 'neg' else 1)
    return texts, labels
def shuffleLists(texts, labels):
    """Shuffle two parallel lists in place with one shared permutation."""
    import random
    paired = list(zip(texts, labels))
    random.shuffle(paired)
    # Slice-assign so the caller's list objects are updated in place.
    texts[:], labels[:] = zip(*paired)
    return texts, labels
def createCSV(texts, labels, outFile):
    """Write rows (index, label, 'a', text) to *outFile*, fully quoted.

    :param texts: iterable of review strings
    :param labels: iterable of labels, parallel to texts
    :param outFile: destination CSV path (overwritten)
    """
    import csv
    i = 0
    with open(outFile, 'w', newline='', encoding='utf-8') as file:
        # Hoisted: the original re-created the writer on every row.
        wr = csv.writer(file, quoting=csv.QUOTE_ALL)
        for text, label in zip(texts, labels):
            try:
                wr.writerow([str(i), str(label), 'a', text])
                # Only count rows that were actually written.
                i = i + 1
            except csv.Error:
                # Narrowed from the original bare `except`: skip rows the
                # csv module cannot serialise, keep the best-effort behavior.
                continue
def createCSVForTestSet(texts, labels, outFile):
    """Write a test-set CSV with header (id, sentence); labels are ignored
    but still bound the number of rows via zip.

    :param texts: iterable of review strings
    :param labels: iterable parallel to texts (values unused)
    :param outFile: destination CSV path (overwritten)
    """
    import csv
    i = 0
    with open(outFile, 'w', newline='', encoding='utf-8') as file:
        # Hoisted: the original re-created the writer on every row.
        wr = csv.writer(file, quoting=csv.QUOTE_ALL)
        wr.writerow(['id', 'sentence'])
        for text, label in zip(texts, labels):
            try:
                wr.writerow([str(i), text])
                i = i + 1
            except csv.Error:
                # Narrowed from the original bare `except`.
                continue
# Loading data and preprocessing.
imdb_dir = r'C:\Users\Osama\Downloads\DL Workspace\Data\aclImdb'
train_dir = os.path.join(imdb_dir, 'train')
test_dir = os.path.join(imdb_dir, 'test')
# Train and validation sets: 20k train / 5k dev split after shuffling.
texts, labels = createLists(train_dir)
texts, labels = shuffleLists(texts, labels)
createCSV(texts[:20000], labels[:20000], 'train.csv')
createCSV(texts[20000:25000], labels[20000:25000], 'dev.csv')
# Test set (kept in original order).
texts, labels = createLists(test_dir)
#texts, labels=shuffleLists(texts, labels)
createCSVForTestSet(texts, labels, 'test.csv')

import pandas as pd

# BUG FIX: read_csv's second positional argument is `sep`, so the original
# pd.read_csv(path, 'utf-8') used the string "utf-8" as the column separator;
# the intent was clearly encoding='utf-8'.
# NOTE(review): train.csv/dev.csv are written without a header row, while
# read_csv defaults to header=0 (consuming the first row) — confirm whether
# header=None is wanted here.
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/train.csv'
df = pd.read_csv(path, encoding='utf-8')
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/train.tsv'
df.to_csv(path, sep='\t', index=False, header=False)
# if you are creating test.tsv, set header=True instead of False
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/dev.csv'
df = pd.read_csv(path, encoding='utf-8')
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/dev.tsv'
df.to_csv(path, sep='\t', index=False, header=False)
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/test.csv'
df = pd.read_csv(path, encoding='utf-8')
path = 'E:/output/My-Projects/Machine learning/IMDB Sentiment Analysis/test.tsv'
df.to_csv(path, sep='\t', index=False, header=True)
|
#I pledge my honor that I have abided by the Stevens Honor System.
#Jake Roux
def main():
print("This program will pull names from a file.")
print("Then it will capitalize them and put them in a new file")
infileName = input("File containing names:")
outfileName = input("Place capitalized names in this file:")
for name in infile:
CAPNAME = upper.name
print(CAPNAME, file = outfile)
infile.close()
outfile.close()
print("Capitalized names have been")
print("Written to:", outfileName)
main()
|
from musket_core import datasets,genericcsv, preprocessing, context
import numpy as np
# We create separate dataset for the siamic network - with 2 outputs
class Questions2Outputs(datasets.DataSet):
    """Quora question-pairs dataset exposing both questions plus the
    is_duplicate target — used for the siamese ("siamic") network."""
    def __init__(self):
        # train.csv columns used: question1, question2, is_duplicate.
        self.data = context.csv_from_data('train.csv')
        self.q1 = self.data['question1'].values
        self.q2 = self.data['question2'].values
        self.target = self.data['is_duplicate'].values
    def __len__(self):
        return len(self.data)
    def get_questions(self, item):
        # str() guards against NaN / non-string cells in the CSV.
        return [str(self.q1[item]),str(self.q2[item])]
    def __getitem__(self, item):
        # PredictionItem(id, inputs, target) with a 1-element target array.
        return datasets.PredictionItem(item, self.get_questions(item), np.array([self.target[item]]))
@datasets.dataset_provider(origin="train.csv",kind="")
def get_train_siamic():
    # Two-input dataset for the siamese setup.
    return Questions2Outputs()
@datasets.dataset_provider(origin="train.csv",kind="GenericDataSet")
def get_train():
    # Generic CSV dataset: two as-is text inputs, one binary target.
    return genericcsv.GenericCSVDataSet("train.csv",["question1","question2"],["is_duplicate"],[],{"question1":"as_is","question2":"as_is","is_duplicate":"binary"})
@datasets.dataset_provider(origin="test.csv",kind="GenericDataSet")
def get_test():
    # The test set has no target column.
    return genericcsv.GenericCSVDataSet("test.csv",["question1","question2"],[])
@preprocessing.dataset_preprocessor
def preprocess(inp):
    # Join the two questions into one string with a fixed separator token.
    return str(inp[0]) + " hello bear " + str(inp[1])
# Read a word and 1-based start/end positions, then print the word with
# the inclusive slice [a, b] removed. (Prompts and messages are Turkish.)
kelime = input("Bir Kelime Gir : ")
a = int(input("Başlangıç Sayısı Gir : "))
b = int(input("Bitiş Sayısını Gir : "))
if a > len(kelime) or b > len(kelime):
    # Positions beyond the word length are rejected.
    print("Kelimeden Büyük Değer Giremezsiniz!")
else:
    if a <= 0 or b <= 0:
        # Zero or negative positions are rejected.
        print("Sıfır Değeri Giremezsiniz")
    else:
        a = a-1  # convert the 1-based start position to a 0-based index
        print("Girilen Kelime : ", kelime)
        print("Kesilen Kelime : {}".format(kelime[:a]+kelime[b:]))
from . import models
from . import report
# NOTE: I could not test the Odoo 14 Enterprise version (no access to the Enterprise 14 source code), but this was tested against Odoo 14 Community.
import theano.tensor as T
import lasagne
import theano
from lasagne.updates import adam
from theano.ifelse import ifelse
import numpy as np
def mean_or_zero_if_empty(losses):
    """Symbolic mean of *losses*, or a 0 of matching dtype if the tensor is
    empty — guards against T.mean over zero elements."""
    return ifelse(T.eq(losses.size,0), np.array(0).astype(losses.dtype),
                  T.mean(losses))
def filter_unequal_1(var, targets):
    """Keep only rows of *var* whose corresponding *targets* row contains no
    -1 entries (-1 marks a missing/invalid target)."""
    valid_inds = T.nonzero(T.all(T.neq(targets,-1), axis=1))
    return var[valid_inds]
def create_pred_loss_train_adv_fn(final_layer, final_adv_time,
                                  main_loss_expression, adv_loss_expression,
                                  adv_weight_time, learning_rate=1e-3):
    """Compile theano train/eval functions for a classifier trained
    adversarially against a time-predicting head.

    :param final_layer: lasagne output layer of the main (marker) network
    :param final_adv_time: lasagne output layer of the time adversary
    :param main_loss_expression: fn(outputs, targets) -> per-sample losses
    :param adv_loss_expression: fn(outputs, targets) -> per-sample adversarial losses
    :param adv_weight_time: weight of the adversarial term in the main loss
    :param learning_rate: adam learning rate, shared by both update sets
    :return: (train_fn, eval_fn, iterable of updated parameters)
    """
    in_sym = T.ftensor4()
    targets_mrk = T.fmatrix()
    targets_time = T.fmatrix()
    out_mrk, out_time = lasagne.layers.get_output(
        [final_layer, final_adv_time],
        input_var=in_sym,
        inputs=in_sym, deterministic=False)
    # Rows whose targets are all -1 are "missing" and excluded from the loss.
    out_mrk = filter_unequal_1(out_mrk, targets_mrk)
    out_time = filter_unequal_1(out_time, targets_time)
    valid_t_mrk = filter_unequal_1(targets_mrk, targets_mrk)
    loss_mrk = main_loss_expression(out_mrk, valid_t_mrk)
    valid_t_time = filter_unequal_1(targets_time, targets_time)
    loss_time = main_loss_expression(out_time, valid_t_time)
    class_params = lasagne.layers.get_all_params(final_layer,
                                                 trainable=True)
    time_params = lasagne.layers.get_all_params(final_adv_time,
                                                trainable=True)
    # Parameters/layers exclusive to the adversary head (not shared with
    # the classifier trunk).
    only_time_params = [p for p in time_params if p not in class_params]
    all_layers_mrk = lasagne.layers.get_all_layers(final_layer)
    all_layers_time = lasagne.layers.get_all_layers(final_adv_time)
    only_time_layers = [l for l in all_layers_time if l not in all_layers_mrk]
    # Flipped (1 - t) targets: the classifier is pushed to *confuse*
    # the time adversary.
    adv_loss_time = adv_loss_expression(out_time,
                                        1 - valid_t_time)
    total_loss_mrk = mean_or_zero_if_empty(loss_mrk) + (
        adv_weight_time * mean_or_zero_if_empty(adv_loss_time))
    # Small L2 penalty over the whole classifier network.
    total_loss_mrk = total_loss_mrk + (1e-5 *
        lasagne.regularization.regularize_network_params(
            final_layer,
            lasagne.regularization.l2))
    updates_mrk = adam(total_loss_mrk, class_params,
                       learning_rate=learning_rate)
    # The adversary trains only its own layers/params against its own loss.
    total_loss_time = mean_or_zero_if_empty(loss_time) + 1e-5 * (
        lasagne.regularization.regularize_layer_params(only_time_layers,
                                                       lasagne.regularization.l2))
    updates_time = adam(total_loss_time, only_time_params,
                        learning_rate=learning_rate)
    # Sanity check: the two update dicts must touch disjoint parameters.
    for updated_param in updates_time:
        assert updated_param not in updates_mrk
    all_updates = updates_mrk.copy()
    all_updates.update(updates_time)
    pred_loss_train_fn = theano.function([in_sym, targets_mrk, targets_time],
                                         [out_mrk, out_time,
                                          total_loss_mrk, total_loss_time],
                                         updates=all_updates)
    # Deterministic graph (e.g. dropout disabled) for evaluation.
    test_outs_mrk, test_outs_time = lasagne.layers.get_output(
        [final_layer, final_adv_time],
        input_var=in_sym,
        inputs=in_sym, deterministic=True)
    test_outs_mrk = filter_unequal_1(test_outs_mrk, targets_mrk)
    test_outs_time = filter_unequal_1(test_outs_time, targets_time)
    test_loss_mrk = main_loss_expression(test_outs_mrk, valid_t_mrk)
    test_loss_time = main_loss_expression(test_outs_time, valid_t_time)
    test_adv_loss_time = adv_loss_expression(test_outs_time,
                                             1 - valid_t_time)
    test_loss_mrk = mean_or_zero_if_empty(test_loss_mrk) + (
        adv_weight_time * mean_or_zero_if_empty(test_adv_loss_time))
    pred_loss_fn = theano.function([in_sym, targets_mrk, targets_time],
                                   [test_outs_mrk, test_outs_time,
                                    test_loss_mrk, test_loss_time, test_adv_loss_time],)
    return pred_loss_train_fn, pred_loss_fn, all_updates.keys()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from findme.apps.student import views
urlpatterns = [
    # Examples:
    # url(r'^$', 'findme.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # url(r'^admin/', include(admin.site.urls)),
    # Student-facing pages; the root URL shows the same view as /form/.
    url(r'^$', views.form, name='home'),
    url(r'^form/$', views.form, name='form'),
    url(r'^submit/$', views.submit, name='submit'),
    url(r'^search/$', views.search, name='search'),
    url(r'^result/$', views.result, name='result'),
    url(r'^about/$', views.about, name='about'),
]
# import hashlib
# from datetime import datetime
# from sawtooth_sdk.protobuf import transaction_pb2, batch_pb2
#
# from addressing.b4e_addressing import addresser
# from protobuf.b4e_protobuf import payload_pb2
# from sawtooth_signing import create_context
# from sawtooth_signing import CryptoFactory
# from sawtooth_signing import secp256k1
# import time
# from sawtooth_sdk.protobuf.batch_pb2 import BatchList
# import requests
#
#
# def make_set_b4e_environment(signer, timestamp):
# environment_address = addresser.ENVIRONMENT_ADDRESS
# inputs = [environment_address]
# outputs = [environment_address]
#
# action = payload_pb2.SetB4EEnvironmentAction(timestamp=timestamp)
#
# payload = payload_pb2.B4EPayload(
# action=payload_pb2.B4EPayload.SET_B4E_ENVIRONMENT,
# set_b4e_environment=action,
# timestamp=timestamp
# )
#
# payload_bytes = payload.SerializeToString()
# return _make_batch(
# payload_bytes=payload_bytes,
# inputs=inputs,
# outputs=outputs,
# transaction_signer=signer,
# batch_signer=signer)
#
#
# def _make_batch(payload_bytes,
# inputs,
# outputs,
# transaction_signer,
# batch_signer):
# transaction_header = transaction_pb2.TransactionHeader(
# family_name=addresser.FAMILY_NAME,
# family_version=addresser.FAMILY_VERSION,
# inputs=inputs,
# outputs=outputs,
# signer_public_key=transaction_signer.get_public_key().as_hex(),
# batcher_public_key=batch_signer.get_public_key().as_hex(),
# dependencies=[],
# payload_sha512=hashlib.sha512(payload_bytes).hexdigest())
# transaction_header_bytes = transaction_header.SerializeToString()
#
# transaction = transaction_pb2.Transaction(
# header=transaction_header_bytes,
# header_signature=transaction_signer.sign(transaction_header_bytes),
# payload=payload_bytes)
#
# batch_header = batch_pb2.BatchHeader(
# signer_public_key=batch_signer.get_public_key().as_hex(),
# transaction_ids=[transaction.header_signature])
# batch_header_bytes = batch_header.SerializeToString()
#
# batch = batch_pb2.Batch(
# header=batch_header_bytes,
# header_signature=batch_signer.sign(batch_header_bytes),
# transactions=[transaction])
#
# return batch
#
#
# def submit(batch):
# batch_list_bytes = BatchList(batches=[batch]).SerializeToString()
#
# try:
# headers = {'Content-Type': 'application/octet-stream'}
# res0 = requests.post(url='http://localhost:8008/batches', data=batch_list_bytes, headers=headers).json()
# link = res0['link']
# res = requests.get(link).json()
# id = res['data'][0]['id']
# timer = 0
# timeout = 10
# while (requests.get(link).json()['data'][0]['status'] != "COMMITTED"):
# time.sleep(0.3)
# timer += 1
# if timer > timeout:
# return {"msg": "Transaction isn't committed"}
#
# res2 = requests.get('http://0.0.0.0:8008/batches/' + id).json()
# transaction_id = res2['data']['header']["transaction_ids"][0]
# return {"transaction_id": transaction_id}
# except Exception as e:
# print(e)
# return {"msg : ", e}
#
#
# def create_env(signer):
# timestamp = int(datetime.utcnow().timestamp())
# batch = make_set_b4e_environment(signer, timestamp)
# return submit(batch)
#
#
# def create_signer(private_key):
# context = create_context('secp256k1')
# return CryptoFactory(context).new_signer(secp256k1.Secp256k1PrivateKey.from_hex(private_key))
#
#
# stu_public_key = "025a5477d1f0ad3780f2aa5224371dc779b38719581ae623dc665168d8e1b6ca60"
# stu_private_key = "4d8de40eba071a892c6edefa990c125a02501d230c58353aeac06e04553e8b30"
# stu_signer = create_signer(stu_private_key)
|
# Transaction category names as they appear in the bank export; the first
# entry is NaN for transactions without a category.
category_names = [
    float('nan'),
    'Duty Free', 'Авиабилеты', 'Автоуслуги', 'Аптеки', 'Аренда авто', 'Госсборы',
    'Дом/Ремонт', 'Ж/д билеты', 'Животные', 'Искусство', 'Кино', 'Книги', 'Красота', 'Медицинские услуги',
    'Музыка', 'НКО', 'Наличные', 'Образование', 'Одежда/Обувь', 'Отели', 'Развлечения', 'Разные товары',
    'Рестораны', 'Связь/Телеком', 'Сервисные услуги', 'Спорттовары', 'Сувениры', 'Супермаркеты',
    'Топливо', 'Транспорт', 'Турагентства', 'Фаст Фуд', 'Финансовые услуги', 'Фото/Видео', 'Цветы',
    'Частные услуги'
]
# 0 - most important, 20 - least important
# NOTE(review): a float('nan') dict key only matches the *same* NaN object
# (NaN != NaN), so importance[float('nan')] with a fresh NaN raises KeyError —
# presumably the 'nan'/'NaN' string entries are the working fallback; confirm.
importance = {
    float('nan'): 0,
    'nan': 0,
    'NaN': 0,
    'Duty Free': 10,
    'Авиабилеты': 10,
    'Автоуслуги': 5,
    'Аптеки': 1,
    'Аренда авто': 4,
    'Госсборы': 2,
    'Дом/Ремонт': 8,
    'Ж/д билеты': 8,
    'Животные': 6, 'Искусство': 10, 'Кино': 10, 'Книги': 12, 'Красота': 6,
    'Медицинские услуги': 1,
    'Музыка': 12, 'НКО': 14,
    'Наличные': 3, 'Образование': 8, 'Одежда/Обувь': 8, 'Отели': 14, 'Развлечения': 16,
    'Разные товары': 8,
    'Рестораны': 10, 'Связь/Телеком': 4,
    'Сервисные услуги': 3, 'Спорттовары': 8, 'Сувениры': 16, 'Супермаркеты': 3,
    'Топливо': 5, 'Транспорт': 4, 'Турагентства': 12, 'Фаст Фуд': 14,
    'Финансовые услуги': 9, 'Фото/Видео': 14,
    'Цветы': 10,
    'Частные услуги': 3
}
|
import httplib
import json
import pdb
import time
class GenericClient(object):
    """
    Generic client for the orchestrator
    """
    def __init__(self,logger, host,port,prefix):
        """
        @param host : ipAddr of the REST server
        @param port : listening port of the REST server
        @param prefix : determines the type of the instruction
        """
        self.logger = logger
        self.host = host
        self.port = port
        # All request URLs are rooted at /<prefix>/
        self.prefix = '/'+prefix+'/'
    def send_request(self,method,action,data=None):
        """ sends requests through the httplib library """
        # initiate the connection (3-second timeout)
        conn = httplib.HTTPConnection(self.host, self.port, timeout=3)
        url = self.prefix + action
        header = {}
        # there is data to send
        if data is not None:
            # encode it in json format
            data = json.dumps(data)
            header['Content-Type'] = 'application/json'
        try:
            # send the request and get the response
            conn.request(method,url,data,header)
            res = conn.getresponse()
            # Any of these 2xx success statuses is accepted.
            if res.status in (httplib.OK,
                              httplib.CREATED,
                              httplib.ACCEPTED,
                              httplib.NO_CONTENT):
                return res
            else:
                # NOTE(review): the error details are discarded and a bare
                # Exception is raised; a specific exception type would let
                # callers distinguish failure modes.
                print res.status
                raise Exception
        except Exception:
            raise Exception
class VNFMClient(GenericClient):
    """Client used by the NFVO to push notifications and configuration
    updates to the VNFM over its 'nfvo' REST prefix."""
    def __init__(self,logger, vnfm_host, vnfm_port):
        GenericClient.__init__(self, logger, vnfm_host, vnfm_port, "nfvo")
    def notify_vnfm(self):
        # Retry once per second until the VNFM answers the "NFVO up" ping.
        while True:
            try:
                self.send_request('GET', 'notifications/nfvoUP')
                print ('NFVO UP notification sent to VNFM')
                break
            except Exception:
                print ('waiting for vnfm ...')
                time.sleep(1)
        return
    def send_VDUs_configs_to_vnfm(self, configurations):
        # Push the initial per-VDU configuration set (best-effort).
        try:
            self.send_request('POST', 'faces/configuration', configurations)
            print ('vnfs initial configuration sent to VNFM')
        except Exception:
            print ("can't send vnfs initial configuration to VNFM")
        return
    def update_firewall(self, firewall_vdu_id, prefix_list, mode):
        # Replace the firewall VDU's prefix list and filtering mode.
        try:
            new_configuration = {'vdu_id':firewall_vdu_id,
                                 'prefix_list':prefix_list,
                                 'mode':mode}
            self.send_request('POST', 'firewall/update', new_configuration)
            print ('firewall new configuration sent to VNFM')
        except Exception:
            print ("can't send firewall new configuration to VNFM")
        return
    def send_scaled_service_config(self, configuration):
        # Best-effort: failures are only logged to stdout.
        try:
            self.send_request('POST', 'update_service', configuration)
            #print ('scaled service configuration sent to VNFM')
        except Exception:
            print ("can't send scaled service configuration to VNFM")
        return
    def send_update_faces(self, router_id, faces):
        # Push a new face configuration for one router.
        data = {'router_id':router_id, 'faces':faces}
        try:
            self.send_request('POST', 'update_faces', data)
            print ('faces new configuration sent to VNFM')
        except Exception:
            print ("can't send faces new configuration to VNFM")
        return
    def send_update_router_mode(self, target_router_mode):
        try:
            self.send_request('POST', 'update_router_mode', target_router_mode)
            print ('update router mode cmd sent to VNFM')
        except Exception:
            print ("can't send router mode update notif to VNFM")
        return
import csv
import requests
from time import sleep
import datetime
from datetime import date
from bs4 import BeautifulSoup
# Scrape each player's date of birth from statbunker and append an "age"
# column to players.csv, writing the result to players_age.csv. (Python 2)
wfile = open('./data/players_age.csv', "wb")
rfile = open('./data/players.csv', "rb")
reader = csv.reader(rfile, delimiter=';')
writer = csv.writer(wfile, delimiter=';', quotechar='"', quoting=csv.QUOTE_NONE)
rownum = 0
for row in reader:
    if rownum == 0:
        # Header row: the original columns plus the new "age" column.
        writer.writerow(["id", "name", "team_name", "team_id", "position", "goals", "season", "comp_id", "age"])
    else:
        # Column 0 holds a URL whose player id starts at character 35.
        player_id = row[0][35:]
        r = requests.get("https://www.statbunker.com/players/getPlayerDetails?player_id=" +str(player_id))
        soup = BeautifulSoup(r.text, 'html.parser')
        elements = soup.find('tbody').find_all('td')
        # Date of birth is the fourth table cell, e.g. "01 Jan 1990".
        born = datetime.datetime.strptime(elements[3].text, "%d %b %Y").date()
        today = date.today()
        # Subtract one year if this year's birthday has not happened yet.
        age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
        row.append(str(age))
        writer.writerow(row)
    rownum += 1
    if rownum % 5 == 0:
        print rownum
        # Throttle requests to be polite to the server.
        sleep(1)
wfile.close()
rfile.close()
from django.db import models


class Blog(models.Model):
    """A simple blog entry with a name, publication date and short description."""

    name = models.CharField(max_length=100)
    # BUG FIX: blank was the *string* "False", which is truthy and therefore
    # behaved as blank=True; the intent is clearly a required field.
    date = models.DateField(blank=False)
    desc = models.CharField(max_length=250)

    def __str__(self):
        return self.name
from django.contrib import admin
from .models import *
# Register your models here.
# Expose each model in the Django admin with the default ModelAdmin options.
admin.site.register(Session)
admin.site.register(UserMetaData)
admin.site.register(Game)
admin.site.register(MindJournalEntry)
admin.site.register(Doctor)
#!/usr/bin/env python
from setuptools import setup

# Packaging metadata for the py_zap TV-ratings scraper.
setup(
    name='py_zap',
    version='1.0.0',
    description='Python scraper for accessing ratings from tvbythenumbers.zap2it.com',
    author='sharibarboza',
    author_email='barbozashari@gmail.com',
    url='https://github.com/sharibarboza/py_zap',
    download_url='https://github.com/sharibarboza/py_zap/archive/1.0.0.tar.gz',
    keywords=['zap2it', 'ratings', 'tv'],
    license='MIT License',
    packages=['py_zap'],
    # Runtime dependencies for HTTP fetching and HTML parsing.
    install_requires=[
        'beautifulsoup4',
        'requests>=2.9.1'
    ]
)
#coding:utf-8
# import pynlpir
# pynlpir.open(encoding='utf-8')
# phase='NLPIR分词系统前身为2000年发布的ICTCLAS词法分析系统'
#
# result=pynlpir.segment(phase,pos_tagging=False)
#
# for x in result:
# print result[0]
# pynlpir.close()
import sys
reload(sys)
# Python 2: force UTF-8 as the default encoding so Chinese text round-trips.
sys.setdefaultencoding("utf-8")
import pynlpir
pynlpir.open()
# Sample Chinese sentence to segment.
s = '因为我比较懒,所以我就只是修改了这句话,代码还是原博客的'
segments = pynlpir.segment(s)
for segment in segments:
    # Print "word <tab> part-of-speech tag" for each token.
    print segment[0], '\t', segment[1]
pynlpir.close()
import numpy as np
import imageio
import Poisson as poi
import matplotlib.pyplot as plt
from scipy import ndimage
def get_rand_mask(mask):
    """Return the inner boundary of *mask*: True pixels that have at least
    one True AND at least one False 4-neighbour.

    BUG FIX: the original indexed mask[y, x+1] / mask[y+1, x] without bounds
    checks (IndexError on the right/bottom edges) and mask[y, x-1] with
    negative indices (silent wraparound on the left/top edges). Neighbours
    outside the image now count as False; interior behavior is unchanged.

    :param mask: 2-D boolean array
    :return: 2-D boolean array of the same shape
    """
    height, width = mask.shape[0], mask.shape[1]
    rand_mask = np.zeros((height, width), dtype=bool)

    def neighbour(ny, nx):
        # Out-of-image neighbours are treated as False.
        if 0 <= ny < height and 0 <= nx < width:
            return bool(mask[ny, nx])
        return False

    for y in range(height):
        for x in range(width):
            if not mask[y, x]:
                continue
            neighbours = (neighbour(y, x - 1), neighbour(y, x + 1),
                          neighbour(y - 1, x), neighbour(y + 1, x))
            # Boundary pixel: mixed True/False neighbourhood.
            if any(neighbours) and not all(neighbours):
                rand_mask[y, x] = True
    return rand_mask
def inpaint(img, n, x, y):
    """Inpaint the rectangle x[0]:x[1] (columns), y[0]:y[1] (rows) of *img*
    by running the Poisson solver for n iterations.

    :param img: grayscale image, or RGB (averaged to grayscale in [0, 1])
    :param n: iteration count forwarded to poi.poisson
    :param x: (x0, x1) column bounds of the damaged region
    :param y: (y0, y1) row bounds of the damaged region
    :return: the inpainted image returned by poi.poisson
    """
    if len(img.shape) == 3: #if statement for handling color picure
        # Average the three channels and scale 0-255 down to [0, 1].
        img = np.sum(img.astype(float), 2) / (3 * 255)
    # mask[..., 0]: region to reconstruct; mask[..., 1]: its boundary ring.
    mask = np.zeros((img.shape[0], img.shape[1], 2), dtype=bool)
    err_mask = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
    # The region actually blanked out is inset 10 px from the given bounds —
    # presumably to keep valid pixels around the hole for the solver;
    # TODO(review): confirm the 10 px margin is intentional for all sizes.
    err_x1 = x[0]+10
    err_y1 = y[0]+10
    err_x2 = x[1]-10
    err_y2 = y[1]-10
    err_mask[err_y1:err_y2, err_x1:err_x2] = True
    mask[y[0]:y[1], x[0]:x[1], 0] = True
    mask[:,:,1] = get_rand_mask(mask[:,:,0])
    # Destroy the pixels that poisson() is expected to reconstruct.
    img[err_mask[:,:]] = 0
    edit_img = poi.poisson(img, n, rand='dericle', mask=mask)
    return edit_img
import csv
import time  # BUG FIX: time.sleep was called but time was never imported

# Initialize csv file and log (time, voltage) samples every 0.1 s forever.
fileName = input('Please enter a file name: ')
with open(fileName, 'w', newline='') as csvfile:
    filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    # BUG FIX: timePassed and voltage were never defined (NameError on the
    # first loop iteration).
    timePassed = 0.0
    voltage = 0.0  # TODO: replace with the real sensor reading
    while True:
        filewriter.writerow([timePassed, voltage])
        time.sleep(0.1)
        timePassed = timePassed + 0.1
|
# Generated by Django 3.0.6 on 2020-05-21 12:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the PageCounter model (site visit counter) and a
    nation_point integer field to the existing 'nation' model."""

    dependencies = [
        ('main', '0002_auto_20200511_1518'),
    ]
    operations = [
        migrations.CreateModel(
            name='PageCounter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.IntegerField(verbose_name='방문자수')),
            ],
        ),
        migrations.AddField(
            model_name='nation',
            name='nation_point',
            field=models.IntegerField(default=0),
        ),
    ]
|
# server.py
from flask import Flask, request, jsonify
import json
app = Flask(__name__)
@app.route('/', methods=['POST'])
def add():
    """Accept a JSON POST body, persist it to ./data/test.json and echo it back."""
    data = request.get_json()
    print(data)
    # NOTE: the file is overwritten on every request.
    with open('./data/test.json', 'w') as f:
        json.dump(data, f, indent=4)
    return jsonify(data)
if __name__ == '__main__':
    # Debug mode enabled — development use only.
    app.debug = True
    app.run(host='127.0.0.1', port=5000)
from pyquery import PyQuery as pq
# Fetch the Baidu homepage and print its <head> element.
doc = pq(url='http://www.baidu.com')
print(doc('head'))
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import copy as cp
def convolution(input_img, filter):
    """
    Convolve an image with a filter (true convolution: the kernel is
    rotated 180 degrees before the sliding-window pass).

    :param input_img: 2-D image, any numeric dtype (converted to float32)
    :param filter: m*n kernel with float values; m and n must be odd
    :return: float32 image, same shape as input_img (caution: not uint8)
    :raises ValueError: if either kernel dimension is even
    """
    if filter.shape[0] % 2 == 0 or filter.shape[1] % 2 == 0:
        # The original built this exception but never raised it.
        raise ValueError('Filter size should be odd')
    # size of filter
    m, n = filter.shape
    # Rotate kernel 180 degrees; replaces the double zip(*...[::-1]) dance.
    filter_r = np.rot90(filter, 2)
    input_img = np.float32(input_img)
    # allocate an image for output
    img_out = np.zeros(shape=input_img.shape, dtype=np.float32)
    # Zero-pad so the output has the same size as the input.
    img_pad = np.pad(array=input_img,
                     pad_width=[(m // 2, m // 2), (n // 2, n // 2)],
                     mode='constant', constant_values=0)
    # Slide the rotated kernel over every (padded) pixel position.
    for p, i in enumerate(range(m // 2, img_pad.shape[0] - (m // 2))):
        for q, j in enumerate(range(n // 2, img_pad.shape[1] - (n // 2))):
            neighbourhood = img_pad[i - (m // 2):i + (m // 2) + 1,
                                    j - (n // 2):j + (n // 2) + 1]
            # Point-wise multiply and sum of products.
            img_out[p, q] = np.sum(neighbourhood * filter_r)
    return img_out
def threshold_img(input_img, thresh_value):
    """
    Binarize an image: pixels >= thresh_value become 255, the rest 0.

    :param input_img: numeric image array
    :param thresh_value: inclusive lower bound for the "on" pixels
    :return: uint8 image containing only 0 and 255
    """
    # Boolean comparison scaled to the uint8 on-value.
    binary = (input_img >= thresh_value) * 255
    return np.uint8(binary)
def next_point_in_8_neigh(b, c):
    """
    Next point of the 8-neighbourhood of b, in clockwise order.

    :param b: centre of the neighbourhood, (row, col)
    :param c: a point in the 8-neighbourhood of b
    :return: the neighbour that follows c clockwise around b, or None if
        c is not actually an 8-neighbour of b
    """
    # Clockwise successor, keyed by c's offset from b.
    rotation = {
        (1, -1): (0, -1),
        (0, -1): (-1, -1),
        (-1, -1): (-1, 0),
        (-1, 0): (-1, 1),
        (-1, 1): (0, 1),
        (0, 1): (1, 1),
        (1, 1): (1, 0),
        (1, 0): (1, -1),
    }
    delta = rotation.get((c[0] - b[0], c[1] - b[1]))
    if delta is None:
        # Mirrors the original's implicit None for invalid input.
        return None
    return (b[0] + delta[0], b[1] + delta[1])
# Boundary (border) following - Gonzales 3th edition page 796
def border_following(input_img):
    """
    Follow the border of a single object in clockwise order.

    :param input_img: image with intensities 0 and 255 containing only the
        border of one object; the border must be continuous and one pixel
        thick
    :return: list of border pixel positions [(r1,c1), (r2,c2), ...]; the
        starting pixel appears both first and last
    :raises ValueError: if no pixel has intensity 255

    (Leftover debug print of the image shape was removed.)
    """
    output = []
    # Find the uppermost-leftmost border pixel as the starting point.
    start_point = None
    for i in range(0, input_img.shape[0]):
        for j in range(0, input_img.shape[1]):
            if input_img[i, j] == 255:
                start_point = (i, j)
                break
        if start_point is not None:
            break
    if start_point is None:
        raise ValueError('Image should have pixels with 255 intensity')
    # b walks along the border; c is the background pixel we rotate from.
    b0 = cp.copy(start_point)
    # The west neighbour of the start point is background by construction.
    c0 = (start_point[0], start_point[1] - 1)
    output.append(b0)
    b = cp.copy(b0)
    c = cp.copy(c0)
    first_time = True
    # Traverse clockwise until we return to the starting pixel.
    while (b[0] != b0[0] or b[1] != b0[1]) or first_time == True:
        # Rotate clockwise around b until the next border pixel is hit.
        while True:
            next_point = next_point_in_8_neigh(b=b, c=c)
            if input_img[next_point[0], next_point[1]] == 0:
                c = cp.copy(next_point)
            else:
                b = cp.copy(next_point)
                break
        output.append(b)
        first_time = False
    return output
def convert_boundary_to_grid(input_boundary_clockwise_order, space_pixels):
    """
    Snap a clockwise-ordered boundary onto a grid whose lines are
    (space_pixels + 1) pixels apart.

    :param input_boundary_clockwise_order: boundary points in clockwise order
    :param space_pixels: pixels between two grid lines; must be even
    :return: grid points in clockwise order; consecutive duplicates of
        snapped points are dropped
    :raises ValueError: if space_pixels is odd
    """
    if space_pixels % 2 != 0:
        raise ValueError('space pixel should be even')
    step = space_pixels + 1
    output = []
    for point in input_boundary_clockwise_order:
        # Points already on the grid are kept as-is.
        if point[0] % step == 0 and point[1] % step == 0:
            output.append(point)
            continue
        # Otherwise snap to the nearest of the 4 surrounding grid corners
        # (L1 distance, first corner wins ties).
        gx = point[0] // step
        gy = point[1] // step
        corners = [(gx * step, gy * step),
                   (gx * step, (gy + 1) * step),
                   ((gx + 1) * step, gy * step),
                   ((gx + 1) * step, (gy + 1) * step)]
        best_dist = np.inf
        nearest = None
        for corner in corners:
            dist = np.abs(point[0] - corner[0]) + np.abs(point[1] - corner[1])
            if dist < best_dist:
                best_dist = dist
                nearest = cp.copy(corner)
        # Avoid emitting the same snapped corner twice in a row.
        if not output or output[-1] != nearest:
            output.append(nearest)
    return output
def smallest_order(freeman_code_list):
    """
    Return the lexicographically smallest rotation of a Freeman code,
    making the code independent of the traversal starting point.

    :param freeman_code_list: python list of integers, e.g. [2, 3, 4, ...]
    :return: the rotation of the input that compares smallest

    The original pairwise-comparison loop never considered the last
    rotation index, so inputs whose smallest rotation started at the final
    position (e.g. [2, 1]) left `best_index` unbound and raised NameError.
    """
    n = len(freeman_code_list)
    # Doubling the list lets every rotation be read as a contiguous slice.
    extended_list = freeman_code_list + freeman_code_list
    # Lexicographic minimum over all n rotation start indices
    # (min keeps the earliest index on ties, like the original intent).
    best_index = min(range(n), key=lambda i: extended_list[i:i + n])
    return extended_list[best_index:best_index + n]
def freeman_code(point_on_grid_clockwise_order):
    """
    Build an 8-directional Freeman chain code from grid points in
    clockwise order, normalized to be independent of the starting point
    (smallest rotation) and of rotation (first differences).

    :param point_on_grid_clockwise_order: grid points in clockwise order
    :return: normalized difference chain code as a list of ints in 0..7
    """
    # Map (sign of row step, sign of col step) -> Freeman direction code.
    directions = {(0, 1): 0, (-1, 1): 1, (-1, 0): 2, (-1, -1): 3,
                  (0, -1): 4, (1, -1): 5, (1, 0): 6, (1, 1): 7}
    points = cp.copy(point_on_grid_clockwise_order)
    freeman_code_seq = []
    for cur, nxt in zip(points, points[1:]):
        # Sign of the step between consecutive grid points.
        dr = (nxt[0] > cur[0]) - (nxt[0] < cur[0])
        dc = (nxt[1] > cur[1]) - (nxt[1] < cur[1])
        code = directions.get((dr, dc))
        # Coincident points produce no code, as in the original chain.
        if code is not None:
            freeman_code_seq.append(code)
    # Normalize for the starting point.
    freeman_code_seq_smallest = smallest_order(freeman_code_list=freeman_code_seq)
    # First differences (mod 8) make the code rotation independent.
    seq = [freeman_code_seq_smallest[-1]] + freeman_code_seq_smallest
    diff_seq = [(seq[i] - seq[i - 1]) % 8 for i in range(1, len(seq))]
    # Normalize the difference code for the starting point as well.
    return smallest_order(freeman_code_list=diff_seq)
def draw_freeman_code(input_freeman_code, space_pixels, first_segment=2):
    """
    Render a (difference-coded) Freeman chain as line segments.

    :param input_freeman_code: difference chain code (ints 0..7)
    :param space_pixels: pixels between grid lines (segment length - 1)
    :param first_segment: direction assumed for the first segment
    :return: uint8 image cropped to the drawing plus a 10 px margin
    """
    # Freeman direction -> (row step, col step) on the grid.
    steps = {0: (0, 1), 1: (-1, 1), 2: (-1, 0), 3: (-1, -1),
             4: (0, -1), 5: (1, -1), 6: (1, 0), 7: (1, 1)}
    stride = space_pixels + 1
    # Oversized canvas; cropped at the end.
    img = np.zeros(shape=(2000, 2000), dtype=np.uint8)
    cur_point = [1000, 1000]
    cum_sum = first_segment
    for code in input_freeman_code:
        # The chain stores first differences: accumulate to recover the
        # absolute direction.
        cum_sum = (cum_sum + code) % 8
        pre_point = cp.copy(cur_point)
        dr, dc = steps[cum_sum]
        cur_point[0] = cur_point[0] + dr * stride
        cur_point[1] = cur_point[1] + dc * stride
        # cv.line takes (x, y) i.e. (col, row) coordinates.
        cv.line(img, (pre_point[1], pre_point[0]),
                (cur_point[1], cur_point[0]),
                color=(255, 255, 255), thickness=1)
    # Crop the black area: bounding box of all drawn pixels.
    min_x = np.inf
    max_x = -1
    min_y = np.inf
    max_y = -1
    for i in range(0, img.shape[0]):
        for j in range(0, img.shape[1]):
            if img[i, j] != 0:
                min_x = min([i, min_x])
                max_x = max([i, max_x])
                min_y = min([j, min_y])
                max_y = max([j, max_y])
    img = img[min_x - 10:max_x + 10, min_y - 10:max_y + 10]
    return img
if __name__ == '__main__':
    #######################################
    # part 1
    #######################################
    # read image (NOTE: Windows-style path, grayscale load)
    img = cv.imread(filename='.\\Images\\Freeman.jpg', flags=cv.IMREAD_GRAYSCALE)
    # drop the first 10 rows of the scan
    img = img[10::, :]
    # blur image with a 5x5 box filter
    kernel = np.ones(shape=(5,5), dtype=np.float32) / (5*5)
    img_blur = convolution(input_img=img, filter=kernel)
    img_blur = np.uint8(img_blur)
    # threshold image
    img_threshold = threshold_img(input_img=img_blur, thresh_value=5)
    # calculate edges
    # with this filter, thickness of edge is 1
    kernel = np.array([0, -1, 1], dtype=np.float32)
    kernel = np.reshape(kernel, newshape=(1, 3))
    # horizontal and vertical forward differences, combined and re-thresholded
    img_edge_hor = convolution(input_img=img_threshold, filter=kernel)
    img_edge_hor = np.uint8(np.abs(img_edge_hor))
    img_edge_ver = convolution(input_img=img_threshold, filter=kernel.transpose())
    img_edge_ver = np.uint8(np.abs(img_edge_ver))
    img_edge = np.uint8(img_edge_ver + img_edge_hor)
    img_edge = threshold_img(input_img=img_edge, thresh_value=100)
    # travers boundary in clockwise order
    boundary_clockwise_order = border_following(input_img=img_edge)
    # initial segment direction for each grid size below
    # (presumably tuned by hand for this input image -- TODO confirm)
    first_segment = [6,2,2]
    # for different grid size
    for idx, grid_size in enumerate([4, 10, 20]):
        point_on_grid_clockwise_order = convert_boundary_to_grid(input_boundary_clockwise_order=boundary_clockwise_order,
                                                                 space_pixels=grid_size)
        # freeman code independent of origin and rotation
        freeman_code_of_img = freeman_code(point_on_grid_clockwise_order=point_on_grid_clockwise_order)
        print('(space between grid pixels is {}), Freeman code: \n'.format(grid_size), freeman_code_of_img)
        #######################################
        # part 2
        #######################################
        # draw freeman code and save/show all intermediate results
        freeman_img = draw_freeman_code(input_freeman_code=freeman_code_of_img, space_pixels=grid_size, first_segment=first_segment[idx])
        if idx == 0:
            # intermediate images only need to be shown/saved once
            cv.imshow('Input Image', img)
            cv.imshow('Blur Image', img_blur)
            cv.imshow('Threshold Image', img_threshold)
            cv.imshow('Edge Image', img_edge)
            cv.imwrite('.\\2\\Input Image.jpg', img)
            cv.imwrite('.\\2\\Blur Image.jpg', img_blur)
            cv.imwrite('.\\2\\Threshold Image.jpg', img_threshold)
            cv.imwrite('.\\2\\Edge Image.jpg', img_edge)
        # visualize the snapped boundary points for this grid size
        point_on_grid = np.zeros(shape=img_edge.shape, dtype=np.uint8)
        for point in point_on_grid_clockwise_order:
            point_on_grid[point[0], point[1]] = 255
        cv.imshow('Points on grid- grid {}'.format(grid_size), point_on_grid)
        cv.imshow('Draw Freeman Code- grid {}'.format(grid_size), freeman_img)
        cv.imwrite('.\\2\\Points on grid- grid {}.jpg'.format(grid_size), point_on_grid)
        cv.imwrite('.\\2\\Draw Freeman Code- grid {}.jpg'.format(grid_size), freeman_img)
    # block until a key is pressed so the windows stay open
    cv.waitKey(0)
# Package version string; the "-dev" suffix marks an unreleased build.
version = "0.7.3-dev"
|
import re
import json
from datetime import datetime
from rest_framework.response import Response
class CustomEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetimes as ISO-8601 strings and
    falls back to an object's __dict__ for everything else."""

    def default(self, o):
        # Arbitrary objects are serialized via their attribute dict;
        # datetimes get their ISO representation instead.
        if not isinstance(o, datetime):
            return o.__dict__
        return o.isoformat()
def json_response(object):
    # Wrap an object in a DRF Response, serialized with CustomEncoder.
    # NOTE(review): the parameter shadows the builtin `object` (kept for
    # interface compatibility), and the body is a JSON *string* that DRF
    # may render/encode a second time -- confirm callers expect this.
    return Response(json.dumps(object, cls=CustomEncoder))
def is_email(email):
    """Return True if *email* matches a basic address pattern.

    The pattern requires word-character local/domain parts separated by
    single [-+.'] / [-.] punctuation and at least one dot in the domain.
    """
    # Raw string avoids DeprecationWarning for invalid escape sequences;
    # bool() replaces the redundant `True if ... else False` ternary.
    return bool(re.match(r"^\w+([-+.']\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$", email))
def log_error(error_name, errors):
    """Append a timestamped error line to errors.txt.

    Best-effort: any failure while formatting or writing is swallowed so
    logging can never break the caller.
    """
    try:
        with open('errors.txt', 'a+') as log_file:
            details = ','.join(str(item) for item in errors)
            log_file.write(f"\n{datetime.now()} - {error_name}: {details}")
    except Exception:
        # Deliberately silent -- see docstring.
        pass
#!/usr/bin/env python3
import argparse
from glob import glob
from operator import itemgetter
import os
import pickle
import numpy as np
import requests
from skimage.io import imread, imsave
from skimage.util import invert
from skimage.color import rgb2gray
from skimage.exposure import rescale_intensity
from tqdm import tqdm
IMAGE_FILENAME = 'val_256.tar'
IMAGE_DIR = 'images'
def download_images():
    """Download the Places365 val_256 archive (with a progress bar) and
    unpack it into IMAGE_DIR; each step is skipped if already done."""
    print('>> download_images() called.')
    if not os.path.exists(IMAGE_FILENAME):
        # Streaming download pattern, see:
        # https://stackoverflow.com/a/37573701
        url = 'http://data.csail.mit.edu/places/places365/val_256.tar'
        response = requests.get(url, stream=True)
        total_size_in_bytes = int(response.headers.get('content-length', 0))
        block_size = 1024
        progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
        with open(IMAGE_FILENAME, 'wb') as file:
            for data in response.iter_content(block_size):
                progress_bar.update(len(data))
                file.write(data)
        progress_bar.close()
        # A size mismatch indicates a truncated / failed download.
        if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
            print("ERROR, something went wrong")
    if not os.path.exists(IMAGE_DIR):
        # Unpack via the shell (requires `tar` and `mv` on PATH).
        cmd = f'''
        tar -xf {IMAGE_FILENAME};
        mv val_256 {IMAGE_DIR}
        '''
        os.system(cmd)
def compute_scores():
    """Score every scene image against every clock-hands template.

    Loads the templates from hands/*png (grayscale, inverted so hands are
    bright), scores each IMAGE_DIR/*jpg image against each template, and
    pickles {template_path: [(image_path, score), ...]} sorted by score
    (best first) into scores.pkl.
    """
    print('>> compute_scores() called.')
    hands = {}
    for fname in glob('hands/*png'):
        hand = imread(fname)
        hand = rgb2gray(hand)
        # Invert so the hands are bright on a dark background.
        hand = invert(hand)
        hands[fname] = hand

    def compute_similarity(hand, img):
        # Pixel-wise product; the -0.01 offset presumably penalises bright
        # image pixels outside the hand region -- TODO confirm.
        return ((hand - 0.01) * img).sum()

    scores = {}
    for fname in tqdm(glob(f'{IMAGE_DIR}/*jpg')):
        image = rgb2gray(imread(fname))
        for hand_key in hands.keys():
            if hand_key not in scores:
                scores[hand_key] = []
            score = compute_similarity(hands[hand_key], image)
            scores[hand_key].append((fname, score))
    # Rank candidates per template, best score first.
    for key in scores:
        scores[key] = sorted(scores[key], key=itemgetter(1), reverse=True)
    with open('scores.pkl', 'wb') as f:
        pickle.dump(scores, f)
def make_hourly_images():
    """Blend the best-matching scene images for each of the 12 hours into
    hourly/<hh>.png via an exponential moving average.

    Fixes: the inner candidate loop reused `i` and shadowed the outer
    hour loop variable (harmless here but error-prone); the enumerate
    index was never used.
    """
    print('>> make_hourly_images() called.')
    os.system('mkdir -p hourly')
    with open('scores.pkl', 'rb') as f:
        scores = pickle.load(f)
    n_layered = 20
    for hour_idx in tqdm(range(12)):
        hour = '{:02d}'.format(hour_idx)
        key = f'hands/hands-{hour}.png'
        layered = np.zeros((256, 256), dtype=np.float64)
        # Blend ranks 10..19; the top 10 matches are skipped.
        # NOTE(review): skipping the 10 best looks deliberate -- confirm.
        candidates = [fname for fname, _ in scores[key][10:n_layered]]
        for fname in candidates:
            image = rgb2gray(imread(fname))
            # Exponential moving average over the candidates.
            layered = 0.8 * layered + 0.2 * image
        layered = rescale_intensity(layered)
        imsave(f'hourly/{hour}.png', layered)
def make_minutely_images():
    """Interpolate between consecutive hourly images to produce one image
    per minute (720 total), written to minutely/<hh><mm>.png."""
    print('>> make_minutely_images() called.')
    cmd = 'mkdir -p minutely'
    os.system(cmd)
    scores = None
    with open('scores.pkl', 'rb') as f:
        scores = pickle.load(f)
    # NOTE(review): `scores` is loaded but never used below -- confirm
    # whether the load can be dropped.
    for hour in range(12):
        for minute in range(60):
            print('>> processing {:02}:{:02} ..'.format(hour, minute))
            # Blend weight: 1 at the top of the hour, ->0 just before the
            # next hour.
            ratio = 1 - (float(minute) / 60)
            layered = np.zeros((256, 256), dtype=np.float64)
            image1 = rgb2gray(imread('hourly/{:02d}.png'.format(hour)))
            # Wrap from hour 11 back to hour 0.
            image2 = rgb2gray(imread('hourly/{:02d}.png'.format((hour + 1) % 12)))
            layered = 0.8 * layered + 0.2 * ratio * image1
            layered = 0.8 * layered + 0.2 * (1 - ratio) * image2
            layered = rescale_intensity(layered)
            imsave('minutely/{:02}{:02}.png'.format(hour, minute), layered)
def make_animated_output():
    """Stitch all minutely frames into clock-like-output.gif using
    ImageMagick's `convert` (must be available on PATH)."""
    print('>> make_animated_output() called.')
    cmd = 'convert minutely/*png clock-like-output.gif'
    os.system(cmd)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Clock-like image generator')
    # CLI command name -> handler function.
    commands = {
        'download-images': download_images,
        'compute-scores': compute_scores,
        'make-hourly-images': make_hourly_images,
        'make-minutely-images': make_minutely_images,
        'make-animated-output': make_animated_output,
    }
    parser.add_argument('command', type=str, choices=list(commands))
    args = parser.parse_args()
    handler = commands.get(args.command)
    if handler is None:
        # Unreachable with argparse choices, kept as a safety net.
        print(f'Command "{args.command}" not supported')
    else:
        handler()
# Author Lucas Saraiva Ferreira
import networkx as nx
import matplotlib
import matplotlib . pyplot as plt
from scipy import stats
import numpy as numpy
import powerlaw
matplotlib.use ("agg")
# NOTE(review): the backend is selected *after* pyplot was imported above;
# modern matplotlib tolerates late switching, but on older versions this
# call may have no effect -- confirm.
#Insert here your graph file path
print("Reading graph input")
GraphFile = "graphs/graph_version6.gexf"
G = nx.read_gexf(GraphFile)
#Node degree distribution
print("Calculating node degree distribution")
# Histogram: degree -> number of nodes with that degree.
degrees = {}
for node in G.nodes():
    single_degree = G.degree(node)
    if single_degree not in degrees:
        degrees[single_degree] = 0
    degrees[single_degree] += 1
# Sorted (degree, frequency) pairs for plotting.
degrees_map = sorted(degrees.items())
#Node degree log log plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog([ degree for (degree , frequency ) in degrees_map ] , [ frequency for (degree ,frequency ) in degrees_map ], 'b.')
plt.legend('Todos nos')
plt.xlabel('Grau')
plt.ylabel('Numero de nos')
plt.title ("Distribuicao do grau dos nos")
plt.xlim([2*10**0, 2*10**2])
fig.savefig ("degree_distribution.png")
#Alpha from linear regression
print("Calculating alpha coefficient")
# Unzip the sorted (degree, frequency) pairs into parallel lists.
# (Removed a dead `i = 0` counter and commented-out linregress code.)
x = [degree for degree, frequency in degrees_map]
y = [frequency for degree, frequency in degrees_map]
# Fit discrete power laws to the frequency and the degree sequences and
# report both scaling exponents.
fit_y = powerlaw.Fit(y, discrete=True)
fit_x = powerlaw.Fit(x, discrete=True)
print(fit_y.power_law.alpha)
print(fit_x.power_law.alpha)
#Average node degree
print("Calculating average node degree")
# Average degree = sum of node degrees / number of nodes (= 2|E|/|V|).
# sum() over a generator replaces the manual accumulation loop.
sum_of_edges = sum(G.degree(node) for node in G.nodes())
average_degree = sum_of_edges/len(G)
print(average_degree)
#Components
print("Calculating number of connected components")
print(nx.number_connected_components(G))
#Overlap
print("Calculating node overlap")
# Histogram of neighbourhood-overlap values over connected node pairs.
# NOTE(review): iterating over all *ordered* node pairs visits each edge
# twice, uniformly doubling every frequency (and is O(|V|^2)); iterating
# G.edges() would count each edge once -- confirm which is intended.
# NOTE(review): n_join_nbrs is 0 for two degree-1 nodes joined by an edge,
# which would raise ZeroDivisionError.
overlap = {}
for node1 in G.nodes():
    for node2 in G.nodes():
        if(G.has_edge(node1, node2)):
            # overlap = |common neighbours| / |union of other neighbours|
            n_common_nbrs = len(set(nx.common_neighbors(G, node1, node2)))
            n_join_nbrs = G.degree(node1) + G.degree(node2) - n_common_nbrs - 2
            result = n_common_nbrs/n_join_nbrs
            if result not in overlap:
                overlap[result] = 0
            overlap[result] += 1
overlap_map = sorted(overlap.items())
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.loglog([ overlap_value for (overlap_value , frequency ) in overlap_map ] , [ frequency for (overlap_value ,frequency ) in overlap_map ], 'b.')
plt.legend("Todos nos")
plt.xlabel('Overlap')
plt.ylabel('Numero de nos')
plt.title ("Distribuição do overlap da vizinhança")
fig2.savefig ( "overlap.png" )
#Path
print("Removing isolates nodes and self loops")
# Work on a copy so the original graph keeps its isolates and self-loops.
tempgraph = G.copy();
isolate_list = list(nx.isolates(tempgraph))
self_edges = list(nx.nodes_with_selfloops(tempgraph))
#Removing isolates nodes and self loops
# (average_shortest_path_length requires a connected graph, so isolated
# nodes must be dropped; self-loops would distort path statistics)
for loop in self_edges:
    tempgraph.remove_edge(loop,loop)
for isolated_node in isolate_list:
    tempgraph.remove_node(isolated_node)
print("Calculating average path and all pairs shortest path")
print(nx.average_shortest_path_length(tempgraph))
#Insert print(all_paths) here
# Histogram: shortest-path length -> number of (source, target) pairs.
all_paths = dict(nx.all_pairs_shortest_path_length(tempgraph, None))
paths = {}
for key,dict2 in all_paths.items():
    for value in dict2.items():
        if value[1] not in paths:
            paths[value[1]] = 0
        paths[value[1]] += 1
paths_map = sorted(paths.items())
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot([ path for (path , frequency ) in paths_map ] , [ frequency for (path ,frequency ) in paths_map ], 'b.')
plt.legend('Todos nos')
plt.xlabel('Tamanho do caminho')
plt.ylabel('Numero de nos')
plt.title ("Distribuição dos caminhos minimos de todos pares")
fig2.savefig ( "all_pair_paths_normal.png" )
print("Calculating Nodes Betwenness")
#Biggest betwenness values to be removed for tests
#G.remove_node("French Empire")
#G.remove_node("Kuwait")
#G.remove_node("Venezuela")
#G.remove_node("Qatar")
#G.remove_node("Haiti")
# Histogram: betweenness value -> number of nodes with that exact value.
node_betwenness = nx.betweenness_centrality(G)
betwenness = {}
for key,value in node_betwenness.items():
    if value not in betwenness:
        betwenness[value] = 0
    betwenness[value] += 1
betwenness_map = sorted(betwenness.items())
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.loglog([ degree for (degree , frequency ) in betwenness_map ] , [ frequency for (degree ,frequency ) in betwenness_map ], 'b.')
plt.legend('Todos nos')
plt.xlabel('Betwenness')
plt.ylabel('Numero de nos')
plt.title ("Distribuição do betwennes dos nós")
fig2.savefig ("betwenness_nodes.png")
print("Calculating Edges Betwenness")
# Same histogram, but for edge betweenness.
edge_betwenness = nx.edge_betweenness_centrality(G)
betwenness = {}
for key,value in edge_betwenness.items():
    if value not in betwenness:
        betwenness[value] = 0
    betwenness[value] += 1
betwenness_map = sorted(betwenness.items())
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.loglog([ degree for (degree , frequency ) in betwenness_map ] , [ frequency for (degree ,frequency ) in betwenness_map ], 'b.')
plt.legend('Todas arestas')
plt.xlabel('Betwenness')
plt.ylabel('Numero de arestas')
plt.title ("Distribuição do betwennes das arestas")
fig2.savefig("betwenness_edges.png")
print("Calculating Assortativity, Assortativity Coef and Pearson Assortativity")
# Degree assortativity, attribute ("continent") assortativity, and the
# Pearson-correlation variant of degree assortativity.
assortativity = nx.degree_assortativity_coefficient(G)
assortatitvity_coef = nx.attribute_assortativity_coefficient(G, "continent")
assortativity_pearson = nx.degree_pearson_correlation_coefficient(G)
print(assortativity)
print(assortatitvity_coef)
print(assortativity_pearson)
print("Calculating Average degree connectivity")
# Histogram over the average-neighbour-degree values returned per degree.
assortativity_list = {}
assort_list = nx.average_degree_connectivity(G)
for key, value in assort_list.items():
    if value not in assortativity_list:
        assortativity_list[value] = 0
    assortativity_list[value] += 1
assortativity_map = sorted(assortativity_list.items())
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.loglog([ degree for (degree , frequency ) in assortativity_map ] , [ frequency for (degree ,frequency ) in assortativity_map ], 'b.')
plt.legend('Todos nos')
plt.xlabel('Grau medio da Vizinhanca')
plt.ylabel('Grau')
plt.title ("Distribuição da assortatividade dos nos")
fig2.savefig ("assortativity_nodes.png")
import os
import sys
import logging
import gdown
from zipfile import ZipFile
# Google Drive share link for the zipped dataset.
dataset_url = 'https://drive.google.com/u/1/uc?id=1Y5ietDYpmXSJXaqvFg3BCb3p_-Q_HlRp&export=download'
dataset_name = 'data'

# Download the archive, unpack it into ./data, then delete the archive.
gdown.download(dataset_url, output=dataset_name + '.zip', quiet=False)
# Context manager guarantees the zip handle is closed even if
# extraction raises (the original leaked the handle on error).
with ZipFile(dataset_name + '.zip') as archive:
    archive.extractall(dataset_name)
os.remove(dataset_name + '.zip')
print("Finished downloading Nuscenes data.")
# import the necessary packages
# NOTE: this script uses Python 2 syntax (print statements).
import numpy as np
import cv2
# Centroid placeholders for four tracked markers (unused in the loop
# below -- presumably left over from a multi-marker version).
ax=0
ay=0
bx=0
by=0
cx=0
cy=0
dx=0
dy=0
# Frame counter.
jay=0
# Capture from camera index 1 (external webcam).
cap = cv2.VideoCapture(1)
while(True):
    jay=jay+1
    ret, img = cap.read()
    # Stop after 200 frames.
    if jay==200:
        break
    # Convert to HSV for color masking.
    hsv=cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # lower=np.array([66,207,91])#blue marker
    # upper=np.array([179,255,255])#blue marker
    # lower=np.array([0,55,0])#brown marker
    # upper=np.array([20,255,255])#brown marker
    # lower=np.array([0,106,66])#brown marker, correct waala
    # upper=np.array([20,255,101])#brown marker
    lower = np.array([152, 65, 88]) #pink color mask,
    upper = np.array([178, 227, 255])
    # lower = np.array([86, 0, 159]) #light blue color mask,
    # upper = np.array([143, 255, 255])
    # Binary mask of the selected color range.
    image = cv2.inRange(hsv, lower, upper)
    contours,h = cv2.findContours(image,1,2)
    for cnt in contours:
        # Polygon approximation; the vertex count classifies the shape.
        approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
        print len(approx)
        # if len(approx)==5:
        #     print "pentagon"
        #     cv2.drawContours(img,[cnt],0,255,-1)
        # elif len(approx)==3:
        #     print "triangle"
        #     cv2.drawContours(img,[cnt],0,(0,255,0),-1)
        # elif len(approx)==4:
        #     print "square"
        #     cv2.drawContours(img,[cnt],0,(0,0,255),-1)
        # elif len(approx) == 9:
        #     print "half-circle"
        #     cv2.drawContours(img,[cnt],0,(255,255,0),-1)
        # Many vertices => approximately circular marker.
        if len(approx) > 15:
            print "circle"
            cv2.drawContours(img,[cnt],0,(0,255,255),-1)
            # Centroid of the contour from image moments.
            M = cv2.moments(cnt)
            B1x = int(M['m10']/M['m00'])
            B1y = int(M['m01']/M['m00'])
    # Show the annotated frame and the mask; waitKey(0) blocks until a
    # key press on *every* frame -- presumably intentional for stepping.
    cv2.imshow('img',img)
    cv2.imshow('image',image)
    cv2.waitKey(0)
cv2.destroyAllWindows()
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
class DecisionTreeNode(object):
    """A single node of a decision tree.

    Holds the training-set indices that reached the node; internal nodes
    additionally record their split, leaves their majority class and the
    share of samples belonging to it.
    """

    def __init__(self, indices):
        self.indices = indices
        self.leftChild = None
        self.rightChild = None
        self.leaf = False

    def setSplit(self, splitFeat, splitVal, informationGain, nObsAtSplit):
        # Record the split decision made at this (internal) node.
        self.splitFeat = splitFeat
        self.splitVal = splitVal
        self.informationGain = informationGain
        self.nObsAtSplit = nObsAtSplit

    def setLeft(self, leftChild):
        self.leftChild = leftChild

    def setRight(self, rightChild):
        self.rightChild = rightChild

    def setLeaf(self, classes):
        # Turn the node into a leaf predicting the majority class of
        # `classes`, with the majority share as the confidence.
        self.leaf = True
        tally = Counter(classes)
        majority_label, majority_count = tally.most_common(1)[0]
        self.maxClass = majority_label
        self.percentMax = 1.0 * majority_count / sum(tally.values())
class DecisionTree(object):
    """Binary CART-style decision tree classifier split on Gini impurity.

    :param minIG: minimum information gain
        (NOTE(review): stored but never consulted -- confirm intent)
    :param nPointsLeaf: nodes with <= this many observations become leaves
    :param nFeaturesToSplit: if not None, the number of randomly chosen
        features examined per split (random-forest style); None = all

    Fix: `xrange` (Python 2 only) replaced with `range` so the class runs
    on Python 3; the redundant np.sort after np.unique was dropped
    (np.unique already returns sorted values).
    """

    def __init__(self, minIG=None, nPointsLeaf=1,
                 nFeaturesToSplit=None):
        self.minIG = minIG
        self.nPointsLeaf = nPointsLeaf
        self.root = None
        self.nFeaturesToSplit = nFeaturesToSplit

    def fit(self, XTrain, yTrain):
        '''
        fit takes in an array of training data with n observations and k
        features and a 1-d array of labels for the training data. It fits a
        decision tree to the training data.
        Input: XTrain as a numpy array shape (n, k) where n is the number of
               observations and k is the number of features
             : yTrain as a 1-d numpy array with n elements
        Output: None
        '''
        self.XTrain = XTrain
        self.yTrain = yTrain
        self.nFeatures = self.XTrain.shape[1]
        self.nObs = self.XTrain.shape[0]
        # Candidate split thresholds: the sorted unique values per feature.
        self.splitCandidates = {}
        for i in range(self.nFeatures):
            self.splitCandidates[i] = np.unique(self.XTrain[:, i])
        self.root = DecisionTreeNode(indices=np.array(range(self.nObs)))
        # Breadth-first growth of the tree.
        queue = [self.root]
        while len(queue) > 0:
            currNode = queue.pop(0)
            indices = currNode.indices
            split = self.chooseSplit_(indices, self.nFeaturesToSplit)
            if split is not None:
                splitFeat, splitVal, informationGain, nObsAtSplit = split
                currNode.setSplit(splitFeat, splitVal, informationGain,
                                  nObsAtSplit)
                # Partition this node's observations around the threshold.
                leftIndices = indices[np.where(
                    XTrain[indices, splitFeat] <= splitVal)[0]]
                rightIndices = indices[np.where(
                    XTrain[indices, splitFeat] > splitVal)[0]]
                leftChild = DecisionTreeNode(indices=leftIndices)
                rightChild = DecisionTreeNode(indices=rightIndices)
                currNode.setLeft(leftChild)
                currNode.setRight(rightChild)
                queue.append(leftChild)
                queue.append(rightChild)
            else:
                # No split improves impurity (or too few points): leaf.
                currNode.setLeaf(self.yTrain[indices])

    def gini_(self, classes):
        '''
        Calculates the Gini Impurity metric for a given set of class labels
        (https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity)
        Input: classes as a numpy array of class labels
        Output: Gini impurity defined as SUM over i [p_i * (1 - p_i)]
        '''
        class_labels = np.unique(classes)
        n_data_points = len(classes)
        probs = [1.0 * np.sum(classes == i) /
                 n_data_points for i in class_labels]
        gini = 0
        for p_i in probs:
            gini += p_i * (1 - p_i)
        return gini

    def informationGain_(self, classesBefore, classesAfter):
        '''
        Calculates the information gain from splitting a set of class labels
        into two different sets. Information gain is the decrease in Gini
        Impurity achieved by splitting the class labels.
        Input: classesBefore is a numpy array of the classes, before splitting
             : classesAfter is a list of two numpy arrays. Each numpy array
               contains the class labels of one split of the original class
               labels
        Output: information gain, defined as the Gini impurity of the class
                labels prior to splitting, minus the weighted average of Gini
                impurities of the splits
        '''
        splitA = classesAfter[0]
        splitB = classesAfter[1]
        nPreSplit = len(classesBefore)
        nSplitA = len(splitA)
        nSplitB = len(splitB)
        # Weight each side's impurity by its share of the observations.
        weightSplitA = 1.0 * nSplitA / nPreSplit
        weightSplitB = 1.0 * nSplitB / nPreSplit
        giniPreSplit = self.gini_(classesBefore)
        giniSplitA = self.gini_(splitA)
        giniSplitB = self.gini_(splitB)
        giniSplits = weightSplitA * giniSplitA + weightSplitB * giniSplitB
        informationGain = giniPreSplit - giniSplits
        return informationGain

    def chooseSplit_(self, indices, nFeaturesToSplit=None):
        '''
        Input: numpy array of indices to consider for a given split
        Output: list - First element: is the feature to split on
                     - Second: is the value on which to split
                     - Third: is Information Gain at split
                     - Fourth: is number of data points at the split
                Returns None when the node is too small or no split gains.
        '''
        XAtSplit = self.XTrain[indices, :]
        yAtSplit = self.yTrain[indices]
        maxInformationGain = 0
        splitFeat = None
        splitVal = None
        # Too few points left: the caller will make this node a leaf.
        if len(indices) <= self.nPointsLeaf:
            return None
        if nFeaturesToSplit is None:
            featuresToTry = range(self.nFeatures)
        else:
            # Random-forest style: sample a feature subset without
            # replacement for this split.
            nFeatures = self.XTrain.shape[1]
            featuresToTry = np.random.choice(range(nFeatures),
                                             size=nFeaturesToSplit,
                                             replace=False)
        # loop over all features being checked for splits
        for feat in featuresToTry:
            # array of unique, sorted values to try splitting on
            splitsToTry = self.splitCandidates[feat]
            # loop over all candidate splits for that feature
            for split in splitsToTry:
                indicesLeft = np.where(XAtSplit[:, feat] <= split)[0]
                indicesRight = np.where(XAtSplit[:, feat] > split)[0]
                classes = yAtSplit
                classesLeft = yAtSplit[indicesLeft]
                classesRight = yAtSplit[indicesRight]
                informationGain = self.informationGain_(classes, [classesLeft,
                                                                  classesRight]
                                                        )
                if informationGain > maxInformationGain:
                    maxInformationGain = informationGain
                    splitFeat = feat
                    splitVal = split
        if maxInformationGain == 0:
            return None
        return [splitFeat, splitVal, maxInformationGain, len(indices)]

    def predict(self, X):
        '''
        Predicts class labels for a given input dataset
        Input: X as numpy array with (n, k) shape where n
               is the number of observations and k is the
               number of features
        Output: predictions as n x 2 numpy array --
                first column are the predicted class labels
                corresponding to each row of X
                second column is the degree of certainty of the
                prediction, defined as the percentage of the
                labels in the leaf that match the most common
                class
        '''
        preds = np.zeros([X.shape[0], 2])
        for i, x in enumerate(X):
            preds[i, :] = self.predictOne(x)
        return preds

    def predictOne(self, x):
        '''
        Predicts class label for one input row
        Input: x as a (1,k) numpy array where k is the number
               of features
        Output:
               predictions as a (2, ) numpy array
               first column is the predicted label
               second column is the degree of certainty of the
               prediction, defined as the percentage of the
               labels in the leaf that match the most common
               class
        '''
        # Descend from the root following the recorded splits.
        currNode = self.root
        while not currNode.leaf:
            splitFeat = currNode.splitFeat
            splitVal = currNode.splitVal
            if x[splitFeat] <= splitVal:
                currNode = currNode.leftChild
            else:
                currNode = currNode.rightChild
        return np.array([currNode.maxClass, currNode.percentMax])
if __name__ == "__main__":
x = np.arange(0, 1000) / 1000.
x2 = x.copy()
y = x.copy() + np.random.randn(1000) / 30
y2 = x.copy() + 0.25 + np.random.randn(1000) / 30
X = np.vstack([np.hstack([x, x2]), np.hstack([y, y2])]).T
y = np.array([0] * 1000 + [1] * 1000)
dt = DecisionTree()
dt.fit(X, y)
|
import os
import asyncio
import time
from async_file_manager import AsyncFile
async def get_python_filenames(startdir):
    """Asynchronously yield the path of every .py file under *startdir*.

    Note: os.walk itself is blocking; this async generator only exposes
    an awaitable interface, it does not offload the directory scan.
    """
    # `dirpath` avoids shadowing the builtin dir() (the original used `dir`).
    for dirpath, _, filenames in os.walk(startdir):
        for filename in filenames:
            if filename.endswith(".py"):
                yield os.path.join(dirpath, filename)
def process_file(filename):
    """Return the number of lines in *filename* (read as UTF-8)."""
    with open(filename, encoding="utf-8") as fh:
        # Iterating the file object yields one line at a time.
        return sum(1 for _ in fh)
async def process_file_async(filename):
    """Count the lines of *filename* using the AsyncFile wrapper."""
    async with AsyncFile(filename) as fh:
        content = await fh.read()
    # splitlines() on the full content gives one entry per line.
    return len(content.splitlines())
async def producer(q, path):
    # Feed every .py path under `path` into the queue, then enqueue a
    # None sentinel so consumers know production is finished.
    async for f in get_python_filenames(path):
        await q.put(f)
    await q.put(None)
async def consumer(q):
    # Sum the line counts of files taken from the queue until the None
    # sentinel appears; the sentinel is re-queued so the sibling
    # consumers can also terminate.
    counter = 0
    while True:
        filename = await q.get()
        if filename is not None:
            lines = await process_file_async(filename)
            counter += lines
        else:
            await q.put(None)
            return counter
async def main_async(path):
    """Count the lines of all .py files under *path* with 32 concurrent
    consumers fed by one producer.

    Fix: the consumers must be scheduled as tasks *before* the producer
    runs. The original created bare coroutines and awaited the producer
    first, so once more than 100 files (the queue's maxsize) were found,
    q.put() blocked forever with nobody consuming -- a deadlock.
    """
    q = asyncio.Queue(100)
    consumers = [asyncio.create_task(consumer(q)) for _ in range(32)]
    await producer(q, path)
    results = await asyncio.gather(*consumers)
    return sum(results)
async def main_sync(path):
    """Sequential baseline: count lines of every .py file one at a time."""
    total = 0
    async for filename in get_python_filenames(path):
        total += process_file(filename)
    return total
# Benchmark driver: time the sequential baseline against the
# 32-consumer async pipeline over the current directory tree and print
# both the elapsed time and the total line count for comparison.
path = "."
start = time.time()
res = asyncio.run(main_sync(path))
end = time.time()
print(f"sync time: {end-start}")
print(f"res = {res}")
start = time.time()
res = asyncio.run(main_async(path))
end = time.time()
print(f"async time: {end-start}")
print(f"res = {res}")
# -*- coding: utf-8 -*-
"""Issues that can be found while scraping MDN pages."""
from __future__ import unicode_literals
from collections import OrderedDict
# Issue severity levels, ordered from least to most severe.
WARNING = 1
ERROR = 2
CRITICAL = 3
# Human-readable names for the severity levels above.
SEVERITIES = {
    WARNING: 'Warning',
    ERROR: 'Error',
    CRITICAL: 'Critical',
}
# Issue slugs, severity, brief templates, and long templates
# This was a database model, but it was cumbersome to update text with a
# database migration, and changing slugs require code changes as well.
# Each value is a (severity, brief_template, long_template) tuple; the
# templates are str.format() patterns filled in with per-issue context
# (url, version, footnote_id, ...).  Doubled braces {{{{...}}}} render
# as literal {{...}} KumaScript markup after formatting.
ISSUES = OrderedDict((
    ('bad_json', (
        CRITICAL,
        'Response from {url} is not JSON',
        'Actual content:\n{content}')),
    ('cell_out_of_bounds', (
        ERROR,
        'Cell ranges outside of the compatibility table',
        'The cell expands past the bounds of the compatibility table. If it'
        ' has a rowspan or colspan, in-bound cells will be applied.')),
    ('compatgeckodesktop_unknown', (
        ERROR,
        'Unknown Gecko version "{version}"',
        'The importer does not recognize this version for CompatGeckoDesktop.'
        ' Change the MDN page or update the importer.')),
    ('compatgeckofxos_override', (
        ERROR,
        'Override "{override}" is invalid for Gecko version "{version}".',
        'The importer does not recognize this override for CompatGeckoFxOS.'
        ' Change the MDN page or update the importer.')),
    ('compatgeckofxos_unknown', (
        ERROR,
        'Unknown Gecko version "{version}"',
        'The importer does not recognize this version for CompatGeckoFxOS.'
        ' Change the MDN page or update the importer.')),
    ('exception', (CRITICAL, 'Unhandled exception', '{traceback}')),
    ('extra_cell', (
        ERROR,
        'Extra cell in compatibility table row.',
        'A row in the compatibility table has more cells than the header'
        ' row. It may be the cell identified in the context, a different'
        ' cell in the row, or a missing header cell.')),
    ('failed_download', (
        CRITICAL, 'Failed to download {url}.',
        'Status {status}, Content:\n{content}')),
    ('feature_header', (
        WARNING,
        'Expected first header to be "Feature"',
        'The first header is "{header}", not "Feature"')),
    ('footnote_feature', (
        ERROR,
        'Footnotes are not allowed on features',
        'The Feature model does not include a notes field. Remove the'
        ' footnote from the feature.')),
    ('footnote_gap', (
        ERROR,
        'There are unexpected elements in the footnote section',
        'The footnotes parser expects only <p> and <pre> sections in the'
        ' footnotes. Check for incorrect <div> wrapping and other issues.')),
    ('footnote_missing', (
        ERROR,
        'Footnote [{footnote_id}] not found.',
        'The compatibility table has a reference to footnote'
        ' "{footnote_id}", but no matching footnote was found. This may'
        ' be due to parse issues in the footnotes section, a typo in the MDN'
        ' page, or a footnote that was removed without removing the footnote'
        ' reference from the table.')),
    ('footnote_multiple', (
        ERROR,
        'Only one footnote allowed per compatibility cell.',
        'The API supports only one footnote per support assertion. Combine'
        ' footnotes [{prev_footnote_id}] and [{footnote_id}], or remove'
        ' one of them.')),
    ('footnote_no_id', (
        ERROR,
        'Footnote has no ID.',
        'Footnote references, such as [1], are used to link the footnote to'
        ' the support assertion in the compatibility table. Reformat the MDN'
        ' page to use footnote references.')),
    ('footnote_unused', (
        ERROR,
        'Footnote [{footnote_id}] is unused.',
        'No cells in the compatibility table included the footnote reference'
        ' [{footnote_id}]. This could be due to a issue importing the'
        ' compatibility cell, a typo on the MDN page, or an extra footnote'
        ' that should be removed from the MDN page.')),
    ('halt_import', (
        CRITICAL,
        'Unable to finish importing MDN page.',
        'The importer was unable to finish parsing the MDN page. This may be'
        ' due to an unknown HTML tag, nested <code> or <pre> elements, or'
        ' other unexpected content.')),
    ('inline_text', (
        ERROR,
        'Unknown inline support text "{text}".',
        'The API schema does not include inline notes. This text needs to be'
        ' converted to a footnote, converted to a support attribute (which'
        ' may require an importer update), or removed.')),
    ('kumascript_wrong_args', (
        ERROR,
        'Bad argument count in KumaScript {kumascript} in {scope}.',
        'The importer expected {name} to have {arg_spec}, but it had'
        ' {arg_count}')),
    ('no_data', (
        CRITICAL,
        'No data was extracted from the page.',
        'The page appears to have data, but nothing was extracted. Check for'
        ' header sections wrapped in a <div> or other element. (Context'
        ' will probably not highlight the issue)')),
    ('missing_attribute', (
        ERROR,
        'The tag <{node_type}> is missing the expected attribute {ident}',
        'Add the missing attribute or convert the tag to plain text.')),
    ('skipped_content', (
        WARNING,
        'Content will not be imported.',
        'This content will not be imported into the API.')),
    ('skipped_h3', (
        WARNING,
        '<h3>{h3}</h3> was not imported.',
        '<h3> subsections are usually prose compatibility information, and'
        ' anything after an <h3> is not parsed or imported. Convert to'
        ' footnotes or move to a different <h2> section.')),
    ('spec_h2_id', (
        WARNING,
        'Expected <h2 id="Specifications">, actual id={h2_id}',
        'Fix the id so that the table of contents, other feature work.')),
    ('spec_h2_name', (
        WARNING,
        'Expected <h2 name="Specifications">, actual name={h2_name}',
        'Fix or remove the name attribute.')),
    ('spec_mismatch', (
        ERROR,
        'SpecName({specname_key}, ...) does not match'
        ' Spec2({spec2_key}).',
        'SpecName and Spec2 must refer to the same mdn_key. Update the MDN'
        ' page.')),
    ('specname_blank_key', (
        ERROR,
        'KumaScript SpecName has a blank key',
        'Update the MDN page to include a valid mdn_key')),
    ('specname_converted', (
        WARNING,
        'Specification name should be converted to KumaScript',
        'The specification "{original}" should be replaced with the KumaScript'
        ' {{{{SpecName({key})}}}}')),
    ('specname_not_kumascript', (
        ERROR,
        'Specification name unknown, and should be converted to KumaScript',
        'Expected KumaScript {{{{SpecName(key, subpath, name)}}}}, but got'
        ' text "{original}".')),
    ('specname_omitted', (
        WARNING,
        'Expected KumaScript SpecName(), got nothing',
        'Expected KumaScript {{{{SpecName(key, subpath, name)}}}}, but got'
        ' no text. Fix or remove empty table row.')),
    ('spec2_converted', (
        WARNING,
        'Specification status should be converted to KumaScript',
        'Expected KumaScript {{{{Spec2("{key}")}}}}, but got text'
        ' "{original}".')),
    ('spec2_omitted', (
        WARNING,
        'Expected KumaScript Spec2(), got nothing',
        'Change to Spec2(mdn_key), using the mdn_key from the SpecName()'
        ' KumaScript, or remove empty table row.')),
    ('tag_dropped', (
        WARNING,
        'HTML element <{tag}> (but not wrapped content) was removed.',
        'The element <{tag}> is not allowed in the {scope} scope, and was'
        ' removed. You can remove the tag from the MDN page (source mode is'
        ' recommended) to remove the warning.')),
    ('unexpected_attribute', (
        WARNING,
        'Unexpected attribute <{node_type} {ident}="{value}">',
        'For <{node_type}>, the importer expects {expected}. This unexpected'
        ' attribute will be discarded.')),
    ('unexpected_kumascript', (
        ERROR,
        'KumaScript {kumascript} was not expected in {scope}.',
        'The KumaScript {name} appears in a {scope}, but is only expected in'
        ' {expected_scopes}. File a bug, or convert the MDN page to not use'
        ' this KumaScript macro here.')),
    ('unknown_browser', (
        ERROR,
        'Unknown Browser "{name}".',
        'The API does not have a browser with the name "{name}".'
        ' This could be a typo on the MDN page, or the browser needs to'
        ' be added to the API.')),
    ('unknown_kumascript', (
        ERROR,
        'Unknown KumaScript {kumascript} in {scope}.',
        'The importer has to run custom code to import KumaScript, and it'
        ' hasn\'t been taught how to import {name} when it appears in a'
        ' {scope}. File a bug, or convert the MDN page to not use this'
        ' KumaScript macro.')),
    ('unknown_spec', (
        ERROR,
        'Unknown Specification "{key}".',
        'The API does not have a specification with mdn_key "{key}".'
        ' This could be a typo on the MDN page, or the specification needs to'
        ' be added to the API.')),
    ('unknown_version', (
        ERROR,
        'Unknown version "{version}" for browser "{browser_name}"',
        'The API does not have the version "{version}" for browser'
        ' "{browser_name}" (id {browser_id}, slug "{browser_slug}").'
        ' This could be a typo on the MDN page, or the version needs to'
        ' be added to the API.')),
))
# Fallback returned for slugs not present in ISSUES.
UNKNOWN_ISSUE = (
    CRITICAL, 'Unknown Issue', "This issue slug doesn't have a description.")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for app0.

    Creates Course, Department, SC (student/teacher score join table),
    Student, and Teacher, then attaches the remaining foreign keys as
    separate AddField operations.
    """
    dependencies = [
    ]
    operations = [
        # Course: natural-key primary key cNum plus a name.
        migrations.CreateModel(
            name='Course',
            fields=[
                ('cNum', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('cName', models.CharField(max_length=100)),
            ],
        ),
        # Department: natural-key primary key dNum plus a name.
        migrations.CreateModel(
            name='Department',
            fields=[
                ('dNum', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('dName', models.CharField(max_length=100)),
            ],
        ),
        # SC: auto id plus a float Score; its Stu/Tea FKs are added below.
        migrations.CreateModel(
            name='SC',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('Score', models.FloatField()),
            ],
        ),
        # Student: natural-key sNum, demographics, FK to Department.
        migrations.CreateModel(
            name='Student',
            fields=[
                ('sNum', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('sName', models.CharField(max_length=100)),
                ('sAge', models.IntegerField()),
                ('sSex', models.CharField(max_length=10)),
                ('sDepartment', models.ForeignKey(to='app0.Department')),
            ],
        ),
        # Teacher: natural-key tNum, demographics, FK to Department.
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('tNum', models.CharField(max_length=20, serialize=False, primary_key=True)),
                ('tName', models.CharField(max_length=100)),
                ('tAge', models.IntegerField()),
                ('tSex', models.CharField(max_length=10)),
                ('tDepartment', models.ForeignKey(to='app0.Department')),
            ],
        ),
        # Foreign keys added after the referenced models exist.
        migrations.AddField(
            model_name='sc',
            name='Stu',
            field=models.ForeignKey(to='app0.Student'),
        ),
        migrations.AddField(
            model_name='sc',
            name='Tea',
            field=models.ForeignKey(to='app0.Teacher'),
        ),
        migrations.AddField(
            model_name='course',
            name='cTeacher',
            field=models.ForeignKey(to='app0.Teacher'),
        ),
    ]
|
import cv2 as cv
def nothing(x):
    """No-op trackbar callback; cv.createTrackbar requires one, but the
    loop below polls the trackbar positions instead."""
    return None
# Interactive viewer: an 'Alpha' trackbar picks a color-space conversion
# (0 = original BGR, 1 = RGB, 2 = HSV, 3 = HLS, 4 = YUV) and two more
# trackbars pick x/y resize factors.  Press 'q' to quit.
source = cv.imread("./../sheep.jpg")
a = 2
b = 2
# NOTE(review): ndarray.shape for an image is (rows, cols, channels),
# i.e. (height, width, channels) -- these names look swapped.  The
# values are never used below, so it is harmless, but worth fixing.
width ,height ,channl = source.shape
cv.namedWindow("image",cv.WINDOW_AUTOSIZE)
switch = "Alpha"
cv.createTrackbar(switch,"image",0,4,nothing)
# Resize factors; +1 is applied on read so the range is 1..3.
cv.createTrackbar("parameterA","image",0,2,nothing)
cv.createTrackbar("parameterB","image",0,2,nothing)
while(1):
    k = cv.waitKey(1)
    if k == ord('q'):
        break
    s = cv.getTrackbarPos(switch,"image")
    a = cv.getTrackbarPos("parameterA", "image")+1
    b = cv.getTrackbarPos("parameterB", "image")+1
    if s == 0:
        # 0: original image, unscaled.
        cv.imshow("image", source)
    elif s == 1:
        # 1: RGB, resized by (a, b).
        result1 = cv.cvtColor(source,cv.COLOR_BGR2RGB)
        result11 = cv.resize(result1,None,fx=a,fy=b,interpolation=cv.INTER_LINEAR)
        cv.imshow("image",result11)
    elif s == 2:
        # 2: HSV.
        result2 = cv.cvtColor(source,cv.COLOR_BGR2HSV)
        result22 = cv.resize(result2,None,fx=a,fy=b , interpolation=cv.INTER_LINEAR)
        cv.imshow('image',result22)
    elif s == 3:
        # 3: HLS.
        result3 = cv.cvtColor(source,cv.COLOR_BGR2HLS)
        result33 = cv.resize(result3,None,fx=a,fy=b , interpolation=cv.INTER_LINEAR)
        cv.imshow('image',result33)
    else:
        # 4: YUV.
        result4 = cv.cvtColor(source,cv.COLOR_BGR2YUV)
        result44 = cv.resize(result4,None,fx=a,fy=b , interpolation=cv.INTER_LINEAR)
        cv.imshow('image',result44)
cv.destroyAllWindows()
|
from Account import *
from get_info import *
from sql import *
# *args passes it into a list
# *args collects the account numbers into a tuple
def getData(*args):
    """Return the full info record for each account number given."""
    return [get_all_info(number) for number in args]
#0 = ID, 1 = Account_number, 2 = card_number, 3=account balance
def mergeAccount(*args):
account_details = getData(args)
bal = 0
for account in account_details:
bal += account[3]
newAccount = Base_Account()
account_num, account_cards, account_bal = newAccount.get_all_info()
account_bal = bal
newAccount.load(account_num, account_cards, account_bal)
conn,cursor = create_connection("./accounts.db")
create_table(conn,cursor)
save_account(conn,cursor,account_num, account_cards, account_bal)
for account in account_details:
delete_account(conn,cursor,account)
conn.close()
# use main account number (main account to be merged into)
# + the $ in the accounts
# TO DELETE: account that is merged from
return account_num
# push mergedAccount into db, not sure how to do that again |
from . import *
# Public API of this package: the submodule names re-exported by the
# star-import above.
__all__ = ["apiclientbase",
           "application_definition",
           "constants",
           "datadog_metrics",
           "history_manager",
           "marathon",
           "mesosagent",
           "mesosmaster",
           "poller",
           "rule_manager",
           "scaler",
           "settings",
           "utils"]
|
"""
This module contains code related to
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
This is to complete the exercises in Chapter 11: Dictionaries in Think Python 2
Note: Although this is saved in a .py file, code was run on an interpreter to get results
Note: Using Python 3.9.0
"""
def has_duplicates(a_sequence):
    """
    Determine if any element appears more than once in a sequence.
    Simple version that records seen elements in a dictionary.
    t: sequence
    return: boolean; True if has more than one element in a sequence, false otherwise
    """
    seen = {}
    for element in a_sequence:
        if element in seen:
            return True
        seen[element] = True
    return False
def has_duplicates2(a_sequence):
    """
    Determine if any element appears more than once in a sequence.
    Faster version: duplicates shrink the set relative to the sequence.
    t: sequence
    return: boolean; True if element appears more than once in a sequence, false otherwise
    """
    unique_count = len(set(a_sequence))
    return unique_count < len(a_sequence)
if __name__ == '__main__':
    # Exercise both implementations on a clean list and on one with a
    # duplicate appended: expect False, True, False, True.
    sample = [1, 2, 3]
    print(has_duplicates(sample))
    sample.append(1)
    print(has_duplicates(sample))
    sample = [1, 2, 3]
    print(has_duplicates2(sample))
    sample.append(1)
    print(has_duplicates2(sample))
# ---- Constants (user prompts) ---- #
TRIGLICERIDOS = "Ingrese su nivel de trigliceridos"
HOMOCISTEINA = "Ingrese su nivel de homocisteina"
trigliceridos = float (input (TRIGLICERIDOS))
homocisteina = float (input (HOMOCISTEINA))
# Homocysteine classification: optimal [2, 15), borderline [15, 30),
# high [30, 100), otherwise very high.
# NOTE(review): readings below 2 also fall into the final "muy alto"
# branch -- confirm whether very low values should report "very high".
if (homocisteina >=2 and homocisteina <15):
    print("El nivel de su homocisteina es optimo")
elif (homocisteina >=15 and homocisteina < 30):
    print("El nivel de su homocisteina es sobre el limite optimo")
elif (homocisteina >=30 and homocisteina < 100):
    print("El nivel de su homocisteina es alto")
else:
    print("El nivel de su homocisteina es muy alto")
# Triglyceride classification: <150 optimal, [150, 200) borderline,
# [200, 500) high, >=500 very high.
# BUG FIX: the old bounds (< 199 and < 499) left gaps -- a reading of
# 199.5 or 499.5 skipped every range and was reported as "muy alto".
# The ranges are now contiguous.
if (trigliceridos < 150):
    print("El nivel de su trigliceridos es optimo")
elif (trigliceridos>=150 and trigliceridos < 200):
    print("El nivel de su trgliceridos es sobre el limite optimo")
elif ( trigliceridos>=200 and trigliceridos< 500):
    print("El nivel de su trigliceridos es alto")
else:
    print("El nivel de su trigliceridos muy alto")
|
"""
Chicago Heatmap Generation Main Module.
Crawls crime data from database table and converts it to a heatmap.
"""
__author__ = 'Udo Schlegel'
import numpy as np
import pandas as pd
from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.orm import sessionmaker
from sqlalchemy.schema import UniqueConstraint
DEBUG = False
engine = create_engine(
"", # Database connection
connect_args={'sslmode': 'require'}
)
Base = declarative_base(engine)
def get_table(table_name):
    """Crawl the table information from the DB.

    Returns a declarative class mapped onto *table_name*; the
    ``autoload`` flag reflects the column definitions from the
    connected database.
    """
    class Crimes(Base):
        """Class for the Crimes table."""
        __tablename__ = table_name
        __table_args__ = {"autoload": True}
    return Crimes
def loadSession():
    """Load database session to crawl table info."""
    session_factory = sessionmaker(bind=engine)
    return session_factory()
def create_image_table(name):
    """Crawl the table information from the DB.

    Defines (rather than reflects) the weekly heatmap-image table named
    *name* and returns the mapped class.
    """
    class CrimesImages(Base):
        """Class for the Crimes Images table."""
        __tablename__ = name
        # Surrogate key.
        id = Column(Integer, primary_key=True)
        # Flattened heatmap grid stored as raw bytes.
        image = Column(BYTEA)
        # Week-of-year and year the grid covers.
        woy = Column(Integer)
        year = Column(Integer)
        # Crime type label; one row per (woy, year, crime).
        crime = Column(String)
        __table_args__ = (UniqueConstraint('woy', 'year', 'crime'),)
    return CrimesImages
# Module-level setup: open a session and make sure the grayscale
# weekly-images table exists before any heatmaps are generated.
session = loadSession()
table_name = "chicago_all_crimes_images_weeks_grayscale"
CrimesImages_ = create_image_table(table_name)
Base.metadata.create_all(engine)
def createHeatmaps(table):
    """
    Create heatmaps with the data from the table.

    Bins every crime's (x, y) coordinate into a 32x32 grid, one grid
    per (week-of-year, year), and stores each flattened grid as a row
    in the CrimesImages table.

    Args:
        table - table to get the data from
    """
    print("-"*5, "Started", "-"*5)
    # NOTE(review): the table name is interpolated directly into the
    # SQL; fine for the hard-coded caller, but never pass untrusted
    # input here.
    sql_ = """
    SELECT
        to_char(date, 'YYYY') as year,
        to_char(date, 'MM') as month,
        to_char(date, 'DD') as day,
        to_char(date, 'WW') as woy,
        x_coordinate,
        y_coordinate
    FROM {}
    ORDER BY year, month, day;""".format(table)
    df = pd.read_sql(sql_, engine)
    # Drop rows containing zero coordinates or missing values.
    df = df[(df != 0.0).all(1)]
    df = df.dropna()
    data_x_max = df["x_coordinate"].max()
    data_x_min = df["x_coordinate"].min()
    data_y_max = df["y_coordinate"].max()
    data_y_min = df["y_coordinate"].min()
    granularity = 32
    image_array = np.zeros((granularity, granularity))
    old_woy = -1
    old_year = -1
    count = 0
    for index, row in df.iterrows():
        if old_woy == -1:
            old_woy = row["woy"]
        if old_year == -1:
            old_year = row["year"]
        # Week boundary: persist the finished grid and start a new one.
        if old_woy != row["woy"]:
            if DEBUG:
                print("Week of the year:", row["woy"], "/", row["year"],
                      "Amount of crimes:", count)
            image_array = np.ravel(image_array)
            new_image = CrimesImages_(image=image_array, woy=old_woy,
                                      year=old_year)
            session.add(new_image)
            image_array = np.zeros((granularity, granularity))
            old_year = row["year"]
            old_woy = row["woy"]
            count = 0
        # Scale the coordinate into the [0, granularity-1] grid.
        pos_n = int((row["x_coordinate"] - data_x_min) /
                    (data_x_max - data_x_min) * (granularity - 1))
        pos_m = int((row["y_coordinate"] - data_y_min) /
                    (data_y_max - data_y_min) * (granularity - 1))
        image_array[pos_n, pos_m] = image_array[pos_n, pos_m] + 1
        count += 1
    # BUG FIX: the loop above only writes a grid when the week changes,
    # so the final (week, year) accumulation was silently dropped.
    if old_woy != -1:
        new_image = CrimesImages_(image=np.ravel(image_array), woy=old_woy,
                                  year=old_year)
        session.add(new_image)
    print("-"*5, "Finished", "-"*5)
    session.commit()
if __name__ == "__main__":
    # Default source table for the Chicago crime data.
    source_table = "chicago_crimes"
    createHeatmaps(source_table)
|
from datetime import datetime
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from .serializers import UserDailyInputSerializer
from quicklook.tasks import generate_quicklook
from .custom_signals import user_input_post_save,user_input_notify
from .tasks import notify_admins_task
from progress_analyzer.tasks import set_pa_report_update_date
from hrr.tasks import create_hrrdata
from .models import DailyUserInputStrong
@receiver(user_input_post_save, sender=UserDailyInputSerializer)
def create_or_update_quicklook(sender, **kwargs):
    """Regenerate quicklook data for the submitted date range.

    For historical (non-today) input, also record the date so the PA
    reports from that day forward are refreshed later.
    """
    request = kwargs.get('request')
    from_date = kwargs.get('from_date')
    start_str = from_date.strftime("%Y-%m-%d")
    end_str = kwargs.get('to_date').strftime("%Y-%m-%d")
    generate_quicklook.delay(request.user.id, start_str, end_str)
    if from_date != datetime.now().date():
        # Historical edit: every PA report from this date on must be
        # rebuilt, so persist the date and let a celery task handle it.
        set_pa_report_update_date.delay(
            request.user.id,
            start_str
        )
@receiver(user_input_notify, sender=UserDailyInputSerializer)
def notify_admins(sender, instance=None, created=False, **kwargs):
    '''
    Send email to all the admin users with the admin link to
    newly created instance or updated instance
    '''
    if not instance:
        return
    request = kwargs.get('request')
    admin_users_email = [staff.email for staff in User.objects.filter(is_staff=True)]
    info = (instance._meta.app_label, instance._meta.model_name)
    # Absolute URL of the instance's admin change page.
    instance_admin_url = request.build_absolute_uri('/').strip("/")+\
        reverse('admin:%s_%s_change' % info, args=(instance.pk,))
    instance_meta = {
        'first_name': instance.user.first_name,
        'last_name': instance.user.last_name,
        'username': instance.user.username,
        'created_at': instance.created_at.strftime("%b %d, %Y"),
        'instance_url': instance_admin_url,
        'created': created,
    }
    notify_admins_task.delay(admin_users_email, instance_meta)
@receiver(user_input_post_save, sender=UserDailyInputSerializer)
def create_or_update_hrr(sender, **kwargs):
    """Kick off HRR data generation for the input's target date."""
    request = kwargs.get('request')
    target_day = kwargs.get('to_date').strftime("%Y-%m-%d")
    # Same date is used for both ends of the range.
    create_hrrdata.delay(request.user.id, target_day, target_day)
|
# coding: utf-8
# In[10]:
url = 'https://courses.illinois.edu/schedule/2017/spring'
from IPython.display import HTML
HTML('<iframe src={0} width=800 height=400> </iframe>'.format(url))
import requests
page = requests.get(url)
html = page.content
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
# In[11]:
other_url = 'https://courses.illinois.edu'
links = []
tables = soup.find_all('table')
tb = soup.find(id='term-dt').tbody
for row in tb.find_all('a'):
links.append(other_url + row.get('href'))
# In[12]:
all_urls = []
for l in links:
page = requests.get(l)
html = page.content
soup = BeautifulSoup(html, 'lxml')
tb = soup.find(id='default-dt').tbody
for row in tb.find_all('a'):
all_urls.append(other_url + row.get('href'))
print(other_url + row.get('href'))
# In[34]:
import re
import json
from lxml import html
import sys
file = open('roomlist.txt','w')
locations = []
for l in all_urls:
page = requests.get(l)
html = page.content
soup = BeautifulSoup(html, 'lxml')
tables = soup.find_all('table')
script_tag = soup.find(type='text/javascript')
script_txt = script_tag.contents[0]
pattern = re.compile(r'(\{[^}]+\})')
match = re.search(pattern, script_txt)
json_txt = json.loads(match.group(0))
temp = json_txt['location']
content = temp.split("</div>")[0]
content = content.split(">")[1]
#content = html.fromstring(temp).xpath('//div/text()'
if content not in locations:
print(content)
locations.append(content)
file.write(content + '\n')
file.close()
# In[ ]:
|
import cv2 as cv
import numpy as np
def nothing(x):
    """Do-nothing trackbar callback required by cv.createTrackbar."""
    return None
# Interactive Canny edge viewer: trackbars set the two hysteresis
# thresholds and a switch toggles between the original image and the
# edge map.  Press 'q' to quit.
img = cv.imread("./../sheep.jpg")
cv.namedWindow('image')
# Canny thresholds, adjustable at runtime.
cv.createTrackbar("parameterA","image",100,255,nothing)
cv.createTrackbar("parameterB","image",200,300,nothing)
switch = "0:OFF\n1:ON"
cv.createTrackbar(switch,"image",0,1,nothing)
while(1):
    k = cv.waitKey(1)
    if k == ord('q'):
        break
    a = cv.getTrackbarPos("parameterA","image")
    b = cv.getTrackbarPos("parameterB","image")
    s = cv.getTrackbarPos(switch,"image")
    if s == 0:
        # Switch off: show the original image.
        cv.imshow("image",img)
    else:
        # Switch on: show Canny edges with thresholds a and b.
        result = cv.Canny(img,a,b)
        cv.imshow("image",result)
cv.destroyAllWindows()
import numpy as np
import matplotlib.pyplot as plt
import math
import networkx as nx
import pylab
from Cython import inline
class DataHandle:
    """Load historical stock prices, derive daily log-returns, and build
    a correlation-distance network between stocks."""

    def __init__(self):
        # These were previously shared *class* attributes; instance
        # attributes avoid state leaking across instances.
        self.diz_historical_stocks = {}   # symbol -> [(date, price), ...]
        self.diz_historical_returns = {}  # symbol -> [(date, log-return), ...]
        self.corr_network = nx.Graph()    # weighted distance graph
        f = open("./data/diz_historical_6_may_2016.dat", 'r')
        while True:
            next_line = f.readline()
            if not next_line:
                break
            line = next_line.split("_-_")
            # Company ticker symbol.
            sym = line[0]
            # Time series entries, each a "date price" string.
            dataline = line[1:]
            dataline.sort(reverse=False)
            # Convert to a list of (date, price) tuples.
            DataTupleLine = []
            for l in dataline:
                DataTupleLine.append(tuple(l.split(" ")))
            # Price history keyed by symbol.
            self.diz_historical_stocks[sym] = DataTupleLine
        f.close()
        for sym in self.diz_historical_stocks.keys():
            DataTupleLine2 = self.diz_historical_stocks[sym]
            return_line = []
            # Skip series too short for the 273-day window.
            if len(DataTupleLine2) < 273:
                continue
            for i in range(1, 273):
                ReturnData = math.log(float(DataTupleLine2[i][1])) - math.log(float(DataTupleLine2[i-1][1]))
                # BUG FIX: the date must come from *this* symbol's series
                # (DataTupleLine2); the old code indexed DataTupleLine,
                # the leftover variable from the parsing loop above.
                return_line.append((DataTupleLine2[i][0], ReturnData))
            # Return history keyed by symbol.
            self.diz_historical_returns[sym] = return_line

    def return_show(self, sym):
        """Plot the return time series for *sym*."""
        plt.figure(num=1, figsize=(20, 15), dpi=80)
        plt.xlabel("Date")
        plt.ylabel("Return of" + sym)
        x = []
        y = []
        for d in self.diz_historical_returns[sym]:
            x.append(d[0])
            y.append(d[1])
        plt.plot(x, y, linestyle='-', alpha=0.5, color='b')
        plt.xticks(x[::10], rotation=90)
        plt.show()

    def mean(self, X):
        """Arithmetic mean of the values in X."""
        m = 0.0
        for i in X:
            m = m + i
        return m / len(X)

    def covariance(self, X, Y):
        """Population covariance of X and Y (equal length assumed)."""
        c = 0.0
        m_X = self.mean(X)
        m_Y = self.mean(Y)
        for i in range(len(X)):
            c = c + (X[i] - m_X) * (Y[i] - m_Y)
        return c / len(X)

    def pearson(self, X, Y):
        """Pearson correlation coefficient of X and Y.

        NOTE(review): divides by the standard deviations, so a constant
        series (zero variance) raises ZeroDivisionError -- confirm the
        inputs always vary.
        """
        return self.covariance(X, Y) / (self.covariance(X, X)**0.5 * self.covariance(Y, Y)**0.5)

    def stocks_corr_coeff(self, sym1, sym2):
        """Correlation between two stocks' return series (1 for itself)."""
        if sym1 == sym2:
            return 1
        # Each history entry is a (date, return) tuple; keep the returns.
        l1 = [d1[1] for d1 in self.diz_historical_returns[sym1]]
        l2 = [d2[1] for d2 in self.diz_historical_returns[sym2]]
        return self.pearson(l1, l2)

    def getNet(self):
        """Build the complete graph weighted by the metric distance
        sqrt(2 * (1 - correlation)) between every pair of stocks."""
        # Hoisted: the old code rebuilt list(keys()) inside both loops.
        symbols = list(self.diz_historical_returns.keys())
        for i1 in range(len(symbols) - 1):
            for i2 in range(i1 + 1, len(symbols)):
                stock1 = symbols[i1]
                stock2 = symbols[i2]
                metric_distance = math.sqrt(2 * (1.0 - self.stocks_corr_coeff(stock1, stock2)))
                self.corr_network.add_edge(stock1, stock2, weight=metric_distance)

    def showNetMST(self):
        """Compute a minimum spanning tree (Prim's algorithm) and draw it."""
        tree_seed = "UTX"  # assumes UTX exists in the network -- TODO confirm
        N_new = []
        E_new = []
        N_new.append(tree_seed)
        # Prim's algorithm: grow the tree one cheapest crossing edge at a time.
        while len(N_new) < self.corr_network.number_of_nodes():
            min_weight = 10000000.0
            for n in N_new:
                for n_adj in self.corr_network.neighbors(n):
                    if not n_adj in N_new:
                        if self.corr_network[n][n_adj]['weight'] < min_weight:
                            min_weight = self.corr_network[n][n_adj]['weight']
                            min_weight_edge = (n, n_adj)
                            n_adj_ext = n_adj
            E_new.append(min_weight_edge)
            N_new.append(n_adj_ext)
        # generate the tree from the edge list
        tree_graph = nx.Graph()
        tree_graph.add_edges_from(E_new)
        # BUG FIX: nx.drawing is a module and is not callable; the
        # prog/args keywords belong to the graphviz layout helper.
        pos = nx.drawing.nx_agraph.graphviz_layout(
            tree_graph, prog='neato', args='-Gmodel=subset -Gratio=fill')
        plt.figure(figsize=(20, 20))
        nx.draw_networkx_edges(tree_graph, pos, width=2,
                               edge_color='black', alpha=0.5, style="solid")
        nx.draw_networkx_labels(tree_graph, pos)
        for n in tree_graph.nodes():
            # BUG FIX: node 'color' attributes were never assigned (the
            # sector-color loop is commented out), so the old direct
            # lookup raised KeyError; fall back to a default color.
            nx.draw_networkx_nodes(tree_graph, pos, [n], node_size=600,
                                   alpha=0.5,
                                   node_color=tree_graph.nodes[n].get('color', 'gray'),
                                   with_labels=True)
        plt.show()
# Driver: build the correlation network, draw its MST, then chart how
# many large-cap companies fall into each sector.
dh=DataHandle()
dh.getNet()
dh.showNetMST()
f = open("./data/list_stocks_50B_6_may_2016.txt", 'r')
list_stocks_all = []
while True:
    next_line = f.readline()
    if not next_line:
        break
    # Split the row on tabs, drop the trailing newline element, convert
    # the fields to a tuple, and append it to list_stocks_all.
    list_stocks_all.append(tuple(next_line.split('\t')[:-1]))
f.close()
hfile = open("./data/companylist.csv", 'r')
# Select companies with a market cap above $50 billion.
cap_threshold = 50.0
list_stocks = []
nextline = hfile.readline()  # skip the CSV header row
while True:
    nextline = hfile.readline()
    if not nextline:
        break
    # Split the data row; fields are "symbol", "name", "last price",
    # "market cap", "IPO year", "sector", "industry", "quote source".
    line = nextline.split('","')
    # Strip the leading double quote.
    sym = line[0][1:]
    y_market_cap = line[3][1:]
    if y_market_cap == "/a":
        continue
    # Keep caps quoted in billions ('B') above the threshold.
    if y_market_cap[-1] == 'B' and float(y_market_cap[:-1]) > cap_threshold:
        print(sym, y_market_cap)
        # Tuple contents: (symbol, name, sector, industry).
        list_stocks.append((sym, line[1], line[5], line[6]))
hfile.close()
# Sector lookup table.
diz_sectors = {}
# Iterate over the stock tuples built above.
for s in list_stocks:
    # Map symbol -> sector.
    diz_sectors[s[0]]=s[2]
list_ranking = []
for s in set(diz_sectors.values()):
    count = 0
    # Count how many companies belong to sector s.
    for v in diz_sectors.values():
        if v == s:
            count += 1
    list_ranking.append((count, s))
# Sort sectors by frequency, descending.
list_ranking.sort(reverse=True)
#list_colors=['red','green','blue','black''cyan','magenta','yellow']
list_colors=['0.0', '0.2', '0.4', '0.6','0.7', '0.8', '0.9']
#'white' is an extra color for 'n/a' and 'other' sectors
diz_colors={}
# Associate entries with the seven most frequent sectors.
i = 7
for s in list_ranking:
    i-=1
    # Skip the unknown sector; stop after the seventh real one.
    if s[1] == 'n/a':
        continue
    if i < 0:
        break
    # NOTE(review): despite the dict's name, this stores the sector
    # *count* (s[0]), not a color -- list_colors is never used, and the
    # counts become the bar heights below.  Confirm the intent.
    diz_colors[s[1]]=s[0]
plt.rc('font', size=12)
plt.figure(num=1, figsize=(15, 8),dpi=80)
x_index = list(np.arange(7)) # bar indices
x_data = list(diz_colors.keys())[:7]
y1_data = list(diz_colors.values())[:7]
rects1 = plt.bar(x_index, y1_data, width=0.35, alpha=0.4, color='b') # args: left offset, height, bar width, alpha, color, legend
plt.xticks(x_index, x_data) # x-axis tick labels
plt.xlabel("sector name")
plt.ylabel("counts")
plt.tight_layout() # trims outer margins; does not control inter-plot spacing well
# Annotate each bar with its count (consumes the lists back to front).
for i in range(0, 7):
    data = y1_data.pop()
    plt.text(x_index.pop()-0.05, data, data)
plt.show()
|
import sys
import time
import threading
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# NOTE: a special observer is required on a CIFS filesystem
|
#!/usr/local/bin/python
'''
fuzzELA.py
'''
# Imports
from PIL import Image, ImageFilter
import argparse
import os
import fff
# Parse command line Arguments
parser = argparse.ArgumentParser()
#parser.add_argument("image", help="Image you want to analyze")
parser.add_argument("-v", "--verbose", help="verbose output", action="store_true")
parser.add_argument("-g", "--graphics", help="show ELA and post fuzzing ELA", action="store_true")
# BUG FIX: argparse delivers command-line values as strings unless a
# type is given, so "-q 50" used to reach Image.save(quality="50") as a
# string; only the int defaults worked.  type=int coerces both options.
parser.add_argument("-q", "--quality", type=int, help="Quality level of resaved image. Lower quality means larger error levels. Defaults to 75.", default=75)
parser.add_argument("-s", "--scale", type=int, help="Amount to boost error levels by. Larger scale means larger error levels. Defaults to 15.", default=15)
'''
TODO:
    Add remaining command line arguments
    Uncomment add_argument(image)
'''
args = parser.parse_args()
# TESTING ONLY:
image = "images/books-edited.jpg"
# Variables
tmp_image_location = "images/worse.jpg" # Where to store the low quality image
# Open the image for processing
original = fff.open_image(image)
fuzzed = fff.open_image(image)
orig_pixels = original.load()
fuzz_pixels = fuzzed.load()
dimensions = [original.size[0], original.size[1]]
# Resave image for ELA comparison
original.save(tmp_image_location, quality=args.quality)
worse = fff.open_image(tmp_image_location)
error = fff.open_image(tmp_image_location)
worse_pixels = worse.load()
err_pixels = error.load()
# ELA of unedited image
err_pixels = fff.ela(orig_pixels, err_pixels, args.scale, dimensions)
if (args.graphics):
    error.show()
# Fuzz the image using the error levels computed above.
fuzz_pixels = fff.fuzz(fuzz_pixels, worse_pixels, err_pixels, args.scale, dimensions)
fuzzed.save("images/books-posttool.jpg", quality=100)
if (args.graphics):
    # Re-run ELA on the fuzzed output to visualize the tool's effect.
    fuzzed.save("images/worse2.jpg", quality=args.quality)
    error2 = fff.open_image("images/worse2.jpg")
    err2_pixels = error2.load()
    err2_pixels = fff.ela(fuzz_pixels, err2_pixels, args.scale, dimensions)
    error2.show()
    os.remove("images/worse2.jpg")
# End of execution cleanup
os.remove(tmp_image_location)
|
#!/usr/bin/env python
# NAME - Tele Op Node
# DESIGN - Josh Galicic
# PRIORITY - Utility - Likely required for testing purposes
# --------OVERVIEW--------
# This node takes in keyboard input and generates velocity messages at a set speed.
# WASD are used to specify direction, pressing them once will continue that motion
# until 'f' is pressed. Since our motor controllers are not designed to require constant
# messages to continue, this node will send 1 velocity message, and stop until you input
# a different one. Closing this node while the bot is running will not stop it, it will
# just keep going until it runs into a wall and you make an impressive, daring dive after it.
#
# --------FUTURE WORK--------
# This node should be able to control speed, but it does not seem to work fully. there is
# also no visual feedback for what is going on, that may be helpful.
import roslib; roslib.load_manifest('GCRobotics')
import rospy
from std_msgs.msg import String
from GCRobotics.msg import simpleVelocity
import curses
def talker():
    """Read WASD-style keys with curses and publish simpleVelocity messages.

    One message is published per keypress; the robot keeps its last
    commanded motion until 'f' (stop) is sent.
    """
    pub = rospy.Publisher('Velocity', simpleVelocity)
    rospy.init_node('TeleOp')
    msg = simpleVelocity()
    stdscr = curses.initscr()
    stdscr.addstr("W: Forward, S: Backward, A: left, D: right, Q: rotate left, E: rotate right, F: stop\n")
    stdscr.addstr("j: Speed Up, k: Slow Down\n")
    curses.noecho()
    # Key -> direction code for the motor controller.  Hoisted out of
    # the loop: it never changes between iterations.
    keyToDirection = {ord('w'): 0, ord('a'): 3, ord('s'): 2, ord('d'): 1,
                      ord('q'): 5, ord('e'): 4, ord('f'): 0}
    while not rospy.is_shutdown():
        i = stdscr.getch()
        try:
            msg.direction = keyToDirection[i]
        except KeyError:
            # BUG FIX: was a bare ``except:``; catch only the missing-key
            # case so real errors are not swallowed.  Unknown keys keep
            # the previous direction.
            pass
        msg.speed = 130
        if (i == ord('f')):
            msg.speed = 0
        if (i == ord('j')):
            msg.speed += 20
        if (i == ord('k')):
            msg.speed -= 20
        pub.publish(msg)
        rospy.sleep(.001)
    curses.endwin()
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Restore the terminal state that curses changed before exiting.
        curses.nonl()
        #stdscr.keypad(0);
        curses.echo()
        curses.endwin()
        pass
#!/usr/bin/env python
""" Create / add to ASAS catalog MySQL tables.
NOTE: Some columns of this table can be viewed using:
lyra.berkeley.edu/allstars/asascat.php
"""
import sys, os
from activelearn_utils import Database_Utils
class Asas_Catalog(Database_Utils):
    """Create, fill and query the ASAS catalog MySQL tables (asascat_*).

    Database cursors (self.tcp_cursor, self.tutor_cursor) and connection
    setup come from Database_Utils.connect_to_db().
    """
    def __init__(self, pars={}):
        # NOTE(review): mutable default argument `pars={}`; harmless here
        # since it is stored but never mutated in this class.
        self.pars = pars
        self.connect_to_db()
    def create_tables(self):
        """Drop and re-create all asascat_* tables from scratch.

        Queries:
        ### For a class, show classifier 1st rank probs, for all trainingset sources
        SELECT tutor_srcid, asascat_probs.class_id, asascat_probs.prob, asascat_train_sources.class_id
        FROM asascat_train_sources
        JOIN asascat_probs ON asascat_probs.catalog_id=asascat_train_sources.catalog_id AND
                              asascat_probs.tutor_srcid=asascat_train_sources.tutor_srcid AND
                              asascat_probs.classifier_rank=1
        WHERE catalog_id=1 AND class_id=234
        ### To show all 1st probs for a class:
        SELECT prob FROM asascat_probs
        WHERE catalog_id=1 AND class_id=234 AND classifier_rank=1
        ### To show 1st class prob for a source:
        SELECT prob FROM asascat_probs
        WHERE catalog_id=1 AND tutor_srcid=123456 AND classifier_rank=1
        ### Show rank=1 prob, is_acvs, is_trainset info for all sources:
        SELECT tutor_srcid, asascat_probs.class_id, prob, asascat_train_sources.class_id
        FROM asascat_probs
        LEFT JOIN asascat_train_sources ON asascat_train_sources.catalog_id=asascat_probs.catalog_id
                                        ON asascat_train_sources.tutor_srcid=asascat_probs.tutor_srcid
        LEFT JOIN asascat_acvs_sources ON asascat_acvs_sources.catalog_id=asascat_probs.catalog_id
                                       ON asascat_acvs_sources.tutor_srcid=asascat_probs.tutor_srcid
        WHERE catalog_id=1 AND asascat_probs.classifier_rank=1

        # TABLE: asascat_names
        Columns:
            catalog_id
            catalog_name    # "v0.1", "v1_no_ttaur"
            dtime DATETIME, # catalog creation date
            ??? Store these in a seperate table / file? / or fill out using PHPMyadmin:
                skipped class? - probably just store this info in catalog_name
                classifier error_rage
        ### TABLE: asascat_probs
        MOTIVE: want to query using a source's catalog_id, class_id, and get the probs of:
            - probs of all sources which have top class = this sources 1st class
            - probs of all sources which have top class = this sources 2nd class
        NOTE: using classifier_rank, we can have a flexible number of classes
              used for a catalog_id
        Index:
            index: (catalog_id, tutor_srcid)
            index: (catalog_id, class_id, classifier_rank)
        Columns:
            catalog_id
            tutor_srcid
            classifier_rank
            class_id
            prob
        ### TABLE: asascat_acvs_sources
        Columns:
            catalog_id
            tutor_srcid
            class_id
        Index:
            index: (catalog_id, class_id)
            PRIME: (catalog_id, tutor_srcid) # for JOINing with other queries
        ### TABLE: asascat_train_sources
        Columns:
            catalog_id
            tutor_srcid
            class_id
        Index:
            index: (catalog_id, class_id)
            PRIME: (catalog_id, tutor_srcid) # for JOINing with other queries
        ### TABLE: asascat_source_attribs
        MOTIVE: Need to store source related attributes in a single table
            - these should be harder to query for, or potentially changing attributes, such as TCP-features
        NOTE: The feature attributes should probably be retrieved from the current .arff / data structures,
              and not from the source_test_db.feat_values DB table.
        Index:
            index: (catalog_id, source_id)
            index: (catalog_id, classif_rank, class_id)
        Columns:
            catalog_id
            tutor_srcid
            ra
            decl
            period
            is_periodic # related to significance
            is_anomalous
            amplitude
            jk_color
            avg_mag
            avg_mag_error
            n_points
        ######################
        ### (existing tables):
        tutor_simbad_classes
        | src_id | simbad_class | simbad_dist | simbad_sptype |
        +--------+--------------+-------------+---------------+
        | 229376 | PulsV*       |        0.04 | M0            |
        | 262145 | Star         |       21.94 | M             |
        NOTE:
        alter table asascat_source_attribs add column delta_t DOUBLE;
        """
        if 1:
            # Drop any pre-existing asascat_* tables so the CREATEs below
            # always start from a clean slate.
            table_names = ["asascat_names",
                           "asascat_probs",
                           "asascat_acvs_sources",
                           "asascat_train_sources",
                           "asascat_source_attribs"]
            for table_name in table_names:
                try:
                    self.tcp_cursor.execute("DROP TABLE %s" % (table_name))
                except:
                    # Table did not exist yet; that's fine on first run.
                    print "Table doesn't exist for DELETE:", table_name
        # Catalog-name / creation-date registry.
        create_str = """
        CREATE TABLE asascat_names (
            id INT UNSIGNED,
            name VARCHAR(512),
            dtime DATETIME,
            PRIMARY KEY (id),
            INDEX (name))
        """
        self.tcp_cursor.execute(create_str)
        # Per-source, per-rank classifier probabilities.
        create_str = """
        CREATE TABLE asascat_probs (
            catalog_id INT UNSIGNED,
            tutor_srcid INT,
            classifier_rank SMALLINT,
            class_id SMALLINT,
            prob FLOAT,
            PRIMARY KEY (catalog_id, tutor_srcid, classifier_rank),
            INDEX (catalog_id, class_id, classifier_rank))
        """
        self.tcp_cursor.execute(create_str)
        # ACVS cross-matched sources.
        create_str = """
        CREATE TABLE asascat_acvs_sources (
            catalog_id INT UNSIGNED,
            tutor_srcid INT,
            class_id SMALLINT,
            prob FLOAT,
            PRIMARY KEY (catalog_id, tutor_srcid),
            INDEX (catalog_id, class_id))
        """
        self.tcp_cursor.execute(create_str)
        # Training-set membership per catalog.
        create_str = """
        CREATE TABLE asascat_train_sources (
            catalog_id INT UNSIGNED,
            tutor_srcid INT,
            class_id SMALLINT,
            PRIMARY KEY (catalog_id, tutor_srcid),
            INDEX (catalog_id, class_id))
        """
        self.tcp_cursor.execute(create_str)
        # Per-source attributes (position, periodicity, colors, stats).
        create_str = """
        CREATE TABLE asascat_source_attribs (
            catalog_id INT UNSIGNED,
            tutor_srcid INT,
            ra DOUBLE,
            decl DOUBLE,
            freq1_harmonics_freq_0 DOUBLE,
            freq1_harmonics_amplitude_0 DOUBLE,
            amplitude DOUBLE,
            freq_n_alias DOUBLE,
            freq_signif DOUBLE,
            color_diff_jh FLOAT,
            color_diff_hk FLOAT,
            color_diff_bj FLOAT,
            avg_mag FLOAT,
            avg_mag_error FLOAT,
            is_periodic FLOAT,
            is_anomalous FLOAT,
            n_points INT,
            delta_t DOUBLE,
            PRIMARY KEY (catalog_id, tutor_srcid))
        """
        self.tcp_cursor.execute(create_str)
    def retrieve_tutor_source_info(self, catalog_id=0):
        """ Once ActiveLearn.active_learn_main() has been run
        and the following tables were filled:
            asascat_source_attribs
            asascat_probs
        This function retrieves information about source available in TUTOR database.

        Returns a dict of parallel lists keyed by
        'srcid', 'ra', 'decl', 'm_avg', 'm_std', 'n_points', 'delta_t'
        (one entry per source; one TUTOR query per source).
        """
        out_dict = {'srcid':[],
                    'ra':[],
                    'decl':[],
                    'm_avg':[],
                    'm_std':[],
                    'n_points':[],
                    'delta_t':[],
                    }
        select_str = "SELECT tutor_srcid FROM asascat_source_attribs WHERE catalog_id=%d" % (catalog_id)
        self.tcp_cursor.execute(select_str)
        results = self.tcp_cursor.fetchall()
        if len(results) == 0:
            # NOTE(review): raising a string is a Python 2 anachronism
            # (a TypeError on Python >= 2.6); should be an Exception subclass.
            raise "ERROR"
        for srcid_row in results:
            srcid = srcid_row[0]
            # query tutor database / tables:
            select_str = "SELECT source_ra, source_dec, AVG(obsdata_val), STD(obsdata_val), count(observation_id), MAX(obsdata_time) - MIN(obsdata_time) FROM sources JOIN observations USING (source_id) JOIN obs_data USING (observation_id) WHERE source_id=%d" % (srcid)
            self.tutor_cursor.execute(select_str)
            results2 = self.tutor_cursor.fetchall()
            if len(results2) == 0:
                raise "ERROR"
            for (ra, decl, m_avg, m_std, n_points, delta_t) in results2:
                out_dict['srcid'].append(srcid)
                out_dict['ra'].append(float(ra))
                out_dict['decl'].append(float(decl))
                out_dict['m_avg'].append(m_avg)
                out_dict['m_std'].append(m_std)
                out_dict['n_points'].append(n_points)
                out_dict['delta_t'].append(delta_t)
        return out_dict
    def fill_asascat_source_attribs_using_tutor_results(self, catalog_id=0, src_dict={}):
        """ Once ActiveLearn.active_learn_main() has been run
        and the following tables were filled:
            asascat_source_attribs
            asascat_probs
        Then this function should be run so that additiona TUTOR info can be added
        as well as summary files can be generated.
        NOTE: This will actually be an UPDATE since the source rows should already exist in TABLE.
        """
        # Dead first draft, kept as an unused string expression; it references
        # undefined names (tutor_srcid, ra, ...) and was superseded below.
        """
        insert_list = ["INSERT INTO asascat_source_attribs (catalog_id, tutor_srcid, ra, decl, avg_mag, avg_mag_error, n_points, delta_t) VALUES "]
        for i, srcid in enumerate(src_dict['srcid']):
            insert_list.append("(%d, %d, %d, %lf, %lf, %lf, %lf, %d, %lf), " % ( \
                catalog_id, tutor_srcid, ra, decl, avg_mag, avg_mag_error, n_points, delta_t))
        insert_str = ''.join(insert_list)[:-2] + " ON DUPLICATE KEY UPDATE ra=VALUES(ra), decl=VALUES(decl), avg_mag=VALUES(avg_mag), avg_mag_error=VALUES(avg_mag_error), n_points=VALUES(n_points), delta_t=VALUES(delta_t)"
        self.tcp_cursor.execute(insert_str)
        """
        insert_list = ["INSERT INTO asascat_source_attribs (catalog_id, tutor_srcid, ra, decl, avg_mag, avg_mag_error, n_points, delta_t) VALUES "]
        for i, srcid in enumerate(src_dict['srcid']):
            insert_list.append("(%d, %d, %lf, %lf, %lf, %lf, %d, %lf), " % ( \
                catalog_id,
                srcid,
                src_dict['ra'][i],
                src_dict['decl'][i],
                src_dict['m_avg'][i],
                src_dict['m_std'][i],
                src_dict['n_points'][i],
                src_dict['delta_t'][i]))
            # Flush in batches so a single INSERT statement does not grow
            # unbounded; [:-2] trims the trailing ", ".
            if len(insert_list) > 10000:
                insert_str = ''.join(insert_list)[:-2] + " ON DUPLICATE KEY UPDATE ra=VALUES(ra), decl=VALUES(decl), avg_mag=VALUES(avg_mag), avg_mag_error=VALUES(avg_mag_error), n_points=VALUES(n_points), delta_t=VALUES(delta_t)"
                self.tcp_cursor.execute(insert_str)
                insert_list = ["INSERT INTO asascat_source_attribs (catalog_id, tutor_srcid, ra, decl, avg_mag, avg_mag_error, n_points, delta_t) VALUES "]
        # Flush the final partial batch (len == 1 means only the header is left).
        if len(insert_list) > 1:
            insert_str = ''.join(insert_list)[:-2] + " ON DUPLICATE KEY UPDATE ra=VALUES(ra), decl=VALUES(decl), avg_mag=VALUES(avg_mag), avg_mag_error=VALUES(avg_mag_error), n_points=VALUES(n_points), delta_t=VALUES(delta_t)"
            self.tcp_cursor.execute(insert_str)
    def retrieve_tranx_asascat_info(self, src_dict={}, catalog_id=0):
        """ Assuming that the TUTOR source info has already been added to src_dict,
        This retrieves all other info from the tranx mysql asascat tables.
        Should have these already:
            print src_dict.keys()
            ['decl', 'm_avg', 'delta_t', 'srcid', 'n_points', 'm_std', 'ra']

        Mutates src_dict in place, appending parallel lists of feature
        attributes and training-class ids in src_dict['srcid'] order.
        """
        src_dict.update({'freq1_harmonics_freq_0':[],
                         'freq1_harmonics_amplitude_0':[],
                         'amplitude':[],
                         'freq_n_alias':[],
                         'freq_signif':[],
                         'color_diff_jh':[],
                         'color_diff_hk':[],
                         'color_diff_bj':[],
                         'avg_mag':[],
                         'avg_mag_error':[],
                         'n_points':[],
                         'delta_t':[],
                         'train_class_id':[],
                         })
        select_str = "SELECT tutor_srcid, freq1_harmonics_freq_0, freq1_harmonics_amplitude_0, amplitude, freq_n_alias, freq_signif, color_diff_jh, color_diff_hk, color_diff_bj, avg_mag, avg_mag_error, n_points, delta_t FROM asascat_source_attribs WHERE catalog_id=%d" % (catalog_id)
        self.tcp_cursor.execute(select_str)
        results = self.tcp_cursor.fetchall()
        if len(results) == 0:
            # NOTE(review): string exception, see retrieve_tutor_source_info().
            raise "ERROR"
        isrc_irow_tups = [] # kludgey, but requires only 1 database query rather than lots.
        for i_row, row in enumerate(results):
            (tutor_srcid, freq1_harmonics_freq_0, freq1_harmonics_amplitude_0, amplitude, freq_n_alias, freq_signif, color_diff_jh, color_diff_hk, color_diff_bj, avg_mag, avg_mag_error, n_points, delta_t) = row
            i_src = src_dict['srcid'].index(tutor_srcid) # this should always work
            isrc_irow_tups.append((i_src, i_row))
        # Sort so rows are appended in src_dict['srcid'] order, keeping the
        # new lists parallel with the existing srcid/ra/... lists.
        isrc_irow_tups.sort()
        for (i_src, i_row) in isrc_irow_tups:
            (tutor_srcid, freq1_harmonics_freq_0, freq1_harmonics_amplitude_0, amplitude, freq_n_alias, freq_signif, color_diff_jh, color_diff_hk, color_diff_bj, avg_mag, avg_mag_error, n_points, delta_t) = results[i_row]
            src_dict['freq1_harmonics_freq_0'].append(freq1_harmonics_freq_0)
            src_dict['freq1_harmonics_amplitude_0'].append(freq1_harmonics_amplitude_0)
            src_dict['amplitude'].append(amplitude)
            src_dict['freq_n_alias'].append(freq_n_alias)
            src_dict['freq_signif'].append(freq_signif)
            src_dict['color_diff_jh'].append(color_diff_jh)
            src_dict['color_diff_hk'].append(color_diff_hk)
            src_dict['color_diff_bj'].append(color_diff_bj)
            src_dict['avg_mag'].append(avg_mag)
            src_dict['avg_mag_error'].append(avg_mag_error)
            src_dict['n_points'].append(n_points)
            src_dict['delta_t'].append(delta_t)
        select_str = "SELECT tutor_srcid, class_id FROM asascat_train_sources WHERE catalog_id=%d" % (catalog_id)
        self.tcp_cursor.execute(select_str)
        results = self.tcp_cursor.fetchall()
        if len(results) == 0:
            raise "ERROR"
        isrc_irow_tups = [] # kludgey, but requires only 1 database query rather than lots.
        for i_row, row in enumerate(results):
            (tutor_srcid, class_id) = row
            i_src = src_dict['srcid'].index(tutor_srcid) # this should always work
            isrc_irow_tups.append((i_src, i_row))
        isrc_irow_tups.sort()
        for (i_src, i_row) in isrc_irow_tups:
            (tutor_srcid, class_id) = results[i_row]
            src_dict['train_class_id'].append(class_id)
        # NOTE(review): debugging breakpoint left in; remove before production use.
        import pdb; pdb.set_trace()
        print
        # TODO: retrieve all info from tranx tables & fill mondo_dict for all sources
        # TODO: generate HTML table, write in txt file
        # TODO want to have lots of infor for each srcid, may add more
    def temp_find_avgmags_for_miller_wtts(self, src_dict={}):
        """ Adam Miller requires the avg mags for some wtts sources for vanderbilt U followup
        """
        import numpy
        srcids = numpy.loadtxt('/home/pteluser/scratch/wtts_dotid', unpack=True)
        for srcidflt in srcids:
            srcid = int(srcidflt)
            try:
                i_src = src_dict['srcid'].index(srcid)
                print srcid, src_dict['m_avg'][i_src], src_dict['m_std'][i_src]
            except:
                # Source id not present in src_dict; just echo the id alone.
                print srcid
if __name__ == '__main__':
    # Connection parameters for the TUTOR (lightcurve) and TCP (catalog)
    # MySQL servers.
    # NOTE(review): credentials are hard-coded in source control; move to a
    # config file outside the repository.
    pars = { \
        'tutor_hostname':'192.168.1.103',
        'tutor_username':'dstarr', #'tutor', # guest
        'tutor_password':'ilove2mass', #'iamaguest',
        'tutor_database':'tutor',
        'tutor_port':3306, #33306,
        'tcp_hostname':'192.168.1.25',
        'tcp_username':'pteluser',
        'tcp_port': 3306, #23306,
        'tcp_database':'source_test_db',
        'catalog_id':0,
        }
    AsasCatalog = Asas_Catalog(pars=pars)
    #AsasCatalog.create_tables()
    #sys.exit()
    import cPickle, gzip
    # Cache the (slow, per-source) TUTOR query results in a gzipped pickle so
    # reruns skip the database round-trips.
    srcdict_pkl_fpath = '/home/pteluser/scratch/asas_catalog_srcdict.pkl.gz'
    if os.path.exists(srcdict_pkl_fpath):
        fp = gzip.open(srcdict_pkl_fpath,'rb')
        src_dict = cPickle.load(fp)
        fp.close()
    else:
        src_dict = AsasCatalog.retrieve_tutor_source_info(catalog_id=pars['catalog_id'])
        fp = gzip.open(srcdict_pkl_fpath,'wb')
        cPickle.dump(src_dict,fp,1) # ,1) means a binary pkl is used.
        fp.close()
    #ONETIMEUSE# AsasCatalog.temp_find_avgmags_for_miller_wtts(src_dict=src_dict)
    #AsasCatalog.fill_asascat_source_attribs_using_tutor_results(catalog_id=pars['catalog_id'],
    #                                                            src_dict=src_dict)
    # NOTE(review): debugging breakpoint left in the main execution path.
    import pdb; pdb.set_trace()
    print
    AsasCatalog.retrieve_tranx_asascat_info(catalog_id=pars['catalog_id'],
                                            src_dict=src_dict) # retrieve_tutor_source_info() must have been called
    # TODO: once asascat_probs is filled, and mabe some of asascat_source_attribs
    #    - then fill the rest of the asascat_* tables for a catalog_id
    # TODO: then generate .html, .txt, (mondo mysql table?)
    #AsasCatalog.test_fill_asas_catalog_probs()\
    #    - initially (testing) this just retrieves sources from activelearn_algo_class
    #    - but, eventually a function within activelearn_utils.py::insert_tups_into_rdb()
    #      will insert tups into TABLE: asas_catalog_probs, similar to .insert_tups_into_rdb()'s
    #      insert into TABLE: activelearn_algo_class
    # TODO(betsy): need rpy2_classifiers.py:apply_randomforest():L966:for j in range(<<num_classes>>)
    #    - so that asas_catalog_probs will have all potential probs
    # TODO: want to have PHP/HTML which displays sources for a catalog.
    # TODO: want to generate catalog .txt
|
def tribonacci(signature, n):
    """Return the first *n* terms of the tribonacci sequence seeded by *signature*.

    Each term after the three seed values is the sum of the preceding three
    terms.

    Parameters
    ----------
    signature : list
        The (three) seed terms of the sequence.
    n : int
        Number of terms to return.

    Returns
    -------
    list
        The first n terms.  For n <= len(signature) this is a prefix of the
        signature.

    Fixes over the original:
    - for 0 < n < len(signature) the original returned the single element
      [signature[n]] instead of the prefix signature[:n];
    - the original appended onto the caller's list (aliasing) — the input is
      now never mutated.
    """
    if n <= len(signature):
        # Slicing always returns a new list, so the caller's seed is safe.
        return signature[:n]
    output = list(signature)
    while len(output) < n:
        output.append(sum(output[-3:]))
    return output
print('Rubidium Source THE 1ND')
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sts
import scipy.constants as sc
import timeit
start = timeit.default_timer()  # wall-clock start, reported in the plot legend
# Simulation parameters.  Units are not declared in the script; lengths are
# presumably metres and T degrees Celsius — TODO confirm.
T = 20            # vapour temperature (converted via 273+T below)
r_max = 0.04      # radial half-extent of the source region
tcc = 0.001       # wall / collimator plate thickness
Col_zPos = 0.04   # z position of the collimator plate
Col_Gap = 0.03    # full gap width of the collimator opening
z_0 = 0.09        # distance used for the vy' estimate near the end
ymot = 0.008      # MOT capture radius used in the vy' estimate
Natoms = 8        # number of simulated atoms
Nsteps = 30       # integration steps per atom
h = 0.05          # RK4 step size
def z_initial(Natoms):
    """Draw Natoms starting z positions, uniform on [0, Col_zPos)."""
    draws = np.random.rand(Natoms)
    return Col_zPos * draws
def y_initial(Natoms):
    """Draw Natoms starting radial offsets, uniform on +/- 0.11 * r_max."""
    centered = np.random.rand(Natoms) - 0.5
    # Same multiplication order as before to keep results bit-identical.
    return r_max * centered * 0.22
kb = sc.Boltzmann
M = 87*sc.proton_mass  # approximate mass of Rb-87 (87 proton masses)
# Thermal velocity scale sqrt(kB*T/M); abs() is redundant but harmless.
a=abs(np.sqrt((kb*(273+T))/(M)))
def z_vel(Natoms):
    """Sample Natoms longitudinal speeds from a Maxwell distribution.

    Uses inverse survival-function sampling with a hard-coded scale of 0.09
    (the thermal scale `a` computed above is not used here).
    """
    quantiles = np.random.rand(Natoms)
    return sts.maxwell.isf(quantiles, scale=.09)
# NOTE(review): recomputes exactly the same `a` as directly above — the
# duplicate assignment is redundant.
a=abs(np.sqrt((kb*(273+T))/(M)))
print('scale, a', a)
def y_vel(Natoms, PM):
    """Sample Natoms transverse speeds (Maxwell, scale 0.07), signed by PM.

    PM is an array of +/-1 factors that orient each atom's transverse
    velocity (toward the axis in the main script).
    """
    quantiles = np.random.rand(Natoms)
    speeds = sts.maxwell.isf(quantiles, scale=0.07)
    return np.multiply(PM, speeds)
def RK4step(ti, zi, vi, h, dv, dz):
    """Advance (z, v) by one classical Runge-Kutta (RK4) step of size h.

    dz(t, z, v) gives dz/dt and dv(t, z, v) gives dv/dt; both stage
    evaluations use the same intermediate points, exactly as in the
    textbook coupled-system RK4 scheme.

    Returns the updated pair (z, v).
    """
    kz1 = dz(ti, zi, vi)
    kv1 = dv(ti, zi, vi)
    kz2 = dz(ti+h/2, zi +(h/2)*kz1, vi +(h/2)*kv1)
    kv2 = dv(ti+h/2, zi +(h/2)*kz1, vi +(h/2)*kv1)
    kz3 = dz(ti+h/2, zi +(h/2)*kz2, vi +(h/2)*kv2)
    kv3 = dv(ti+h/2, zi +(h/2)*kz2, vi +(h/2)*kv2)
    kz4 = dz(ti+h, zi +(h)*kz3, vi +(h)*kv3)
    kv4 = dv(ti+h, zi +(h)*kz3, vi +(h)*kv3)
    z_next = zi+(h/6.0)*(kz1+2.0*kz2+2.0*kz3+kz4)
    v_next = vi+(h/6.0)*(kv1+2.0*kv2+2.0*kv3+kv4)
    return z_next, v_next
def dv(t, z, v):
    """Acceleration term dv/dt: zero (free flight, no forces)."""
    return 0
def dy(t, z, v):
    """Position derivative dy/dt: simply the current velocity v."""
    return v
def dz(t, z, v):
    """Position derivative dz/dt: simply the current velocity v."""
    return v
# --- Initial conditions for every atom ---
Y = y_initial(Natoms)
pm = np.sign(Y)
# PM flips the sign of each initial offset so transverse velocities point
# back toward the beam axis.
PM=np.multiply(pm,-1)
Vy = y_vel(Natoms, PM)
Vz = z_vel(Natoms)
Z = z_initial(Natoms)
""" L O O P """
#col=np.zeros(Natoms)
col = []
# NOTE(review): this lone 'red' entry is appended *before* the per-atom
# color loop below, so col ends up offset by one relative to th — the
# plotting loop then pairs mismatched colors and linewidths.
#for i in range(Natoms):
col.append('red')
th=[]#np.zeros(Natoms)
n=[]#zeros(Natoms)
m=[]#np.zeros(Natoms)
#print(RK4step(3,5,2,5,4,6))
# Trajectory accumulators, flattened over (atom, step); reshaped later.
zs,vs,ts=[],[],[]
ys,yvs=[],[]
xD = []  # per-step outcome flag: 1 = inside collimator gap, -1/0 see below
for j in range(Natoms):
    vi = Vz[j]
    zi = Z[j]
    yvi= Vy[j]
    yi = Y[j]
    for i in range(Nsteps):
        ti=h*i
        zs.append(zi)
        vs.append(vi)
        ts.append(ti)
        ys.append(yi)
        yvs.append(yvi)
        # z and y advance independently (dv == 0, so this is free flight).
        z1,v1 = RK4step(ti,zi,vi,h,dv,dz)
        y1,yv1 = RK4step(ti,yi,yvi,h,dv,dz)
        yvi = yv1
        yi = y1
        zi = z1
        vi = v1
        if ( Col_zPos+tcc > zi > Col_zPos and abs(yi) < Col_Gap/2 ):
            # Green
            xD_ = 1
            xD.append(xD_)
        elif ( abs(yvi) < 10 and Col_zPos+tcc > zi > Col_zPos and abs(yi) < Col_Gap/2 ):
            # Cyan
            # NOTE(review): unreachable — this condition is a strict subset
            # of the first branch's condition, so -1 is never recorded.
            xD_ = -1
            xD.append(xD_)
        else:
            # Red
            xD_ = 0
            xD.append(xD_)
print(xD)
# NEED TO MAKE IT SO THAT IT DOESNT OVERWRITE THE COLOR
x_D = np.reshape(xD, (Natoms,Nsteps))
print(x_D )
Y_data = np.reshape(ys, (Natoms,Nsteps))
Vy_data = np.reshape(yvs, (Natoms,Nsteps))
Z_data = np.reshape(zs, (Natoms,Nsteps))
v_ = np.mean(z_vel(Natoms))
# Estimated transverse velocity needed to reach the MOT region.
vy = ((ymot-Col_Gap/2)*v_)/(z_0**2+(ymot-Col_Gap/2)**2)**0.5
print("vy' is ",vy)
#print(Vy)
#y_ = 1
# NOTE(review): n and m are still empty here (they are only filled in the
# loop below), so Nn and Mm are always 0 at this point and the statistics
# printed in the plot legend are computed from zeros.
Nn = np.sum(n)
Mm = np.sum(m)
#print(th)
#print(col)
print(Natoms,Nn,Mm)
for j in range(Natoms):
    # NOTE(review): x_D[j][Natoms-1] indexes the *step* axis with Natoms-1;
    # this looks like it should be Nsteps-1 (last step) — confirm.
    if x_D[j][Natoms-1] == 0 :
        col.append('red')
        th.append(0.3)
    # NOTE(review): xD values are only -1, 0 or 1, so max(x_D[j]) > 1 is
    # never true: the 'green' branch is unreachable and every atom takes
    # the 'cyan' branch.
    if max(x_D[j]) > 1:
        col.append('green')
        th.append(2.0)
        n.append(1)
    else:
        col.append('cyan')
        th.append(4.0)
        m.append(1)
for i in range(Natoms):
    'A plot for each of the Natoms particles'
    # col = (0.1, float(i/(Natoms+1)+0.0001), 1-float(i/(Natoms+5)+0.0001))
    plt.plot(Z_data[i],Y_data[i],linewidth = th[i], color = col[i])
#def v_r_inital(Natoms):
    #k = (np.random.rand, 0, np.random.rand)
'''Illustrative Borders PC '''
# Draw the oven walls and the collimator plates as black bars.
plt.bar(-tcc, 2*r_max+tcc, width=tcc, bottom= -r_max-tcc, color='k') #back
plt.bar(-tcc, tcc, width=Col_zPos+tcc, bottom= r_max, color='k') #Top
plt.bar(-tcc, tcc, width=Col_zPos+tcc, bottom= -r_max-tcc, color='k') #Bot
plt.bar(Col_zPos, r_max-Col_Gap+tcc, width=tcc, bottom= Col_Gap, color='k') #Top Col
plt.bar(Col_zPos, r_max-Col_Gap+tcc, width=tcc, bottom= -r_max-tcc, color='k') #Bot Col
plt.xlim(left=2*Col_zPos/3, right=Col_zPos+8*tcc)
plt.ylim(bottom=-r_max-0.5*r_max, top=r_max+0.5*r_max)
stop = timeit.default_timer()
# Legend text boxes: simulation parameters and escape statistics.
# NOTE(review): Mm*100/Nn divides by Nn which is 0 (see above) — numpy
# yields nan here rather than raising; also the trailing ",2" sits outside
# the round() call and is passed to format() as an ignored extra argument.
leString = " nAtoms = {}\nnSteps/Size={}/{}\n Escaped = {} %\n <vy' = {}%\n Runtime = {}s".format(Natoms, Nsteps,h,round(Nn*100/Natoms,2), round(Mm*100/Natoms,3), round(stop - start, 3))
BIGString = " Pin Hole Diameter = {}mm \n Ratio of Simulated / Escaped = {}%".format(Col_Gap*1000, round(Mm*100/Nn),2)
plt.text(-Col_zPos/2+0.02,-r_max*2, leString, fontsize=19, bbox = dict(boxstyle='round', fc=(0.57,0.44,0.86), alpha=0.2))
plt.text(-Col_zPos/2+0.02,r_max*2,BIGString,fontweight='bold',fontsize=19, bbox = dict(boxstyle='round', fc=(0.57,0.44,0.86), alpha=0.2))
plt.title('Ruubidium Vapour - Trajectory & Collimation', fontsize=30)
plt.xlabel('z -----> ', fontsize=25)
plt.ylabel('<----- r ----->', fontsize=25)
#plt.scatter(z_initial(Natoms),y_initial(Natoms), s=20, marker=">", color='c')
plt.show()
|
from konlpy.tag import Mecab
# Smoke test for konlpy's Mecab wrapper: load the Korean dictionary from a
# Windows install path and print the morphemes of a sample name.
tokenizer = Mecab(dicpath=r"C:\mecab\mecab-ko-dic")
print(tokenizer.morphs("이근범"))
|
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-21 19:44:34
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-21 20:17:57
# 使用walk来计算文件夹的总大小
import os
import sys
def getsize(path):
    """Return the total size, in bytes, of every file under *path*.

    Recursively walks the directory tree with os.walk and sums
    os.path.getsize for each regular file.  (The original's debug prints of
    the generator and each visited directory have been removed, and its
    accumulator `sum` — which shadowed the builtin — renamed.)

    :param path: root directory to measure.
    :return: cumulative size of all contained files, in bytes.
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            total += os.path.getsize(os.path.join(dirpath, filename))
    return total
def main():
    """Print the total size of the directory given as the first CLI argument."""
    # Raises IndexError when no argument is supplied.
    path = sys.argv[1]
    #path = 'D:\\programming_with_python\\043从零开始学python\\day08 模块'
    # Message reads: "the total size of {path} is {bytes}."
    print(f"{path} 的总大小为{getsize(path)}.")
if __name__ == "__main__":
    main()
|
"""
Script that creates labels.csv file with
Filenames in first column and labels in second
column where 1 = Parasitized and 0 = Uninfected
"""
# Import modules
import os
import pandas as pd

# Dataset file path
path = "./cell_images"

# Filenames for each class directory
parasitized = os.listdir(path + "/Parasitized")
uninfected = os.listdir(path + "/Uninfected")

# Dataframe 1: parasitized cell pictures, labelled 1
df1 = pd.DataFrame(columns=["Filenames", "Parasitized"])
df1["Filenames"] = list(parasitized)
df1["Parasitized"] = [1] * len(parasitized)
# Drop the last row, which contains the stray Thumbs.db file
df1 = df1[:-1]

# Dataframe 2: uninfected cell pictures, labelled 0
df2 = pd.DataFrame(columns=["Filenames", "Parasitized"])
df2["Filenames"] = list(uninfected)
# BUG FIX: the label list must match the *uninfected* listing's length — the
# original used len(parasitized), which mislabels (or fails on) directories
# of different sizes.
df2["Parasitized"] = [0] * len(uninfected)
# Drop the last row, which contains the stray Thumbs.db file
df2 = df2[:-1]

# Concatenate both frames (DataFrame.append is deprecated since pandas 1.4
# and removed in 2.0; pd.concat is the supported equivalent).
df = pd.concat([df1, df2])

# Save in csv format
df.to_csv("labels.csv", index=False)
from PIL import Image
import os, sys
# Default input folder: "images_to_resize_folder" under the current working directory.
path = os.getcwd()+os.sep+"images_to_resize_folder"
def resize(directory):
    """Resize every file directly inside *directory* to 1038x437 and save it
    as '<original name> resized.jpeg' (JPEG, quality 100) alongside the original.

    :param directory: folder whose files are resized (non-files are skipped).
    """
    for item in os.listdir(directory):
        # BUG FIX: join against the `directory` argument, not the module-level
        # `path` global — the original ignored its parameter, so calling
        # resize() on any other folder silently operated on the wrong files.
        src = os.path.join(directory, item)
        if os.path.isfile(src):
            im = Image.open(src)
            base, _ext = os.path.splitext(src)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10
            # (Image.LANCZOS is the equivalent) — confirm the installed
            # Pillow version before changing this constant.
            resized = im.resize((1038, 437), Image.ANTIALIAS)
            resized.save(base + " resized.jpeg", "JPEG", quality=100)
    print("Finished Resizing!!!")
if __name__ == "__main__":
    # Resize everything in the default folder when run as a script.
    resize(path)
|
import chainer
import chainer.functions as F
import chainer.links as L
class Generator(chainer.Chain):
    """DCGAN-style generator.

    Maps a 100-dimensional noise vector to a single-channel 64x64 image via
    one fully-connected layer and two stride-2 deconvolutions.
    """
    def __init__(self):
        super(Generator, self).__init__(
            l1=L.Linear(100, 256*16*16),
            dcv1=L.Deconvolution2D(in_channels=256, out_channels=128,
                                   ksize=4, stride=2, pad=1),
            dcv2=L.Deconvolution2D(in_channels=128, out_channels=1,
                                   ksize=4, stride=2, pad=1),
            bc1=L.BatchNormalization(size=256),
            bc2=L.BatchNormalization(size=128))
        self.in_size = 100
        self.imshape = (1, 64, 64)

    def __call__(self, x, train=True):
        # Project the latent vector and fold it into a 256x16x16 feature map.
        batch = x.data.shape[0]
        feat = F.reshape(self.l1(x), (batch, 256, 16, 16))
        feat = F.relu(self.bc1(feat))
        # Upsample 16x16 -> 32x32, then 32x32 -> 64x64 (final layer has no
        # activation; the caller applies the loss directly).
        feat = F.relu(self.bc2(self.dcv1(feat)))
        return self.dcv2(feat)
class Discriminator(chainer.Chain):
    """DCGAN-style discriminator.

    Maps a (1, 64, 64) image to a single real/fake logit via two stride-2
    convolutions and a fully-connected readout.
    """
    def __init__(self):
        super(Discriminator, self).__init__(
            conv1=L.Convolution2D(in_channels=1, out_channels=128,
                                  ksize=5, stride=2, pad=2),
            conv2=L.Convolution2D(in_channels=128, out_channels=256,
                                  ksize=5, stride=2, pad=2),
            bc1=L.BatchNormalization(size=128),
            bc2=L.BatchNormalization(size=256),
            l1=L.Linear(256*16*16, 1))
        self.in_size = (1, 64, 64)
        self.out_size = 1
        self.imshape = (1, 64, 64)

    def __call__(self, x, train=True):
        # 64x64 -> 32x32 -> 16x16 feature maps, leaky-ReLU activations.
        feat = F.leaky_relu(self.bc1(self.conv1(x)))
        feat = F.leaky_relu(self.bc2(self.conv2(feat)))
        # Flattened by the Linear link; returns the raw logit.
        return self.l1(feat)
import csv
from algorithms.agglomer_clustering import AgglomerClustering
from algorithms.birch_clustering import BirchClustering
from algorithms.dbscan_clustering import DBSCANClustering
from algorithms.kmeans_clustering import KMeansClustering
# Module authorship / packaging metadata (dunder module attributes).
__author__ = "Konstantin Bogdanoski"
__copyright__ = "Copyright 2020, BlanketClusterer"
__credits__ = ["Konstantin Bogdanoski", "Prof. PhD. Dimitar Trajanov", "MSc. Kostadin Mishev"]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "Konstantin Bogdanoski"
__email__ = "konstantin.b@live.com"
__status__ = "Production"
def _check_first_row_format(csv_path, label):
    """Raise ValueError unless the first row of *csv_path* contains both
    'key' and 'value' (an empty file passes, matching prior behaviour)."""
    # FIX: the original opened the file and never closed it (leak); a `with`
    # block guarantees closure.  next() replaces the loop-with-break idiom.
    with open(csv_path, "r") as file:
        reader = csv.reader(file, delimiter=",")
        row = next(reader, None)
    if row is not None and ("key" not in row or "value" not in row):
        raise ValueError(label + " are not in specified format\n"
                         "File must start with the following line:\n"
                         "key,value\n"
                         "and must be a .csv file")


def validate_constructor(n_clusters,
                         clustering_type,
                         embeddings,
                         names,
                         items_in_cluster,
                         max_depth, group_names=None):
    """Validate BlanketClusterer constructor arguments.

    Raises
    ------
    ValueError
        If n_clusters is non-positive, the clustering type is unknown, the
        embeddings/names arguments are missing, a names file does not start
        with a ``key,value`` header, items_in_cluster <= 5, or max_depth is
        outside 1-6.
    """
    if int(n_clusters) <= 0:
        raise ValueError("Invalid number of clusters")
    if clustering_type not in ["k-means", "agglomerative", "dbscan", "birch"]:
        raise ValueError("Invalid clustering type\nAllowed values: ['k-means', 'agglomerative', 'dbscan', 'birch']")
    if embeddings is None:
        raise ValueError("No embeddings specified")
    if names is None:
        raise ValueError("No names .csv file specified")
    _check_first_row_format(names, "Names")
    if group_names is not None:
        _check_first_row_format(group_names, "Group names")
    if int(items_in_cluster) <= 5:
        raise ValueError("Number of items in clusters must be greater than 5")
    if not 0 < int(max_depth) <= 6:
        raise ValueError("Invalid argument for max depth, choose in range 1-6")
    return
class BlanketClusterer:
    """Facade that validates its inputs and dispatches to one of the
    supported clustering backends.

    Read more in the :ref:`UserGuide <blanket_clusterer>`.

    Parameters
    ----------
    :param n_clusters
        :type Integer, default=10
        Number of clusters (with centroids) to generate.
    :param clustering_type
        :type String, default="k-means"
        Which algorithm to run.  Allowed values:
        `k-means`, `agglomerative`, `dbscan`, `birch`
        (the implementations wrap scikit-learn).
    :param embeddings
        Path to the embeddings model on the file system.  The model must
        hold key-value pairs whose values are matrix embeddings; keys are
        used for name extraction.  Allowed models: `Word2Vec`.
    :param names
        Path to a `.csv` file of key-value name pairs; keys must match the
        keys in the embedding model.
    :param group_names
        Optional path to a `.csv` file of group-name pairs in the format
        'left_boundary-right_boundary, generic_name', e.g.
        '001-009, Example name'.
        IMPORTANT: when group names are supplied, clusters are also
        colorized by the prevailing group; this format is required to name
        clusters with specific names.
    """

    def __init__(self, n_clusters=10,
                 clustering_type="k-means",
                 embeddings=None,
                 names=None,
                 items_in_cluster=20,
                 max_depth=6,
                 output_path="./output.json",
                 group_names=None):
        # Fail fast on invalid arguments before storing anything.
        validate_constructor(n_clusters, clustering_type, embeddings, names,
                             items_in_cluster, max_depth, group_names)
        self.n_clusters = int(n_clusters)
        self.clustering_type = clustering_type
        self.embeddings = embeddings
        self.output_path = output_path
        self.names = names
        self.max_depth = int(max_depth)
        self.items_in_cluster = int(items_in_cluster)
        self.group_names = group_names

    def clusterize(self):
        """Instantiate the configured backend and run its clusterize()."""
        backends = {
            "k-means": KMeansClustering,
            "agglomerative": AgglomerClustering,
            "dbscan": DBSCANClustering,
            "birch": BirchClustering,
        }
        backend = backends.get(self.clustering_type)
        if backend is None:
            # Unknown types are rejected in __init__; mirror the original
            # if/elif chain, which silently did nothing here.
            return
        backend(self.n_clusters, self.embeddings, self.names,
                self.items_in_cluster, self.max_depth, self.output_path,
                self.group_names).clusterize()
|
# coding: utf-8
# Standard Python libraries
from io import IOBase
from pathlib import Path
from copy import deepcopy
from typing import Optional, Union
from yabadaba import load_query
import numpy as np
# https://github.com/usnistgov/atomman
import atomman as am
import atomman.unitconvert as uc
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# iprPy imports
from .. import Calculation
from .elastic_constants_static import elastic_constants_static
from ...calculation_subset import (LammpsPotential, LammpsCommands, Units,
AtommanSystemLoad, AtommanSystemManipulate,
LammpsMinimize)
class ElasticConstantsStatic(Calculation):
"""Class for managing static elastic constants calculations from small strains"""
############################# Core properties #################################
def __init__(self,
model: Union[str, Path, IOBase, DM, None]=None,
name: Optional[str]=None,
database = None,
params: Union[str, Path, IOBase, dict] = None,
**kwargs: any):
"""
Initializes a Calculation object for a given style.
Parameters
----------
model : str, file-like object or DataModelDict, optional
Record content in data model format to read in. Cannot be given
with params.
name : str, optional
The name to use for saving the record. By default, this should be
the calculation's key.
database : yabadaba.Database, optional
A default Database to associate with the Record, typically the
Database that the Record was obtained from. Can allow for Record
methods to perform Database operations without needing to specify
which Database to use.
params : str, file-like object or dict, optional
Calculation input parameters or input parameter file. Cannot be
given with model.
**kwargs : any
Any other core Calculation record attributes to set. Cannot be
given with model.
"""
# Initialize subsets used by the calculation
self.__potential = LammpsPotential(self)
self.__commands = LammpsCommands(self)
self.__units = Units(self)
self.__system = AtommanSystemLoad(self)
self.__system_mods = AtommanSystemManipulate(self)
self.__minimize = LammpsMinimize(self)
subsets = (self.commands, self.potential, self.system,
self.system_mods, self.minimize, self.units)
# Initialize unique calculation attributes
self.strainrange = 1e-6
self.__C = None
self.__raw_Cij_positive = None
self.__raw_Cij_negative = None
# Define calc shortcut
self.calc = elastic_constants_static
# Call parent constructor
super().__init__(model=model, name=name, database=database, params=params,
subsets=subsets, **kwargs)
@property
def filenames(self) -> list:
"""list: the names of each file used by the calculation."""
return [
'elastic_constants_static.py',
'cij.template'
]
############################## Class attributes ###############################
@property
def commands(self) -> LammpsCommands:
"""LammpsCommands subset"""
return self.__commands
@property
def potential(self) -> LammpsPotential:
"""LammpsPotential subset"""
return self.__potential
@property
def units(self) -> Units:
"""Units subset"""
return self.__units
@property
def system(self) -> AtommanSystemLoad:
"""AtommanSystemLoad subset"""
return self.__system
@property
def system_mods(self) -> AtommanSystemManipulate:
"""AtommanSystemManipulate subset"""
return self.__system_mods
@property
def minimize(self) -> LammpsMinimize:
"""LammpsMinimize subset"""
return self.__minimize
@property
def strainrange(self) -> float:
"""float: Strain step size used in estimating elastic constants"""
return self.__strainrange
@strainrange.setter
def strainrange(self, val: float):
self.__strainrange = float(val)
@property
def C(self) -> am.ElasticConstants:
"""atomman.ElasticConstants: Averaged elastic constants"""
if self.__C is None:
raise ValueError('No results yet!')
return self.__C
@property
def raw_Cij_positive(self) -> np.ndarray:
"""numpy.NDArray: Cij 6x6 array measured using positive strain steps"""
if self.__raw_Cij_positive is None:
raise ValueError('No results yet!')
return self.__raw_Cij_positive
@property
def raw_Cij_negative(self) -> np.ndarray:
"""numpy.NDArray: Cij 6x6 array measured using negative strain steps"""
if self.__raw_Cij_negative is None:
raise ValueError('No results yet!')
return self.__raw_Cij_negative
def set_values(self,
               name: Optional[str] = None,
               **kwargs: any):
    """
    Set calculation values directly. Any terms not given will be set
    or reset to the calculation's default values.

    Parameters
    ----------
    name : str, optional
        The name to assign to the calculation. By default, this is set as
        the calculation's key.
    strainrange : float, optional
        The magnitude of strain to use.
    **kwargs : any, optional
        Any keyword parameters supported by the set_values() methods of
        the parent Calculation class and the subset classes.
    """
    # Call super to set universal and subset content
    super().set_values(name=name, **kwargs)

    # Apply the calculation-specific term only when explicitly given
    # (the setter coerces it to float).
    if 'strainrange' in kwargs:
        self.strainrange = kwargs['strainrange']
####################### Parameter file interactions ###########################
def load_parameters(self,
                    params: Union[dict, str, IOBase],
                    key: Optional[str] = None):
    """
    Reads in and sets calculation parameters.

    Parameters
    ----------
    params : dict, str or file-like object
        The parameters or parameter file to read in.
    key : str, optional
        A new key value to assign to the object. If not given, will use
        calc_key field in params if it exists, or leave the key value
        unchanged.
    """
    # Load universal content
    input_dict = super().load_parameters(params, key=key)

    # Load input/output units
    self.units.load_parameters(input_dict)

    # Change default values for subset terms
    input_dict['sizemults'] = input_dict.get('sizemults', '3 3 3')
    input_dict['forcetolerance'] = input_dict.get('forcetolerance',
                                                  '1.0e-6 eV/angstrom')

    # Load calculation-specific strings (none for this calculation)

    # Load calculation-specific booleans (none for this calculation)

    # Load calculation-specific integers (none for this calculation)

    # Load calculation-specific unitless floats
    self.strainrange = float(input_dict.get('strainrange', 1e-6))

    # Load calculation-specific floats with units (none for this calculation)

    # Load LAMMPS commands
    self.commands.load_parameters(input_dict)

    # Load minimization parameters
    self.minimize.load_parameters(input_dict)

    # Load LAMMPS potential
    self.potential.load_parameters(input_dict)

    # Load initial system
    self.system.load_parameters(input_dict)

    # Manipulate system
    self.system_mods.load_parameters(input_dict)
def master_prepare_inputs(self,
                          branch: str = 'main',
                          **kwargs: any) -> dict:
    """
    Utility method that builds input parameters for prepare according to the
    workflows used by the NIST Interatomic Potentials Repository. In other
    words, transforms inputs from master_prepare into inputs for prepare.

    Parameters
    ----------
    branch : str, optional
        Indicates the workflow branch to prepare calculations for. Default
        value is 'main'.
    **kwargs : any
        Any parameter modifications to make to the standard workflow
        prepare scripts.

    Returns
    -------
    params : dict
        The full set of prepare parameters based on the workflow branch.

    Raises
    ------
    ValueError
        If the branch name is not recognized.
    """
    # Initialize params and copy over branch
    params = {'branch': branch}

    # main branch
    if branch == 'main':
        # A LAMMPS executable must be supplied for this workflow.
        assert 'lammps_command' in kwargs

        # Set default workflow settings
        params['buildcombos'] = 'atomicparent load_file parent'
        params['parent_record'] = 'relaxed_crystal'
        params['parent_standing'] = 'good'
        params['sizemults'] = '10 10 10'
        params['maxiterations'] = '5000'
        params['maxevaluations'] = '10000'
        params['strainrange'] = ['1e-6', '1e-7', '1e-8']

        # Copy kwargs to params
        for key, value in kwargs.items():
            # Rename potential-related terms for the parent buildcombos.
            # (startswith replaces the fragile key[:10] slice comparison.)
            if key.startswith('potential_'):
                params[f'parent_{key}'] = value
            # Copy/overwrite other terms
            else:
                params[key] = value
    else:
        raise ValueError(f'Unknown branch {branch}')

    return params
@property
def templatekeys(self) -> dict:
    """dict : The calculation-specific input keys and their descriptions."""
    return {
        'strainrange': (
            "The strain range to apply to the system to evaluate the "
            "elastic constants. Default value is '1e-6'"),
    }
@property
def singularkeys(self) -> list:
    """list: Calculation keys that can have single values during prepare."""
    keys = (
        # Universal keys
        super().singularkeys

        # Subset keys
        + self.commands.keyset
        + self.units.keyset

        # Calculation-specific keys (none; strainrange is multi-valued)
    )
    return keys
@property
def multikeys(self) -> list:
    """list: Calculation key sets that can have multiple values during prepare."""
    keys = (
        # Universal multikeys
        super().multikeys +

        # Combination of potential and system keys
        [
            self.potential.keyset +
            self.system.keyset
        ] +

        # System mods keys
        [
            self.system_mods.keyset
        ] +

        # Strainrange in its own key set
        [
            [
                'strainrange',
            ]
        ] +

        # Minimization keys
        [
            self.minimize.keyset
        ]
    )
    return keys
########################### Data model interactions ###########################
@property
def modelroot(self) -> str:
    """str: The root element of the content"""
    # Root element name used by build_model()/load_model() and queries.
    return 'calculation-elastic-constants-static'
def build_model(self) -> DM:
    """
    Generates and returns model content based on the values set to object.
    """
    # Build universal content
    model = super().build_model()
    calc = model[self.modelroot]

    # Build subset content
    self.commands.build_model(calc, after='atomman-version')
    self.potential.build_model(calc, after='calculation')
    self.system.build_model(calc, after='potential-LAMMPS')
    self.system_mods.build_model(calc)
    self.minimize.build_model(calc)

    # Build calculation-specific content
    if 'calculation' not in calc:
        calc['calculation'] = DM()
    if 'run-parameter' not in calc['calculation']:
        calc['calculation']['run-parameter'] = DM()
    run_params = calc['calculation']['run-parameter']
    run_params['strain-range'] = self.strainrange

    # Build results
    if self.status == 'finished':
        # The negative-strain Cij is appended first: load_model() reads
        # index 0 as negative and index 1 as positive, so keep this order.
        cij = DM()
        cij['Cij'] = uc.model(self.raw_Cij_negative,
                              self.units.pressure_unit)
        calc.append('raw-elastic-constants', cij)
        cij = DM()
        cij['Cij'] = uc.model(self.raw_Cij_positive,
                              self.units.pressure_unit)
        calc.append('raw-elastic-constants', cij)

        calc['elastic-constants'] = DM()
        calc['elastic-constants']['Cij'] = uc.model(self.C.Cij,
                                                    self.units.pressure_unit)

    self._set_model(model)
    return model
def load_model(self,
               model: Union[str, DM],
               name: Optional[str] = None):
    """
    Loads record contents from a given model.

    Parameters
    ----------
    model : str or DataModelDict
        The model contents of the record to load.
    name : str, optional
        The name to assign to the record. Often inferred from other
        attributes if not given.
    """
    # Load universal and subset content
    super().load_model(model, name=name)
    calc = self.model[self.modelroot]

    # Load calculation-specific content
    run_params = calc['calculation']['run-parameter']
    self.strainrange = run_params['strain-range']

    # Load results
    if self.status == 'finished':
        # Index 0 holds the negative-strain measurement and index 1 the
        # positive one, matching the append order used by build_model().
        self.__raw_Cij_negative = uc.value_unit(calc['raw-elastic-constants'][0]['Cij'])
        self.__raw_Cij_positive = uc.value_unit(calc['raw-elastic-constants'][1]['Cij'])
        Cij = uc.value_unit(calc['elastic-constants']['Cij'])
        self.__C = am.ElasticConstants(Cij=Cij)
@property
def queries(self) -> dict:
    """dict: Query objects and their associated parameter names."""
    queries = deepcopy(super().queries)
    queries.update({
        'strainrange': load_query(
            style='float_match',
            name='strainrange',
            path=f'{self.modelroot}.calculation.run-parameter.strain-range',
            description='search by strain range used'),
    })
    return queries
########################## Metadata interactions ##############################
def metadata(self) -> dict:
    """
    Generates a dict of simple metadata values associated with the record.
    Useful for quickly comparing records and for building pandas.DataFrames
    for multiple records of the same style.
    """
    # Call super to extract universal and subset content
    meta = super().metadata()

    # Extract calculation-specific content
    meta['strainrange'] = self.strainrange

    # Extract results (only present once the calculation has finished)
    if self.status == 'finished':
        meta['C'] = self.C
        meta['raw_Cij_negative'] = self.raw_Cij_negative
        meta['raw_Cij_positive'] = self.raw_Cij_positive

    return meta
@property
def compare_terms(self) -> list:
    """list: The terms to compare metadata values absolutely."""
    # strainrange is compared with a tolerance via compare_fterms instead.
    return [
        'script',

        'load_file',
        'load_options',
        'symbols',

        'potential_LAMMPS_key',
        'potential_key',
    ]
@property
def compare_fterms(self) -> dict:
    """dict: The terms to compare metadata values using a tolerance."""
    return {'strainrange': 1e-10}
########################### Calculation interactions ##########################
def calc_inputs(self) -> dict:
    """Builds calculation inputs from the class's attributes"""
    # Initialize input_dict
    input_dict = {}

    # Add subset inputs
    for subset in self.subsets:
        subset.calc_inputs(input_dict)

    # Remove subset-generated terms not used here (presumably not accepted
    # by the calc function — confirm against elastic_constants_static).
    del input_dict['transform']
    del input_dict['ucell']

    # Add calculation-specific inputs
    input_dict['strainrange'] = self.strainrange

    # Return input_dict
    return input_dict
def process_results(self, results_dict: dict):
    """
    Processes calculation results and saves them to the object's results
    attributes.

    Parameters
    ----------
    results_dict: dict
        The dictionary returned by the calc() method.
    """
    # Store averaged and per-direction raw elastic constants.
    self.__C = results_dict['C']
    self.__raw_Cij_negative = results_dict['raw_Cij_negative']
    self.__raw_Cij_positive = results_dict['raw_Cij_positive']
|
import unittest
import zExceptions
from base import FeatureletsPortalTestCase
from base import HAS_CMF
from dummy import ContentFeaturelet
class TestContentFeaturelet(FeatureletsPortalTestCase):
    """Tests install/remove behavior of ContentFeaturelet's content objects.

    Uses assertFalse/assertTrue instead of the deprecated failIf/failUnless
    aliases, which were removed in Python 3.12.
    """

    def afterSetUp(self):
        FeatureletsPortalTestCase.afterSetUp(self)
        self.featurelet = ContentFeaturelet()

    def test_objectsAreCreated(self):
        # Content must not pre-exist; installing creates each object with
        # the declared portal_type.
        for ob_info in ContentFeaturelet._info['content']:
            self.assertFalse(self.folder.hasObject(ob_info['id']))
        self.supporter.installFeaturelet(self.featurelet)
        for ob_info in ContentFeaturelet._info['content']:
            ob = self.folder._getOb(ob_info['id'], None)
            self.assertEqual(ob.portal_type, ob_info['portal_type'])

    def test_objectsAreDeleted(self):
        self.supporter.installFeaturelet(self.featurelet)
        self.supporter.removeFeaturelet(self.featurelet)
        for ob_info in ContentFeaturelet._info['content']:
            self.assertFalse(self.folder.hasObject(ob_info['id']))

    def test_twiceIsOkay(self):
        # Re-installing must not clobber existing objects.
        marker = 'marked'
        self.supporter.installFeaturelet(self.featurelet)
        for ob_info in ContentFeaturelet._info['content']:
            ob = self.folder._getOb(ob_info['id'])
            ob.marker = marker
        self.supporter.installFeaturelet(self.featurelet)
        for ob_info in ContentFeaturelet._info['content']:
            ob = self.folder._getOb(ob_info['id'])
            self.assertEqual(ob.marker, marker)

    def test_twiceDoesNotRestoreDeleted(self):
        self.supporter.installFeaturelet(self.featurelet)
        del_id = ContentFeaturelet._info['content'][0]['id']
        self.folder._delOb(del_id)
        self.supporter.installFeaturelet(self.featurelet)
        self.assertFalse(self.folder.hasObject(del_id))

    def test_alreadyExistsRaisesError(self):
        ob_info = ContentFeaturelet._info['content'][0]
        self.folder.portal_types.constructContent(ob_info['portal_type'],
                                                  self.folder, ob_info['id'])
        self.assertRaises(zExceptions.BadRequest,
                          self.supporter.installFeaturelet,
                          self.featurelet)

    def test_uninstalledRemovalIsOkay(self):
        self.supporter.removeFeaturelet(self.featurelet)

    def test_uninstallDoesNotRemoveExistingObjects(self):
        ob_info = ContentFeaturelet._info['content'][0]
        self.folder.portal_types.constructContent(ob_info['portal_type'],
                                                  self.folder, ob_info['id'])
        self.supporter.removeFeaturelet(self.featurelet)
        self.assertTrue(self.folder.hasObject(ob_info['id']))
def test_suite():
    """Assemble the module's test suite; empty when CMF is unavailable."""
    tests = []
    if HAS_CMF:
        tests.append(unittest.makeSuite(TestContentFeaturelet))
    return unittest.TestSuite(tests)
|
import xml.etree.ElementTree as xml
from xml.dom import minidom
import os
import sys
def custom_package_xml_generator(directory, packagename=None, version='45.0', filename='package.xml'):
    """Create a custom package.xml manifest from directories with metadata.

    Scans the immediate subdirectories of *directory*, maps each known
    folder name to its Salesforce metadata type and lists every member
    file.  Object files are additionally parsed so their fields, list
    views, sharing reasons, record types, validation rules and compact
    layouts are listed individually.

    Parameters
    ----------
    directory : str
        Metadata root folder (e.g. 'src'); the manifest is written here.
    packagename : str, optional
        If given, emitted as the package <fullName>.
    version : str, optional
        API version written to the <version> element. Default '45.0'.
    filename : str, optional
        Name of the generated manifest file. Default 'package.xml'.
    """
    # Folder name -> Salesforce metadata type name.
    METADATA_TYPE = {
        'applications':'CustomApplication', 'aura':'AuraDefinitionBundle', 'classes':'ApexClass', 'customPermissions':'CustomPermission',
        'flexipages':'FlexiPage', 'flows':'Flow', 'globalValueSets':'GlobalValueSet', 'labels':'CustomLabels', 'layouts':'Layout',
        'lwc': 'LightningComponentBundle', 'objects':'CustomObject', 'pages':'ApexPage', 'permissionsets':'PermissionSet', 'profiles':'Profile',
        'staticresources':'StaticResource', 'tabs':'CustomTab', 'triggers':'ApexTrigger', 'contentassets':'ContentAsset', 'pathAssistants':'PathAssistant',
        'quickActions':'QuickAction', 'remoteSiteSettings':'RemoteSiteSetting', 'workflows':'Workflow'
    }
    # Non-implemented metadata (kept from the original notes): 'ApexComponent',
    # 'CustomMetadata' (needs custom manipulation), 'CustomObjectTranslation',
    # 'DuplicateRule', 'FlowCategory', 'GlobalValueSetTranslation',
    # 'MatchingRules'; Dashboard/Report members require folder-qualified names
    # (e.g. <members>Folder/ReportName</members>); Workflow deployments can
    # additionally wildcard the Workflow* sub-types (WorkflowFieldUpdate,
    # WorkflowKnowledgePublish, WorkflowTask, WorkflowAlert, WorkflowSend,
    # WorkflowOutboundMessage, WorkflowRule).

    # Sub-components harvested from object files.
    allfields = []    # CustomField members
    alllistv = []     # ListView members
    allsharingr = []  # SharingReason members
    allrecordt = []   # RecordType members
    allvalidr = []    # ValidationRule members
    allclayout = []   # CompactLayout members
    all_reg = []      # [folder name, [member file names]] pairs
    obj_dir = os.path.join(directory, 'objects')
    for entry in os.listdir(directory):
        if not os.path.isfile(os.path.join(directory, entry)):
            all_reg.append([entry, os.listdir(os.path.join(directory, entry))])

    # Start our xml structure.
    root = xml.Element('Package')
    root.set('xmlns', 'http://soap.sforce.com/2006/04/metadata')

    for mdtype, mdnames in all_reg:
        # Create one <types> node per recognized metadata folder.
        if mdtype in METADATA_TYPE:
            etype = xml.SubElement(root, 'types')
            ename = xml.SubElement(etype, 'name')
            ename.text = str(METADATA_TYPE[mdtype])
            for member in mdnames:
                if '-meta.xml' in member:
                    continue  # companion meta files are not members themselves
                emember = xml.SubElement(etype, 'members')
                # Member name is the file name without its extension, if any.
                dot = member.find('.')
                emember.text = str(member[:dot]) if dot != -1 else str(member)
                # Parse object files for their nested sub-components.
                if mdtype == 'objects':
                    obj = xml.parse(os.path.join(obj_dir, member))
                    root_tag = obj.getroot().tag
                    # Tags carry a '{namespace}' prefix; reuse it for lookups.
                    namespaces = root_tag[:root_tag.find('}') + 1]
                    reg_name = member[:member.find('.')]
                    for field in obj.findall(namespaces + 'fields'):
                        allfields.append(reg_name + '.' + field.find(namespaces + 'fullName').text)
                    for listview in obj.findall(namespaces + 'listViews'):
                        alllistv.append(reg_name + '.' + listview.find(namespaces + 'fullName').text)
                    for sharing in obj.findall(namespaces + 'sharingReasons'):
                        allsharingr.append(reg_name + '.' + sharing.find(namespaces + 'fullName').text)
                    for record in obj.findall(namespaces + 'recordTypes'):
                        allrecordt.append(reg_name + '.' + record.find(namespaces + 'fullName').text)
                    for valid in obj.findall(namespaces + 'validationRules'):
                        allvalidr.append(reg_name + '.' + valid.find(namespaces + 'fullName').text)
                    # BUG FIX: compact layouts were previously read from
                    # 'listViews' and appended to an undefined name
                    # ('allclayouts'), raising NameError below; compact
                    # layouts live under <compactLayouts>.
                    for clayout in obj.findall(namespaces + 'compactLayouts'):
                        allclayout.append(reg_name + '.' + clayout.find(namespaces + 'fullName').text)
        # Custom behavior for custom labels: also wildcard individual labels.
        if mdtype == 'labels':
            etype = xml.SubElement(root, 'types')
            ename = xml.SubElement(etype, 'name')
            ename.text = 'CustomLabel'
            emember = xml.SubElement(etype, 'members')
            emember.text = str('*')

    def _add_types(type_name, members):
        # Append a <types> node named type_name listing members (skipped
        # when members is empty).
        if members:
            etype = xml.SubElement(root, 'types')
            ename = xml.SubElement(etype, 'name')
            ename.text = type_name
            for m in members:
                emember = xml.SubElement(etype, 'members')
                emember.text = str(m)

    # Add related metadata types harvested from the objects.
    _add_types('CustomField', allfields)
    _add_types('CompactLayout', allclayout)
    _add_types('ListView', alllistv)
    _add_types('SharingReason', allsharingr)
    _add_types('RecordType', allrecordt)
    _add_types('ValidationRule', allvalidr)

    # Add the final xml node package version.
    eversion = xml.SubElement(root, 'version')
    eversion.text = str(version)
    # Optional package name.
    if packagename is not None:
        efname = xml.SubElement(root, 'fullName')
        efname.text = str(packagename)

    # Pretty format for xml and write the manifest.
    xmlstring = xml.tostring(root)
    reparsed = minidom.parseString(xmlstring)
    prettyxml = reparsed.toprettyxml(indent=' ', newl='\n', encoding='UTF-8')
    try:
        with open(os.path.join(directory, filename), "bw") as xml_file:
            xml_file.write(prettyxml)
    except IOError:
        # Best-effort write: an unwritable target silently produces no file.
        pass
if __name__ == '__main__':
    # Usage: <script> <directory> [packagename]
    # Previously this always read args[1] and raised IndexError when the
    # optional package name was omitted.
    args = sys.argv[1:]
    if not args:
        sys.exit('usage: custom_package_xml_generator <directory> [packagename]')
    custom_package_xml_generator(args[0], *args[1:2])
# Standard Libraries
import threading
import time
import xml.etree.ElementTree as ElementTree
# Project Libraries.
import config.config as config
# GPIO Definitions for the vibrators (3 count)
Vibrators = [17, 27, 22]

# Thread to handle Rumbling on command. The main thread handling run just loops
# continuously dealing with requests to "Rumble" - this operates up to three
# Vibrators attached to the Pi via PWM.
class RumbleThread (threading.Thread):

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter

    # Deals with all the requests to Rumble.
    # NOTE(review): the loop never sleeps when the queue is empty, so this
    # busy-waits at full speed between requests — confirm intent.
    def run(self):
        #global RumbleQ
        ms_duration = 0
        print ("Starting " + self.name)
        while(1):
            # Do the Rumble as required by the Queue.
            # Check the Queue - if something in there, then process them in turn.
            if config.RumbleQ.empty() == False:
                rumble_str = config.RumbleQ.get_nowait()
                # Accept XML string that specifies rumble information - each Rumble XML needs to specify the strength
                # of each of the three vibrators, which may be 0. Number of ms to rumble for is also required.
                # <Rumble><Vib0Str>0-100</Vib0Str><Vib1Str>0-100</Vib1Str><Vib2Str>0-100</Vib2Str><MsDuration>0-10000</MsDuration></Rumble>
                print (rumble_str)
                # Process the XML rumble request via the Element Tree library.
                RumbleElement = ElementTree.fromstring(rumble_str)
                # If Rumble - process it.
                VibeStr =[0,0,0] # Init
                if (RumbleElement.tag == 'Rumble'):
                    # Read through all the information, extracting the data required to rumble.
                    for Child in RumbleElement:
                        # Get Vib Array Strengths
                        if (Child.tag == 'Vib0Str'):
                            #print(Child.text)
                            VibeStr[0] = int(Child.text)
                        # Get Vib Array Strengths
                        if (Child.tag == 'Vib1Str'):
                            #print(Child.text)
                            VibeStr[1] = int(Child.text)
                        # Get Vib Array Strengths
                        if (Child.tag == 'Vib2Str'):
                            #print(Child.text)
                            VibeStr[2] = int(Child.text)
                        # Get the duration to rumble.
                        if (Child.tag == 'MsDuration'):
                            #print(Child.text)
                            ms_duration = int(Child.text)
                    # Run the requested Vibrations.
                    for i in range(0,3):
                        # Scale 0-100 strength to a 0-255 PWM duty cycle;
                        # important to check the range - set to max if exceeded.
                        PWM = int(int(VibeStr[i])*255/100)
                        if (PWM > 255):
                            PWM =255
                        #print ("***", PWM) # debug - remove later.
                        config.pi.set_PWM_dutycycle(Vibrators[i], PWM)
                    # sleep as specified - rumble is ongoing at this point.
                    time.sleep(float(ms_duration/1000))
                    # Turn vibrators off.
                    for i in range(0,3):
                        config.pi.set_PWM_dutycycle(Vibrators[i], 0)
                    time.sleep(.3)
                else:
                    print("****Unrecognised command - rumble thread - ignoring - not worth halting")
        # Unreachable: the while(1) loop above never breaks.
        print ("Exiting " + self.name)
        exit()
from datetime import date
from flask import Flask, render_template, request, Response, g
import xml.etree.ElementTree as ET
import sys
import urllib.request
import urllib.parse
from configparser import ConfigParser
import redis
import os
# Module-level singletons: INI config reader, the Flask app, and the Redis
# connection URL (defaults to a local instance when REDIS_URL is unset).
config = ConfigParser()
app = Flask(__name__)
redis_url = os.getenv('REDIS_URL', 'redis://localhost:6379')
def initdb():
    """Open and return a Redis connection from the configured URL."""
    return redis.from_url(redis_url)
@app.before_request
def before_request():
    # Attach a fresh Redis handle to the request context.
    g.db = initdb()

# Resolve the Inkbunny session id (sid), in priority order:
# config.ini, then the SID environment variable, then a guest login.
# check if SID exists in config.ini
if config.read('config.ini') != [] and config.get('IB', 'sid') != '':
    sid = config.get('IB', 'sid')
# check if SID exists in environment variable
elif os.environ.get('SID') is not None:
    sid = os.environ.get('SID')
# else get guest SID from IB (network call at import time)
else:
    data = urllib.parse.urlencode(
        {'username': 'guest', 'password': '', 'output_mode': 'xml'}).encode('utf-8')
    source = urllib.request.urlopen(
        url='https://inkbunny.net/api_login.php', data=data)
    # First child of the login response holds the sid.
    sid = ET.parse(source).getroot()[0].text
# datadict is flags used for IB search, root is xml builder,
#cachename is key name for redis cache
def ibrss(datadict,root,cachename):
rating = datadict.get('rating')
items = list(root[7])
vitemtitle = []
vitemid = []
vitemlink = []
vitemthumb = []
vitemtype = []
vitemdesc = []
vitemuser = []
vitemrating = []
vitempubdate = []
vitemimg = []
vrsstitle = 'Inkbunny - '
filename = 'IB'
# add contents of elements to their lists
for content in root.iter('title'):
vitemtitle.append(content.text)
for content in root.iter('submission_id'):
vitemid.append(content.text)
for content in root.iter('username'):
vitemuser.append(content.text)
for content in root.iter('type_name'):
vitemtype.append(content.text)
for content in root.iter('rating_id'):
vitemrating.append(content.text)
for content in root.iter('file_url_full'):
vitemimg.append(content.text)
# add link to vitemlink
for i in vitemid:
vitemlink.append('https://inkbunny.net/s/' + i)
# make RSS title. If no query found, revert to 'ALL posts'
# make filename for unique rss files.
flags = ['text', 'keyword_id', 'username',
'user_id', 'favs_user_id', 'pool_id']
for key in flags:
if key in datadict:
if key == 'text':
vrsstitle = vrsstitle + 'Search Query: "' + \
datadict.get(key) + '"; '
elif key == 'keyword_id':
tmpdata = {'sid': sid, 'output_mode': 'xml', 'show_pools': 'yes', 'count_limit': '1',
'submission_ids': root[7][0].find('submission_id').text}
tmpdata = urllib.parse.urlencode(tmpdata).encode('utf-8')
tmpsource = urllib.request.urlopen(
url='https://inkbunny.net/api_submissions.php', data=tmpdata)
tmptree = ET.parse(tmpsource)
tmproot = tmptree.getroot()
for i in tmproot[3][0].find('keywords'):
if i[0].text == datadict.get(key):
vrsstitle = vrsstitle + 'Keyword: ' + i[1].text + '; '
elif key == 'username':
vrsstitle = vrsstitle + 'User: ' + datadict.get(key) + '; '
elif key == 'user_id':
vrsstitle = vrsstitle + 'User: ' + vitemuser[0] + '; '
elif key == 'favs_user_id':
tmpdata = {'sid': sid, 'output_mode': 'xml','count_limit': '1',
'user_id': datadict.get(key)}
tmpdata = urllib.parse.urlencode(keydata).encode('utf-8')
tmpsource = urllib.request.urlopen(
url='https://inkbunny.net/api_search.php', data=tmpdata)
tmptree = ET.parse(tmpsource)
tmproot = tmptree.getroot()
vrsstitle = vrsstitle + 'Favourites by ' + \
tmproot[7][0].find('username').text + '; '
elif key == 'pool_id':
tmpdata = {'sid': sid, 'output_mode': 'xml', 'count_limit':'1',
'show_pools': 'yes', 'submission_ids': '1776649'}
tmpdata = urllib.parse.urlencode(tmpdata).encode('utf-8')
tmpsource = urllib.request.urlopen(
url='https://inkbunny.net/api_submissions.php', data=tmpdata)
tmptree = ET.parse(tmpsource)
tmproot = tmptree.getroot()
for i in tmproot[3][0].find('pools'):
if i[0].text == datadict.get(key):
vrsstitle = vrsstitle + 'Pool: ' + \
i[1].text + ' by ' + vitemuser[0] + '; '
if vrsstitle == 'Inkbunny - ':
vrsstitle = vrsstitle + 'ALL posts'
vrsstitle = vrsstitle.strip('; ')
for i in items:
filenamelength = len(i.findtext('file_name'))
# find thumbnail and append to vitemthumb
if i.find('thumbnail_url_huge_noncustom') != None:
vitemthumb.append(i.find('thumbnail_url_huge_noncustom').text)
elif i.find('thumbnail_url_huge') != None:
vitemthumb.append(i.find('thumbnail_url_huge').text)
# if no thumbnail found, use default thumbnail
elif i.findtext('file_name')[filenamelength - 3:filenamelength] in ['png', 'jpg', 'gif', 'swf', 'flv', 'mp4', 'mp3', 'doc', 'rtf', 'txt']:
if i.findtext('file_name')[filenamelength - 3:filenamelength] in ['png', 'jpg', 'gif']:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/nofile.png')
elif i.findtext('file_name')[filenamelength - 3:filenamelength] in ['doc', 'rtf', 'txt']:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/writing.png')
elif i.findtext('file_name')[filenamelength - 3:filenamelength] in ['flv', 'mp4']:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/video.png')
elif i.findtext('file_name')[filenamelength - 3:filenamelength] in ['swf']:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/shockwave.png')
elif i.findtext('file_name')[filenamelength - 3:filenamelength] in ['mp3']:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/audio.png')
else:
vitemthumb.append(
'https://au.ib.metapix.net/images78/overlays/nofile.png')
# add description to vitemdesc
for i in range(len(items)):
vitemdesc.append('<a href="' + vitemlink[i] + '"><img src="' + vitemthumb[i] + '"></a><p><a href="' + vitemlink[i] + '">Submission</a> | <a href="' + vitemimg[i] + '">Direct</a></p>Type: '
+ vitemtype[i] + ' <br/><br/><a href="http://inkbunny.net/' + vitemuser[i] + '">By ' + vitemuser[i] + '</a>')
# create integer to day string for vitempubdate
def dayint(di):
if di == 0:
return 'Mon'
elif di == 1:
return 'Tue'
elif di == 2:
return 'Wed'
elif di == 3:
return 'Thu'
elif di == 4:
return 'Fri'
elif di == 5:
return 'Sat'
elif di == 6:
return 'Sun'
# create integer to month string for vitempubdate
def monint(mi):
if mi == 1:
return 'Jan'
elif mi == 2:
return 'Feb'
elif mi == 3:
return 'Mar'
elif mi == 4:
return 'Apr'
elif mi == 5:
return 'May'
elif mi == 6:
return 'Jun'
elif mi == 7:
return 'Jul'
elif mi == 8:
return 'Aug'
elif mi == 9:
return 'Sep'
elif mi == 10:
return 'Oct'
elif mi == 11:
return 'Nov'
elif mi == 12:
return 'Dec'
# convert create_datetime to valid datetime for RSS
for i in items:
ibdate = i[4].text
yyyy = ibdate[0:4]
mm = ibdate[5:7]
dd = ibdate[8:10]
hhmmss = ibdate[11:19]
tz = ibdate[len(ibdate) - 3:len(ibdate)] + '00'
day = dayint(date(int(yyyy), int(mm), int(dd)).weekday())
month = monint(int(mm))
pubdate = day + ', ' + dd + ' ' + month + ' ' + yyyy + ' ' + hhmmss + ' ' + tz
vitempubdate.append(pubdate)
# RSS builder
rss = ET.Element('rss', attrib={'version': '2.0'})
channel = ET.Element('channel')
rss.append(channel)
rsstitle = ET.SubElement(channel, 'title')
rsstitle.text = vrsstitle
rsslink = ET.SubElement(channel, 'link')
for i in range(len(items)):
# skip submissions if posts don't fit the rating
if rating == '001':
if vitemrating[i] == '0': continue
elif vitemrating[i] == '1': continue
elif rating == '011':
if vitemrating[i] == '0': continue
elif rating == '110':
if vitemrating[i] == '2': continue
elif rating == '010':
if vitemrating[i] == '0': continue
elif vitemrating[i] == '2': continue
elif rating == '100':
if vitemrating[i] == '1': continue
elif vitemrating[i] == '2': continue
elif rating == '101':
if vitemrating[i] == '1': continue
# elif rating == '111'
item = ET.Element('item')
channel.append(item)
itemtitle = ET.SubElement(item, 'title')
itemtitle.text = vitemtitle[i]
itemlink = ET.SubElement(item, 'link')
itemlink.text = vitemlink[i]
itemdesc = ET.SubElement(item, 'description')
itemdesc.text = vitemdesc[i]
itemguid = ET.SubElement(item, 'guid', isPermaLink='false')
itemguid.text = vitemid[i]
itempubdate = ET.SubElement(item, 'pubDate')
itempubdate.text = vitempubdate[i]
feed = ET.tostring(rss, encoding='utf-8', method='xml')
g.db.set(cachename, feed, ex=86400) # set feed cache to expire in 24 hours unless if updated.
return Response(feed, mimetype='application/rss+xml')
@app.route('/')
def home():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/search')
def searchquery():
    """Validate the query-string flags, run the IB search and return RSS."""
    invalid = ''
    cachename = ''
    keylist = list(request.args.keys())
    # returns nested list. [('username', [u'ww'])] [0][1][0] = ww
    flaglist = list(request.args.lists())
    # Flags accepted by the Inkbunny search API (plus the custom 'rating').
    flags = ['rating', 'sid', 'output_mode', 'field_join_type', 'text', 'string_join_type', 'keywords', 'title', 'description', 'md5', 'keyword_id',
             'username', 'user_id', 'favs_user_id', 'unread_submissions', 'type', 'sales', 'pool_id', 'orderby', 'dayslimit', 'random', 'scraps']
    # Collect unknown flags for the error message.
    for i in keylist:
        if i not in flags:
            invalid += i + ', '
    # User-supplied session ids are rejected outright.
    for flag in flaglist:
        if 'sid' in flag:
            return 'User defined SIDs are restricted for this app.<br>Please define the SID in <code>config.ini</code> or in the <code>SID</code> environment variable'
    # '000' would filter out every rating.
    if request.args.get('rating') == '000':
        return 'Oi m8 wth are ya tryna do? Break ma code?'
    elif request.args.get('rating') not in ['001','011','010','110','101','100','111',None]:
        invalid += 'rating'
    if invalid == '': #no errors in url query
        # rebuild url query from flags
        data = {}
        if isinstance(flaglist, tuple): # patch for single argument
            flaglist = [flaglist]
        for flag in flaglist:
            for value in flag[1]:
                cachename = cachename + flag[0] + ':' + value + ','
                data[flag[0]] = value
        cachename = str(cachename.strip(',')) # make unique name for redis
        data.update({'sid': sid, 'output_mode': 'xml'})
        datadict = data.copy() #clone dictionaries - datadict for ibrss() and data for API request
        if 'rating' in data: del data['rating'] #remove custom flag 'rating' before using in API request
        url = 'https://inkbunny.net/api_search.php'
        data = urllib.parse.urlencode(data).encode('utf-8')
        root = ET.parse(urllib.request.urlopen(url=url, data=data)).getroot()
        if root[1].tag == 'error_message':
            return 'Error: ' + root[1].text
        elif len(list(root[7])) == 0:
            return 'No submissions found. Try again when your search has at least one submission.'
        try:
            # Compare the cached feed's newest guid with the newest search
            # hit; serve from cache when they match.
            if ET.fromstring(g.db.get(cachename))[0][2][3].text == root[7][0].find('submission_id').text:
                source = ''  # NOTE(review): dead assignment, never read
                return Response(g.db.get(cachename), mimetype='application/rss+xml') #already exists, return feed from cache
            else:
                return ibrss(datadict,root,cachename) # latest post doesn't match, update feed
        except TypeError: #doesn't exist in cache
            return ibrss(datadict,root,cachename)
        except IndexError: #unknown error in the flags (bypassed the error checker) last resort.
            g.db.delete(cachename)
            return ibrss(datadict,root,cachename)
    else:
        return 'Invalid flag(s): ' + invalid.strip(', ') + '<br>Please check the URL and try again.'
if __name__ == '__main__':
    # Bind to all interfaces; port comes from the environment (default 5000).
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
|
from django.contrib import admin
from .models import Group, Human, OtherPeople, Extra
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
    """Admin for Group records; the change list shows only the name."""
    list_display = ['name']
@admin.register(Human)
class HumanAdmin(admin.ModelAdmin):
    """Admin for Human records; lists identity and contact columns."""
    list_display = ['first_name', 'last_name', 'role', 'location', 'email']
@admin.register(OtherPeople)
class OtherPeopleAdmin(admin.ModelAdmin):
    """Admin for OtherPeople records with default ModelAdmin behavior."""
    pass
@admin.register(Extra)
class ExtraAdmin(admin.ModelAdmin):
    """Admin for Extra records; the change list shows field name and value."""
    list_display = ['field_name', 'value']
|
def remove_all(substring, text):
    """Remove every occurrence of *substring* from *text*.

    Prints the cleaned string (preserving the original side effect) and
    returns it.  Previously the function returned ``print(...)``, which is
    always None, contradicting the expected values noted at the call sites.
    """
    result = text.replace(substring, "")
    print(result)
    return result
# Demonstration calls — expected printed output shown in the comments.
# (The stray line-continuation backslash after the second call was removed.)
remove_all("an", "banana")
#== "ba"
remove_all("cyc", "bicycle")
#== "bile"
remove_all("iss", "Mississippi")
#== "Mippi"
remove_all("eggs", "bicycle")
#== "bicycle"
import requests as req
import time
# Crawl PTT's NTUcourse board search results for a user-supplied keyword,
# printing each post title and its URL, page by page.
keyword=input('請輸入關鍵字:')  # prompt: "enter keyword"
t = time.time()  # start time, reported when the crawl finishes
url='https://www.ptt.cc/bbs/NTUcourse/search?q='+keyword
print(url)
# Follow the "previous page" link until none remains.
while True:
    try:
        html=req.get(url)
        # NOTE(review): importing bs4 inside the loop works (imports are
        # cached) but hoisting it to the top of the file would be cleaner.
        import bs4
        root=bs4.BeautifulSoup(html.text,'html.parser')
        titles=root.find_all('div',class_='title')
        for title in titles:
            if title.a.string != None:
                print(title.a.string)
                titlelinks=root.find('a', string=title.a.string)
                print('https://www.ptt.cc'+titlelinks['href'])
        nextlink=root.find('a',string='‹ 上頁')  # "previous page" link
        url='https://www.ptt.cc'+ nextlink['href']
    except:
        # NOTE(review): the bare except is the loop exit — it fires when
        # nextlink is None on the last page, but it also swallows network
        # errors and KeyboardInterrupt; a narrower except would be safer.
        break
print('\n搜尋完畢!共花費',time.time()-t,'秒')  # "search finished; elapsed seconds"
"""Parameters for DQN in MountainCar environment
"""
# Environment name.
ENV = 'MountainCar-v0'
# Q-Network configuration.
HIDDEN_1 = 32
HIDDEN_2 = 64
ACTIVATION = 'relu'
# Training/testing configuration.
NUM_TRAINING_EPISODES = 10000
NUM_TEST_EPISODES = 20
BATCH_SIZE = 32
EPOCHS = 5
# Test interval
INTERVAL = 100
# Number of episodes of getting >= EARLY_STOPPING_REWARD
# after which training is stopped.
EARLY_STOPPING = 100
EARLY_STOPPING_REWARD = -120
# Hyper-parameters
# Learning rate
LR = 0.0001
# Discounting factor
GAMMA = 1
# Exploration config
EPSILON = 1
EPSILON_MIN = 0.1
DECAY_PERIOD = 3000
# Replay memory size
BUFFER_SIZE = 70000
# Model save dir
WEIGHTS = './mountaincar_weights'
|
#Python Functions Book 1 Chapter 8
# Python functions are the same as JavaScript functions, but the syntax is different and white space matters.
# Let's say that again: WHITE SPACE MATTERS.
# Remember how we hounded you about proper formatting in the client side course? Now, you have to do it for real or your code won't work. When writing a function You must indent the contents of the function.
# Look at how the same function is written in the two different syntaxes.
# const createPerson = (firstName, lastName, age, occupation) => {
# return {
# firstName,
# lastName,
# age,
# occupation
# }
# }
# melissa = createPerson("Melissa", "Bell", 25, "Software Developer")
# Function and variable names are snake case instead of camel case
def create_person(first_name, last_name, age, occupation):
    """Build a person record as a dict (Python twin of the JS version above)."""
    person = {
        "first_name": first_name,
        "last_name": last_name,
        "age": age,
        "occupation": occupation,
    }
    return person

melissa = create_person("Melissa", "Bell", 25, "Software Developer")
|
from duplicates import get_duplicate_indices
def test_get_duplicate_indices_docstring():
    """'is' and 'it' repeat; their first occurrences are indices 0 and 1."""
    words = 'is it true or is it not'.split()
    assert get_duplicate_indices(words) == [0, 1]
def test_get_duplicate_indices_bite_text():
    """'this', 'new' and 'bite' repeat; first occurrences at 0, 3 and 4."""
    words = ('this is a new bite I hope this '
             'bite will teach you something new').split()
    assert get_duplicate_indices(words) == [0, 3, 4]
def test_get_duplicate_indices_another_text():
    # keeping it simple with split on space, so lists != lists.
    text = ('List comprehensions provide a concise way to create '
            'lists. Common applications are to make new lists where '
            'each element is the result of some operations applied '
            'to each member of another sequence or iterable, or to '
            'create a subsequence of those elements that satisfy a '
            'certain condition')
    assert get_duplicate_indices(text.split()) == [3, 6, 7, 17, 22, 32]
import argparse
import objects
import groups
import services
import access_rules
def add_objects():
    # Trace then delegate to the objects module. FIX: print() form runs under
    # both Python 2 and 3, and the message now matches the function name
    # (it previously said 'inside function objects').
    print('inside function add_objects')
    objects.add()
def add_groups():
    # Trace then delegate; print() form runs under both Python 2 and 3.
    print('inside function add_groups')
    groups.add()
def add_services():
    # Trace then delegate; print() form runs under both Python 2 and 3.
    print('inside function add_services')
    services.add()
def add_access_rules():
    # Trace then delegate; print() form runs under both Python 2 and 3.
    print('inside function add_access_rules')
    access_rules.add()
def main():
    # Build a four-subcommand CLI; each subcommand dispatches to one of the
    # add_* helpers via set_defaults(func=...).
    parser = argparse.ArgumentParser(description='Create dummy rules.')
    subparser = parser.add_subparsers(help='Different objects that can be added automatically')
    parser_objects = subparser.add_parser('objects', help='STEP 1: Add objects of host and network type')
    parser_objects.set_defaults(func=add_objects)
    parser_groups = subparser.add_parser('groups', help='STEP 2: Add groups of objects that should be added beforehand')
    parser_groups.set_defaults(func=add_groups)
    parser_service = subparser.add_parser('services', help='STEP 3: Add services of TCP type')
    parser_service.set_defaults(func=add_services)
    parser_rules = subparser.add_parser('access-rule', help='STEP 4: Add access rules for services that should be added previously')
    parser_rules.set_defaults(func=add_access_rules)
    args = parser.parse_args()
    # NOTE(review): under Python 3, running with no subcommand leaves
    # args.func unset and this raises AttributeError — confirm intended.
    args.func()

if __name__ == '__main__':
    main()
|
#Anton Danylenko
#SoftDev pd8
#Classwork
#2018-09-20
from flask import Flask, render_template
app = Flask(__name__)

@app.route('/')
def home():
    # Root route: plain-text greeting.
    return "Welcome"

# Fibonacci prefix handed to the template route below.
coll = [0, 1, 1, 2, 3, 5, 8]
@app.route('/my_foist_template')
def test():
    # Render template.html with a static title and the coll list defined
    # at module level.
    return render_template('template.html',
                           title="Title",
                           collection=coll)
if __name__ == "__main__":
app.debug = True
app.run()
|
# Print terms of an arithmetic progression, extending on demand:
# entering 0 when asked for more numbers ends the program.
n1 = int(input('Digite o primeiro termo: '))
r = int(input('Digite a razão: '))
term = 0
cont = 1
# FIX: renamed from `max`, which shadowed the builtin of the same name.
limit = 10
c = 1
while cont != 0:
    while c <= limit:
        term = n1 + r * (c - 1)
        print(term, end=' -> ')
        c += 1
    print('...')
    cont = int(input('Deseja mais quantos números ? '))
    limit += cont
#!/usr/bin/python
import re
# regular expression cheat-sheet / demo
mystring = "Hello, Wordl"
re.sub('dl$', 'ld', mystring)  # substitution (returns a new string)
'tea for too'.replace('too', 'two')  # string method for simple case
# BUG FIX: `html` was never defined, so the findall below raised NameError;
# give it sample content so the demo runs.
html = "<html></html>"
re.findall(pattern='<html>', string=html, flags=re.S)
# available flags
# re.IGNORECASE === re.I
# re.MULTILINE === re.M
# re.DOTALL === re.S   dot matches all chars, including '\n'
# re.UNICODE === re.U
# re.DEBUG  (typo fixed: was "re.DEBUT")
# re.LOCALE === re.L
from Board.Tile import Tile
from Pieces.NullPiece import NullPiece
from Pieces.King import King
from Pieces.Man import Man
class Board:
    """Checkers board of 64 tiles indexed 0..63, row-major from the top left.

    Black men start on alternating squares of rows 0-2, white men on
    alternating squares of rows 5-7.
    """

    def __init__(self):
        # BUG FIX: gameTiles was a *class* attribute, so every Board instance
        # shared one mutable dict; it is now per-instance state.
        self.gameTiles = {}

    def createBoard(self):
        """Fill all 64 tiles with NullPiece, then place the starting men."""
        for index in range(64):
            self.gameTiles[index] = Tile(index, NullPiece())
        for index in self.gameTiles:
            # Black: odd squares of row 0, even of row 1, odd of row 2.
            if ((index % 2 == 1 and 0 <= index < 8)
                    or (index % 2 == 0 and 8 <= index < 16)
                    or (index % 2 == 1 and 16 <= index < 25)):
                self.gameTiles[index] = Tile(index, Man("Black", index))
            # White: even squares of row 5, odd of row 6, even of row 7.
            elif ((index % 2 == 0 and 39 < index < 48)
                    or (index % 2 == 1 and 48 <= index < 56)
                    or (index % 2 == 0 and 56 <= index < 64)):
                self.gameTiles[index] = Tile(index, Man("White", index))

    def printBoard(self):
        """Dump the board to stdout, eight tiles per row."""
        column = 0
        for index in range(64):
            print('|', end=self.gameTiles[index].pieceOnTile.toString())
            column += 1
            if column == 8:
                print('|', end='\n')
                column = 0
|
#base_gui.py
from tkinter import *
import time
class App:
    '''
    Inspired by Ryan Godfrey's YouTube tutorial about tkinter
    (https://www.youtube.com/watch?v=v8YQYKDqLME), the App instance
    creates a pop up GUI, which will eventually be how the user interacts
    with todo-more.
    Right now the GUI has a button that adds new text fields, which sends
    input back to the class object
    Some tasks for the future:
        - Fix issue when you kill the program without exiting out of the
          GUI, but GUI stays alive forever.
        - GUI input communicates with already functional (cli) code.
        - Add "new text field" in the same record for multiple entries in
          the same row
        - Default text in the input text fields (that goes away on
          mouseclick) (see: https://mail.python.org/pipermail/tutor/2011-August/085092.html)
        - Hide/Remove Input rows from GUI
        - GUI to display data already existent in the todo textfile
    '''
    def __init__(self):
        '''
        Creates GUI with one button to get started.
        '''
        self.outputs = False
        # One StringVar per text field, in creation order.
        self.usertext_list = []
        self.master = Tk()
        self.master.title("todo more")
        self.row_count = 0
        # Widget catalog: name -> [grid column, button label, callback].
        self.cols = {"submit": [0, "Submit Entry", self.submit_entry],
                     "text_field": [1, "", None],
                     "add_new": [2, "Add Another Item",
                                 self.add_fields]}
        self.add_button(btype="add_new")
        # Blocks here until the window is closed.
        self.master.mainloop()

    def add_button(self, btype=None):
        '''
        To add either "Add New Row" type button, or "Add Another Text
        Field" type button. Can be used to add any sort of button to
        GUI, including "edit","undo","delete", and so on.
        '''
        if btype:
            self.new_field_button = Button(self.master,
                                           text=self.cols[btype][1],
                                           command=self.cols[btype][2])
            # Place the button in the catalog's column on the current row.
            self.new_field_button.grid(row=self.row_count,
                                       column=self.cols[btype][0])

    def add_fields(self, ftype="text_field"):
        '''
        Adds next text field, ready for input.
        '''
        if ftype:
            self.usertext_list.append(StringVar())
            #self.usertext.set("Enter some input")
            self.field_input = Entry(self.master,
                                     textvariable=self.usertext_list[-1])
            self.field_input.grid(row=self.row_count,
                                  column=self.cols[ftype][0])
            self.add_button(btype="submit")  # what if it's already there?
            self.row_count += 1  # this can be unit tested

    def submit_entry(self, entry=None):
        '''
        To show the developer print-function feedback about what is
        being ouput from text fields, and to send data to "to do.txt"
        Known Issues:
        - self.usertext.get() only looks at the most recently created
          text field. Each usertext instance needs to be unique,
          stored either in a dict or a list.
        '''
        print("Your input was, '%s,' and someday we'll save it to a file." %
              [ut.get() for ut in self.usertext_list])
App() |
#ColorMode Shift
from turtle import *
import random

screen = Screen()
turtle = Turtle()
# Clicking anywhere on the canvas sends the turtle to the clicked point.
screen.onscreenclick(turtle.goto)
# Blocks until the window is closed.
screen.mainloop()
|
# -*- coding: utf-8 -*-
import math

# SECURITY FIX: eval(input()) executed arbitrary user expressions;
# parse the side length as a plain number instead.
s = float(input())
# Area of a regular pentagon with side length s: 5*s^2 / (4*tan(pi/5)).
a = (5 * s ** 2) / (4 * math.tan(math.pi / 5))
print("Area = {:.4f}".format(a))
"""
在lesson01里边大改统计了目前已有的排序算法,既然排序算法大改整理完了,那么接下来就是查找算法。
即在数列中寻找目标值。
一半分为无序查找和有序查找,即被查找的数列是有序的还是无序的。
"""
"""
1.顺序查找,不要被这个名字骗了,它实际上是无序查找算法中的一种,从数据表的一端开始,顺序扫描,直到找到自己想要的值为止。
"""
def sequenceSearch(numList, targetNum):
    """Linear (unordered) search.

    Returns the 1-based position of the first match, or False when absent.
    """
    for position, candidate in enumerate(numList):
        if candidate == targetNum:
            return position + 1
    return False
"""
2.折半查找,时间复杂度为log2n,是有序查找的一种。
"""
def binarySearch(numList, targetNum):
    """Binary search over a sorted list (O(log n)).

    Returns the 1-based position of targetNum, or False when absent.

    BUG FIXES: the original computed mid as low + (high + low) / 2 (wrong
    once low > 0) and narrowed with `low = mid` / `high = mid`, which never
    excludes mid and could loop forever or miss boundary elements.
    """
    low = 0
    high = len(numList) - 1
    while low <= high:
        mid = (low + high) // 2
        if numList[mid] == targetNum:
            return mid + 1
        elif numList[mid] < targetNum:
            # Target lies strictly to the right of mid.
            low = mid + 1
        else:
            high = mid - 1
    return False
"""
3.插值查找,这种查找方式其实算是折半查找的改进版,比方说如果在字典中查Apple的话,绝对不会从中间开始查,而是会在开头查找,反过来如果查Zoo的话,也是从后边找,所以mid的定义就可以有操作了
"""
def insertSearch(numList, targetNum):
    """Interpolation search over a sorted numeric list.

    Like binary search, but the probe position is interpolated from the
    target's value relative to the endpoints. Returns the 1-based position
    of targetNum, or False when absent.

    BUG FIXES vs the original: empty input no longer crashes, equal
    endpoint values no longer divide by zero, and the bounds move past mid
    (`mid + 1` / `mid - 1`) so the loop always makes progress.
    """
    low = 0
    high = len(numList) - 1
    # The interpolation formula is only valid while the target lies within
    # the current value range.
    while low <= high and numList[low] <= targetNum <= numList[high]:
        if numList[high] == numList[low]:
            # Flat range: every element equals numList[low].
            return low + 1 if numList[low] == targetNum else False
        mid = low + (high - low) * (targetNum - numList[low]) // (numList[high] - numList[low])
        if numList[mid] == targetNum:
            return mid + 1
        elif numList[mid] < targetNum:
            low = mid + 1
        else:
            high = mid - 1
    return False
"""
4.斐波那契查找,也是二分查找的一种提升,也是在mid的取值上做文章,简单来说,就是用斐波那契数列来将整个数列分开。
其中有一个取巧的地方,mid=low+F(k-1)-1
解释一下为什么这么取值,由斐波那契数列可知,F(k)=F(k-1)+F(k-2),那F(k)-1=(F(k-1)-1)+(F(k-2)-1)+1,所以数组长度只要满足 F(k)-1,就可以将数组分为F(k-1)-1和F(k-2)-1左右两部分,其中mid=low+F(k-1)-1
"""
# Fibonacci generator used by fibSearch; k is the 0-based index into the
# sequence 1, 1, 2, 3, 5, 8, ...
def constructFib(k):
    """Return the k-th Fibonacci number (F(0) = F(1) = 1).

    PERFORMANCE FIX: the original used naive double recursion, which is
    exponential in k; this iterative form is O(k) with identical results
    (including returning 1 for k < 2).
    """
    a, b = 1, 1
    for _ in range(k - 1):
        a, b = b, a + b
    return b
def fibSearch(numList, targetNum):
    """Fibonacci search: a binary-search variant whose split point comes
    from Fibonacci numbers. Returns the 0-based index of targetNum, or
    False when absent.

    NOTE(review): unlike the other searches in this file this returns a
    0-based index; also `low = mid` / `high = mid` never exclude mid, and
    mid can exceed len(numList) - 1 when the list is not padded to a
    Fibonacci length — verify termination/bounds on real inputs.
    """
    listLength = len(numList)
    low = 0
    high = listLength - 1
    k = 0
    # Find the smallest k with constructFib(k) - 1 >= list length.
    while listLength > constructFib(k) - 1:
        k += 1
    mid = low + constructFib(k - 1) - 1
    while low < high:
        if targetNum == numList[mid]:
            return mid
        elif targetNum < numList[mid]:
            high = mid
            # k shrinks by 1: constructFib(k) - 1 is now the left part's length.
            k -= 1
        elif targetNum > numList[mid]:
            # k shrinks by 2: constructFib(k) - 1 is now the right part's length.
            low = mid
            k -= 2
        # Recompute the split point. When k < 3, constructFib(k - 1) is 1 and
        # mid == low; the low < high guard prevents this from looping on a
        # two-element remainder.
        mid = low + constructFib(k - 1) - 1
    return False
"""
5.树表查找,首先是最简单的树表查找,二叉树查找算法。
基本思想,二叉查找树先对待查找的树进行生成树,确保树的左分支的值小于父节点小于右分支的值,在查找之前,首先要针对已有数据构建二叉查找树。
如果对查找二叉树进行中序排列的话,可以得到有序数列。
"""
# Binary search tree (BST) node: left subtree < value < right subtree.
class BinarySearchTree():
    def __init__(self, value):
        self.value = value
        self.leftNode = None
        self.rightNode = None

    def insertNode(self, value):
        """Insert a value, keeping BST ordering; duplicates are ignored."""
        node = self
        while True:
            if value < node.value:
                if node.leftNode is None:
                    node.leftNode = BinarySearchTree(value)
                    return
                node = node.leftNode
            elif value > node.value:
                if node.rightNode is None:
                    node.rightNode = BinarySearchTree(value)
                    return
                node = node.rightNode
            else:
                # Equal value already present: nothing to do.
                return

    def findNode(self, value):
        """Return the node holding `value`, or None when absent."""
        node = self
        while node is not None:
            if value == node.value:
                return node
            node = node.rightNode if value > node.value else node.leftNode
        return None
return None
def bstSearch(numList, targetNum):
    """Build a BST from numList (deduplicated, order preserved) and return
    the node holding targetNum, or None when absent."""
    root = None
    # dict.fromkeys keeps first-occurrence order while dropping duplicates,
    # matching the original's dict(zip(...)).keys() trick.
    for key in dict.fromkeys(numList):
        if root is None:
            root = BinarySearchTree(key)
        else:
            root.insertNode(key)
    return root.findNode(targetNum)
"""
6.B-Tree,B树是一种多路查找树,相对于BST,BST的一个节点只能分成最多两个子节点,这样的话整个树的高度就相对较高,而B树是可以控制最多一个节点可以有
多少个子节点,这样可以有效控制整个树的高度,在查找数据中,树的高低是对效率有很大的影响的。
并且,所有树的叶子节点都在相同的高度,整个系统的稳定性也可以保证。
为满足B树的特点,树的构建过程就会比较复杂了。
"""
# 果然,B树的构建是十分复杂的,在网上找到的代码,仿写一遍吧,看看效果,写一个简单的B树,里边只存放数字。
class Entity(object):
    '''A data record: the (key, value) payload stored in the B-tree.'''
    def __init__(self, key, value):
        self.key = key
        self.value = value
# A B-tree node with the operations every node needs.
class Node():
    def __init__(self):
        self.parent = None
        self.entities = []   # data entries held by this node, sorted by key
        self.children = []   # child nodes, ordered by their first entry's key

    # Return the entity stored under `value`, or None (implicitly) when absent.
    def find(self, value):
        for i in self.entities:
            if value == i.key:
                return i

    # Remove the entity with `key`; returns (index, entity) when found.
    def delete(self, key):
        for i, e in enumerate(self.entities):
            if e.key == key:
                del self.entities[i]
                return (i, e)

    # A node with no children is a leaf.
    def isLeaf(self):
        return len(self.children) == 0

    # Add an entity, then re-sort entries by key.
    def addEntity(self, entity):
        self.entities.append(entity)
        self.entities.sort(key=lambda x: x.key)

    # Attach a child node, keeping children ordered by their first key.
    def addChildNode(self, node):
        self.children.append(node)
        node.parent = self
        self.children.sort(key=lambda x: x.entities[0].key)
class BTree():
    """A simple B-tree mapping keys to values.

    Nodes hold up to `size` entries; overflowing nodes are split and the
    middle entry moves up, so all leaves stay at the same depth.
    """
    def __init__(self, size=6):
        self.size = size      # max entries per node before a split
        self.root = None
        self.length = 0       # number of stored entries

    def addNode(self, key, value=None):
        # Inserting one datum grows the tree by one entry.
        self.length += 1
        # Empty tree: create the root and store the entry there.
        if not self.root:
            self.root = Node()
            self.root.addEntity(Entity(key, value))
        else:
            point = self.root
            # Descend to a leaf; structure is later rebuilt by splitting
            # (entries bubble up) when a node overflows.
            while not point.isLeaf():
                for i, e in enumerate(point.entities):
                    if e.key > key:
                        point = point.children[i]
                        break
                    # Key already present: overwrite the value in place and
                    # undo the length increment.
                    elif e.key == key:
                        self.length -= 1
                        e.value = value
                        return 0
                # Key larger than every entry: descend the rightmost child.
                if e.key < key:
                    point = point.children[-1]
            point.addEntity(Entity(key, value))
            if len(point.entities) > self.size:
                # Node overflow: split it, possibly cascading upwards.
                self.__split(point)

    def __split(self, node):
        """
        Split a node: its middle entry moves into the parent, and a new
        right sibling takes the entries (and children) above the middle.
        :param node:
        :return:
        """
        middle = int(len(node.entities) / 2)
        movedEntity = node.entities[middle]
        newNode = Node()
        for e in node.entities[middle + 1:]:
            newNode.addEntity(e)
        if not node.isLeaf():
            for c in node.children[middle + 1:]:
                newNode.addChildNode(c)
        node.entities = node.entities[:middle]
        node.children = node.children[:middle + 1]
        parent = node.parent
        if parent:
            parent.addEntity(movedEntity)
            parent.addChildNode(newNode)
            if len(parent.entities) > self.size:
                self.__split(parent)
        else:
            # Splitting the root: grow the tree one level taller.
            self.root = Node()
            self.root.addChildNode(node)
            self.root.addChildNode(newNode)
            self.root.addEntity(movedEntity)

    def findValue(self, key):
        # Returns the stored value, or a "not found" message (in Chinese).
        if self.__findNode(key):
            return self.__findNode(key).value
        else:
            return "该数据不存在"

    def __findNode(self, key):
        # Walk from the root to the leaf that would contain `key`.
        point = self.root
        while not point.isLeaf():
            for i, e in enumerate(point.entities):
                if e.key > key:
                    point = point.children[i]
                    break
                # Key found in an internal node: return that node.
                elif e.key == key:
                    return point
            if e.key < key:
                point = point.children[-1]
        return point.find(key)

    def printTree(self):
        # Breadth-first dump of every node's keys.
        tempList = []
        tempList.append(self.root)
        index = 0
        point = tempList[index]
        while point.children:
            tempList.extend(point.children)
            index += 1
            point = tempList[index]
        for i in tempList:
            for j in i.entities:
                print(j.key, end=' ')
            print(' ', end=' ')
if __name__ == '__main__':
    # NOTE(review): numList is never used below — confirm it is leftover.
    numList = [9, 4, 6, 8, 2, 1, 0, 3836]
    t = BTree(4)
    t.addNode(20)
    t.addNode(40)
    t.addNode(60)
    t.addNode(70, 'c')
    t.addNode(80)
    t.addNode(85)
    t.addNode(86)
    t.addNode(10)
    t.addNode(30)
    t.addNode(15, 'python')
    t.addNode(75, 'java')
    # t.printTree()
    # 71 was never inserted, so this prints the "not found" message.
    res = t.findValue(71)
    print(res)
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy as np
import sys
class CircularBuffer:
    """Append-only list with an endlessly repeating reader: get() walks the
    elements in order and wraps back to the start after the last one."""

    def __init__(self):
        self.nbElements = 0
        self.position = 0
        self.list = []

    def add(self, value):
        """Append a value to the end of the buffer."""
        self.list.append(value)
        self.nbElements = self.nbElements + 1

    def get(self):
        """Return the next element, wrapping around after the last one."""
        if self.position >= self.nbElements:
            self.position = 0
        value = self.list[self.position]
        self.position += 1
        return value
class FifoQueue:
    """Bounded FIFO: pushing beyond `size` elements drops the oldest one."""

    def __init__(self, size):
        self.queue = []
        self.size = size
        self.actualSize = 0

    def pop(self):
        # Remove and return the oldest element.
        return self.queue.pop(0)

    def push(self, value):
        # BUG FIX: the original capped actualSize with a hard-coded 999,
        # which only matched a queue of size 1000 (and over-counted for any
        # other size); the counter is now tied to self.size.
        if self.actualSize >= self.size:
            self.pop()
        else:
            self.actualSize += 1
        self.queue.append(value)
class Signal:
    """ECG signal loaded from a text file.

    Expected layout (0-based line numbers): line 1 ends with
    ":= <sampling rate>", line 2 with ":= <resolution>", and samples
    (one float per line) start at line 5.
    """

    def __init__(self, filepath):
        self.filepath = filepath
        self.buffer = CircularBuffer()
        self.visiblePart = FifoQueue(1000)
        # BUG FIX: the original opened the file without ever closing it;
        # a context manager guarantees the handle is released.
        with open(filepath, 'r') as file:
            for i, line in enumerate(file):
                if i == 1:
                    self.samplingRate = float(line.split(':=')[-1].strip())
                if i == 2:
                    self.resolution = int(line.split(':=')[-1].strip())
                if i > 4:
                    self.buffer.add(float(line.strip()))
        print(self.buffer.list)
signal = Signal("ecg.txt")
print(signal.samplingRate)
# Some api in the chain is translating the keystrokes to this binary string
# so instead of saying: ESCAPE = 27, we use the following.
ESCAPE = b'\x1b'
# Number of the glut window.
window = 0
def init(samplingRate):
    """One-time GL state setup: smoothing, blending, background color and
    the 2D projection used for plotting."""
    # Commands # 1
    glEnable(GL_POINT_SMOOTH)  # 1.1 If enabled, draw points with proper filtering. Otherwise, draw aliased points.
    # Draws points with smoothed edges: round points instead of squares.
    glEnable(GL_LINE_SMOOTH)   # 1.2 If enabled, draw lines with correct filtering. Otherwise, draw aliased lines.
    # Smooths line edges (anti-aliasing).
    glEnable(GL_BLEND)         # 1.3 If enabled, blend the computed fragment color values with the values in the color buffers.
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)  # 1.4 standard alpha blending
    # Commands # 2
    glClearColor(1.0, 1.0, 1.0, 1.0)  # 2.1 Clear the windows area with the corresponding bit (RGBA)
    # Map x to one second of samples and y to the displayed raw value range.
    gluOrtho2D(0, samplingRate, 1997, 2547)  # -0.5 mV to 5 mV
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard callback: ESC destroys the window and exits."""
    print(args[0])
    # If escape is pressed, kill everything.
    if args[0] == ESCAPE:
        glutDestroyWindow(window)
        sys.exit(0)
def plot_func():
    """Redraw the frame: clear, draw axes and grid, advance the visible
    window by 20 samples, and plot the waveform."""
    # Commands # 3
    glClear(GL_COLOR_BUFFER_BIT)  # 3.1 clear using the color set by glClearColor
    glColor3f(0.0, 0.0, 0.0)      # 3.2 color used for the next operation (the axes)
    glPointSize(3.0)              # 3.3 size for points
    glLineWidth(1.0)              # 3.4 width for lines
    # Commands # 4 # draw axis
    glBegin(GL_LINES)
    glVertex2f(-5.0, 0.0)  # x
    glVertex2f(5.0, 0.0)   # x
    glVertex2f(0.0, 5.0)   # y
    glVertex2f(0.0, -5.0)  # y
    glEnd()
    # Pink background grid: coarse 200x50 cells plus finer horizontal rules.
    for i in range(0, 1000, 200):
        # grid points
        for j in range(1997, 2547, 50):
            # vertical grid
            glBegin(GL_LINES)
            glColor3f(1, 0.5, 0.5)
            glVertex2fv([i, j])
            glVertex2fv([i, j + 50])
            glEnd()
            # horizontal grid
            glBegin(GL_LINES)
            glColor3f(1, 0.5, 0.5)
            glVertex2fv([i, j])
            glVertex2fv([i + 200, j])
            glEnd()
        for j in range(1997, 2547, 10):
            # vertical grid
            glBegin(GL_LINES)
            glColor3f(1, 0.5, 0.5)
            glVertex2fv([i, j])
            glVertex2fv([i, j + 50])
            glEnd()
            # horizontal grid
            glBegin(GL_LINES)
            glColor3f(1, 0.5, 0.5)
            glVertex2fv([i, j])
            glVertex2fv([i + 200, j])
            glEnd()
    # Shift 20 new samples (1000/50) into the visible window per frame.
    for i in range(0, 1000 // 50):
        signal.visiblePart.push(signal.buffer.get())
    vertices = signal.visiblePart.queue
    # Draw the waveform as connected segments.
    for i in range(len(vertices) - 1):
        glBegin(GL_LINES)
        glColor3f(0.8, 0.2, 0.8)
        glVertex2fv([i, vertices[i]])
        glVertex2fv([i + 1, vertices[i + 1]])
        glEnd()
    glutSwapBuffers()
def timerr(value):
    # Request a repaint, then re-arm the timer: drives ~50 redraws/second.
    glutPostRedisplay()
    glutTimerFunc(1000 // 50, timerr, 0)
def main():
    """Create the GLUT window, register callbacks, and enter the loop."""
    global window
    glutInit(())
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowPosition(50, 50)
    glutInitWindowSize(600, 600)
    glutCreateWindow(b"ECG")
    glutDisplayFunc(plot_func)
    # When we are doing nothing, redraw the scene.
    ######glutIdleFunc(plot_func)
    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc(keyPressed)
    # Initialization
    init(signal.samplingRate)
    # Main drawing loop
    glutTimerFunc(0, timerr, 0)
    glutMainLoop()
    # NOTE(review): glutMainLoop normally never returns, so this print is
    # unlikely to ever execute — confirm intended.
    print("Hit ESC key to quit.")

main()
|
import os
import math
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, LSTM, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
maotai = pd.read_csv("./SH600519.csv")
train_set = maotai.iloc[:2426 - 300, 2:3].values
test_set = maotai.iloc[2426 - 300:, 2:3].values
print(train_set)
sc = MinMaxScaler(feature_range=(0, 1))
train_set_scale = sc.fit_transform(train_set)
test_set_sacle = sc.transform(test_set)
x_train, y_train = [], []
x_test, y_test = [], []
for i in range(60, len(train_set_scale)):
x_train.append(train_set_scale[i - 60:i, 0])
y_train.append(train_set_scale[i, 0])
print(x_train)
print(y_train)
np.random.seed(7)
np.random.shuffle(x_train)
np.random.seed(7)
np.random.shuffle(y_train)
tf.random.set_seed(7)
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], 60, 1))
for i in range(60, len(test_set_sacle)):
x_test.append(test_set_sacle[i - 60:i, 0])
y_test.append(test_set_sacle[i, 0])
x_test, y_test = np.array(x_test), np.array(y_test)
x_test = np.reshape(x_test, (x_test.shape[0], 60, 1))
model = tf.keras.models.Sequential([
LSTM(80, return_sequences=True),
Dropout(0.2),
LSTM(100),
Dropout(0.2),
Dense(1)
])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss="mean_squared_error")
model_save_path = './maotai_LSTM_checkpoint/LSTM_stock.ckpt'
if os.path.exists(model_save_path + '.index'):
print("----------load model------------")
model.load_weights(model_save_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,
save_weights_only=True,
save_best_only=True,
monitor='val_loss')
history = model.fit(x_train, y_train, batch_size=32, epochs=50, validation_data=(x_test, y_test),
validation_freq=1, callbacks=[cp_callback])
model.summary()
with open("./lstm_maotai_weights.txt", "w") as f:
for v in model.trainable_variables:
f.write(str(v.name) + '\n')
f.write(str(v.shape) + '\n')
f.write(str(v.numpy()) + '\n')
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.plot(loss, label='training loss')
plt.plot(val_loss, label="validation loss")
plt.title("training and validation loss")
plt.legend()
plt.show()
predicted_price = sc.inverse_transform(model.predict(x_test))
real_price = sc.inverse_transform(test_set_sacle[60:])
plt.plot(real_price, color='red', label='maotai stock price')
plt.plot(predicted_price, color='blue', label='predicted maotai price')
plt.title("maotai stock predicted price")
plt.xlabel('time')
plt.ylabel('price')
plt.legend()
plt.show()
mse = mean_squared_error(predicted_price, real_price)
rmse = math.sqrt(mse)
mae = mean_absolute_error(predicted_price, real_price)
print(mse)
print(rmse)
print(mae) |
from django.urls import path
from . import views
# URL routes for this app: the landing view and the template demo.
urlpatterns = [
    path('first/', views.index),
    path('temp/', views.home_temp),
]
# SPACE PYTHON
import random
balance=100
def startgame():
    # Entry point: ask whether to play; a non-y answer routes through
    # endgame(), which offers a restart.
    Start_Game = input("Would you like to Start Game? ")
    if Start_Game[0].lower() == "y":
        print("Great")
        greet()
    elif Start_Game[0][0].lower() == "n":
        endgame()
    else:
        print("Please write yes or no")
        endgame()
def greet():
    # Introduce the game, hand the player the starting £100, and enter
    # the lottery loop.
    name = input("Hello! What is your name? ")
    print(f"hi {name}! you are given £100 to spend how you like")
    print("You are given a various life decisions, try not to lose you money!")
    print("Your bank pin is 3333 to get your money out")
    lotto(100)
def endgame():
    # Offer a restart; any answer not starting with 'y' ends the session.
    print('GAME OVER')
    userEnd = input("play again? ")
    if userEnd[0].lower() == 'y':
        startgame()
    else:
        print('fine, dont play')
def cashmachine(pin, amount, balance):
    """Withdraw `amount` (negative = deposit) and return the new balance.

    BUG FIX: on a wrong pin the original recursed into lotto() and then
    fell off the end, returning None — so callers doing
    `balance = cashmachine(...)` corrupted the balance to None. A wrong
    pin now leaves the balance untouched and returns it.
    """
    if pin == 3333:
        balance = balance - amount
        print(f"(taking £{amount} from your account. Your new balance is £{balance})")
        return balance
    print('wrong pin')
    return balance
def lotto(balance):
    # Main spending loop: offer a £5 lottery ticket while money remains.
    # NOTE(review): the y/n answer is read once but reused on every loop
    # iteration — confirm the missing re-prompt is intended.
    lottery = input("Do you want to spend £5 of your money on a lottery game? ")
    while balance > 0:
        if lottery[0].lower() == "y":
            pin = int(input("Please enter your pin "))
            balance = cashmachine(pin, 5, balance)
            play_lotto(balance)
        elif lottery[0].lower() == "n":
            # Zero-amount withdrawal just reprints the balance.
            balance = cashmachine(3333, 0, balance)
            dosomething(balance)
        else:
            print("Please write yes or no next time ")
            lotto(balance)
def play_lotto(balance):
    # Draw the player's three numbers from 1..30 without replacement.
    print("Your numbers for today are: ")
    lottery_numbers = list(range(1, 31))
    random.shuffle(lottery_numbers)
    three_nums = lottery_numbers[:3]
    print(three_nums)
    gennums(three_nums, balance)
def gennums(three_nums, balance):
    # Draw the winning numbers, count matches against the player's three,
    # and pay a prize (paid as a negative withdrawal, i.e. a deposit).
    result = str(input("Would you like today's results? "))
    if result[0].lower() == ("y"):
        print("Today's winning numbers are:")
        Correct_numbers = list(range(1, 31))
        random.shuffle(Correct_numbers)
        three_winning = Correct_numbers[:3]
        print(three_winning)
        counter = 0
        for number in three_nums:
            if number in three_winning:
                counter += 1
        print("You matched " + str(counter) + " numbers!")
        # prize = 0
        if counter == 0:
            prize = 0
            print("You win £ " + str(prize))
            balance = cashmachine(3333, -prize, balance)
            lotto(balance)
        elif counter == 1:
            prize = 100
            print("You win £ " + str(prize))
            balance = cashmachine(3333, -prize, balance)
            afterlotto(balance)
        elif counter == 2:
            prize = 400
            print("You win £ " + str(prize))
            balance = cashmachine(3333, -prize, balance)
            afterlotto(balance)
            # NOTE(review): this runs only after afterlotto() returns,
            # launching the puzzle on top of the earlier flow — confirm.
            password_puzzle()
        elif counter == 3:
            prize = 1000
            print("You win £ " + str(prize))
            balance = cashmachine(3333, -prize, balance)
            gamecomplete()
    else:
        print("Please enter 'yes/no'")
        gennums(three_nums, balance)
def afterlotto(balance):
    """Ask whether to buy another ticket (a) or spend the money (d).

    BUG FIX: the retry branch called afterlotto() with no argument,
    raising TypeError; the current balance is now passed through.
    """
    options = input("buy another one or do something with the money? a/d ")
    if options[0].lower() == "a":
        play_lotto(balance)
    elif options[0].lower() == "d":
        dosomething(balance)
    else:
        print("Either write 'a' or 'd'")
        afterlotto(balance)
def pet_snake():
    # Buy a pet snake; green leads to the password puzzle, red ends the game.
    # NOTE(review): nothing in this file calls pet_snake — confirm whether
    # it is dead code or wired up elsewhere.
    while True:
        buy_snake = input("You can afford a regular pet snake! To buy enter Y ")
        if not buy_snake[0].lower() == ("y"):
            print("Please enter Y ")
            continue
        else:
            print("Excellent. Which colour? RED or GREEN ")
            break
    colour_snake = input()
    while colour_snake != ("red") and colour_snake != ("green"):
        print("please choose red or green")
        colour_snake = input()
    if colour_snake == ("green"):
        print("You have a green snake.")
        password_puzzle()
    if colour_snake == ("red"):
        print("You have a red snake. it bites you!")
        endgame()
def gamecomplete():
    # Winning terminal state.
    print("CONGRATULATIONS, you completed the game!!")
def dosomething(balance):
    """Donate to charity, then branch into one of the life-choice scenes.

    BUG FIXES vs the original:
    - negative amounts fell into the `charity < 5` branch and *deposited*
      money; they now get the "zero or above" error and a re-prompt
    - donations of 70-99 had no matching branch and hit the error path;
      they now take the "thank you" path
    - the error-path re-prompt called dosomething() with no argument,
      raising TypeError
    """
    charity = int(input("How much of your money will you give to charity? £"))
    if charity < 0:
        print("put a number in zero or above")
        dosomething(balance)
    elif charity == 0:
        # Donating nothing costs £20 of karma, then the boat scene.
        print("karma gets you")
        cashmachine(3333, 20, balance)
        boat_seat = input("man or lady? Type M or L ")
        if boat_seat[0].lower() == ("m"):
            print("man")
            code_puzzle()
        elif boat_seat[0].lower() == ("l"):
            input("lady")
            riddle()
    elif charity < 5:
        print("thats not much, but something")
        cashmachine(3333, charity, balance)
        friend = input("Do you want to meet you friends?")
        if friend[0].lower() == "y":
            print("yes")
            print("You got coronavirus for being too social! Go home!")
            endgame()
        elif friend[0].lower() == "n":
            print("Being unsocial huh? ")
            code_puzzle()
    elif charity < 100:
        print("thank you")
        cashmachine(3333, charity, balance)
    else:
        print("holy fk")
        cashmachine(3333, charity, balance)
def password_puzzle():
    # String-concatenation riddle: "python" + "35" (LCM of 5 and 7).
    # Any wrong answer ends the game.
    print("the snake asks you a question")
    puzzle = input("Combine two strings to make the password: 'Which is the most common programming language' + 'the lowest common multiple of 5 and 7? ")
    if puzzle.lower() == ("python35"):
        print("Correct answer. You getthe snake. On the way back to the boat the guide falls down abottomless pit. You return to the boat nearby")
        gamecomplete()
    else:
        endgame()
def riddle():
    """Upside-down-year riddle; a correct answer completes the game.

    BUG FIX: the failure branch called lotto() with no argument, raising
    TypeError; it now falls back to the module-level starting balance.
    Also renamed the local that shadowed this function's own name.
    """
    print("solve this riddle...")
    answer = input("When was the last year that was the same upside down? ")
    if answer == ("1961"):
        print("""Woohoo! """)
        gamecomplete()
    else:
        print("wrong")
        lotto(balance)
def code_puzzle():
    """Fill-in-the-blank coding puzzle; a correct answer wins the game.

    BUG FIX: the failure branch called lotto() with no argument, raising
    TypeError; it now falls back to the module-level starting balance.
    """
    input("Here is the puzzle- # identify if a word has the same start and end letter! press enter to continue")
    print("")
    print("word = 'randomword'")
    print("if ...")
    print("    print('Same start and end letter')")
    print("else:")
    print("    print('different start and end letter')")
    print("")
    answer = input("what should replace the '...' (exclude spaces!): ")
    if answer == "word[0]==word[-1]:":
        print("CORRECT!!")
        gamecomplete()
    else:
        print("WRONG!")
        lotto(balance)
startgame() |
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
# Create your forms here.
# NOTE(review): this form class shares the name of the Product model it
# wraps, shadowing the model for any code below this point — confirm
# whether renaming (e.g. ProductForm) is safe for existing callers.
class Product(forms.ModelForm):
    class Meta:
        model = Product
        fields = ["img", "name", "price", "desc", "url"]
#
class NewUserForm(UserCreationForm):
    """Registration form collecting username, email, password and names."""
    email = forms.EmailField(max_length=50, required=True)
    username = forms.CharField(max_length=30, required=True)
    password = forms.CharField(required=True, max_length=50)
    first_name = forms.CharField(max_length=30, required=True)

    class Meta:
        model = User
        fields = ("username", "email", "password", "first_name", "last_name")

    def save(self, commit=True):
        """Copy cleaned data onto the user; persist when commit is True."""
        user = super(NewUserForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        user.username = self.cleaned_data['username']
        # SECURITY FIX: the original assigned the raw password directly to
        # user.password, storing it in plain text (and breaking login);
        # hash it through the model's set_password instead.
        user.set_password(self.cleaned_data['password'])
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        if commit:
            user.save()
        return user
from app import db
from werkzeug import generate_password_hash
class Base(db.Model):
    """Abstract base model: integer PK plus created/modified timestamps."""
    __abstract__ = True
    id = db.Column(db.Integer, primary_key=True)
    # Set once at insert time.
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    # Refreshed automatically on every update.
    date_modified = db.Column(
        db.DateTime,
        default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp())
class User(Base):
    """Application user persisted in the auth_user table.

    Name and email are stored lower-cased; the password is stored as a
    werkzeug hash, never in plain text.
    """
    __tablename__ = 'auth_user'
    # User Name
    name = db.Column(db.String(20), nullable=False)
    # Email
    email = db.Column(db.String(128), nullable=False)
    # Password (hashed)
    password = db.Column(db.String(128), nullable=False)

    def __init__(self, name, email, password):
        self.name = name.lower()
        self.email = email.lower()
        self.password = generate_password_hash(password)

    def __repr__(self):
        return f'[User {self.name}]'

    def save(self):
        """Persist this user and return it for chaining."""
        db.session.add(self)
        db.session.commit()
        return self

    def remove(self):
        """Delete this user and return it for chaining."""
        db.session.delete(self)
        db.session.commit()
        return self
#!/usr/bin/env python
'''
Utility to help decide which data each axis corresponds to based on a variety
of axis deciders.
'''
__author__ = 'Aditya Viswanathan'
__email__ = 'aditya@adityaviswanathan.com'
from axis import Axis
import csv
from date_axis_decider import DateAxisDecider
from title_axis_decider import TitleAxisDecider
class AxisDecision(object):
    """Decide which axis (row or column) of a CSV report holds dates and
    which holds titles, based on DateAxisDecider/TitleAxisDecider scores.
    """
    def __init__(self, file_name):
        self.file_name = file_name
        self.row_values = []  # cells indexed by row: row_values[r][c]
        self.col_values = []  # cells indexed by column: col_values[c][r]
        self.index_values()
        self.date_axis = Axis.NONE
        self.date_index = -1
        self.title_axis = Axis.NONE
        self.title_index = -1
    def index_values(self):
        """Read the CSV file and index every cell both by row and column."""
        # newline='' is the documented way to open files for the csv module;
        # without it embedded newlines in quoted fields can be mangled.
        with open(self.file_name, newline='') as csv_file:
            for row_index, row in enumerate(csv.reader(csv_file, delimiter=',')):
                self.row_values.append([])
                for col_index, col in enumerate(row):
                    self.row_values[row_index].append(col)
                    # Grow col_values lazily the first time a column index is seen.
                    if len(self.col_values)-1 < col_index:
                        self.col_values.append([])
                    self.col_values[col_index].append(col)
    @staticmethod
    def find_axis(row_decider, col_decider):
        """Return (Axis, decider) for whichever decider claims the axis.

        When both deciders qualify, the one whose top entry scores higher
        wins; (Axis.NONE, None) is returned when neither qualifies.
        """
        if row_decider.is_axis() and col_decider.is_axis():
            row_index = row_decider.top_indexes[0]
            col_index = col_decider.top_indexes[0]
            if (row_decider.entries_scores[row_index] >
                    col_decider.entries_scores[col_index]):
                return (Axis.ROW, row_decider)
            # BUG FIX: the original fell through to (Axis.NONE, None) when
            # both axes qualified but the column scored at least as high as
            # the row — the column decider should win that tie-break.
            return (Axis.COL, col_decider)
        elif row_decider.is_axis():
            return (Axis.ROW, row_decider)
        elif col_decider.is_axis():
            return (Axis.COL, col_decider)
        return (Axis.NONE, None)
    def find_date_axis(self):
        """Locate the date axis; returns (Axis, DateAxisDecider)."""
        row_date_decider = DateAxisDecider(self.row_values)
        col_date_decider = DateAxisDecider(self.col_values)
        return AxisDecision.find_axis(row_date_decider, col_date_decider)
    def find_title_axis(self):
        """Locate the title axis; returns (Axis, TitleAxisDecider)."""
        row_title_decider = TitleAxisDecider(self.row_values)
        col_title_decider = TitleAxisDecider(self.col_values)
        return AxisDecision.find_axis(
            row_title_decider, col_title_decider)
    def decide(self):
        """Populate date_axis/date_index and title_axis/title_index.

        Raises:
            Exception: when no date axis can be identified.
        """
        self.date_axis, date_axis_metadata = self.find_date_axis()
        self.title_axis, title_axis_metadata = self.find_title_axis()
        if self.date_axis is Axis.NONE:
            raise Exception('Unable to identify date axis of report file {csv}'
                            .format(csv=self.file_name))
        self.date_index = date_axis_metadata.top_indexes[0]
        if self.title_axis is Axis.NONE:
            # Unable to auto-assign title axis, resorting to picking
            # axis opposite to date axis.
            self.title_axis = Axis.opposite(self.date_axis)
            self.title_index = 0
        else:
            self.title_index = title_axis_metadata.top_indexes[0]
        # HACK: ensure title axis is not the same as date axis and if so,
        # prefer DateAxisDecider and pick the title axis to be the
        # opposite axis.
        if self.title_axis is self.date_axis:
            self.title_axis = Axis.opposite(self.date_axis)
|
"""
CCT 建模优化代码
A19 直线二极磁铁 LocalUniformMagnet
作者:赵润晓
日期:2021年5月2日
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
lcs = LocalCoordinateSystem(
location=P3(1,2,3),
x_direction=P3.y_direct(),
z_direction=P3.x_direct()
)
lum = LocalUniformMagnet(
local_coordinate_system=lcs,
length=0.5,
aperture_radius=0.05,
magnetic_field=50
)
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)))
print(lum.magnetic_field_at(P3(1,2,3)-P3.x_direct(0.001)))
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)+P3.y_direct(0.001)))
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)+P3.y_direct(0.5)))
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)-P3.y_direct(0.5))) |
"""
pg_activity
author: Julien Tachoires <julmon@gmail.com>
license: PostgreSQL License
Copyright (c) 2012 - 2016, Julien Tachoires
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose, without fee, and without a written
agreement is hereby granted, provided that the above copyright notice
and this paragraph and the following two paragraphs appear in all copies.
IN NO EVENT SHALL JULIEN TACHOIRES BE LIABLE TO ANY PARTY FOR DIRECT,
INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST
PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
EVEN IF JULIEN TACHOIRES HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
JULIEN TACHOIRES SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
BASIS, AND JULIEN TACHOIRES HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
class Process():
    """
    Simple class for process management.

    Plain value object describing one PostgreSQL backend process (pid,
    database, user, client, resource usage, current query, ...).
    ``extras`` is an optional dict for arbitrary additional key/values.
    """
    def __init__(self, pid = None, database = None, user = None, \
        client = None, cpu = None, mem = None, read = None, write = None, \
        state = None, query = None, duration = None, wait = None, extras = None):
        self.pid = pid
        self.database = database
        self.user = user
        self.client = client
        self.cpu = cpu
        self.mem = mem
        self.read = read
        self.write = write
        self.state = state
        self.query = query
        self.duration = duration
        self.wait = wait
        self.extras = extras
    def set_extra(self, key, value):
        """
        Set a pair of key/value in extras dict.
        """
        # BUG FIX: extras defaults to None, so the original raised
        # TypeError on first use; create the dict lazily instead.
        if self.extras is None:
            self.extras = {}
        self.extras[key] = value
    def get_extra(self, key):
        """
        Get a value from extras dict; None when absent or extras unset.
        """
        if self.extras is not None and key in self.extras:
            return self.extras[key]
        return None
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.console import Console
from pants.engine.goal import CurrentExecutingGoals, Goal, GoalSubsystem, LineOriented
from pants.engine.rules import collect_rules, goal_rule
from pants.option.scope import ScopeInfo
from pants.testutil.option_util import create_goal_subsystem, create_options_bootstrapper
from pants.testutil.rule_runner import RuleRunner, mock_console, run_rule_with_mocks
def test_line_oriented_goal() -> None:
    """A LineOriented goal can write via both output() and line_oriented()."""
    class OutputtingGoalOptions(LineOriented, GoalSubsystem):
        name = "dummy"
    class OutputtingGoal(Goal):
        subsystem_cls = OutputtingGoalOptions
        environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
    @goal_rule
    def output_rule(console: Console, options: OutputtingGoalOptions) -> OutputtingGoal:
        # output() writes raw text; line_oriented() appends the separator.
        with options.output(console) as write_stdout:
            write_stdout("output...")
        with options.line_oriented(console) as print_stdout:
            print_stdout("line oriented")
        return OutputtingGoal(0)
    with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
        result: OutputtingGoal = run_rule_with_mocks(
            output_rule,
            rule_args=[
                console,
                # sep is the raw two-character string "\n", exactly as the
                # option parser would receive it from the command line.
                create_goal_subsystem(OutputtingGoalOptions, sep="\\n", output_file=None),
            ],
        )
        assert result.exit_code == 0
        assert stdio_reader.get_stdout() == "output...line oriented\n"
def test_goal_scope_flag() -> None:
    """A goal subsystem's ScopeInfo uses its name as scope and is marked as a goal."""
    class DummyGoal(GoalSubsystem):
        name = "dummy"
    subsystem = create_goal_subsystem(DummyGoal)
    expected = ScopeInfo(scope="dummy", subsystem_cls=DummyGoal, is_goal=True)
    assert subsystem.get_scope_info() == expected
def test_current_executing_goals() -> None:
    """CurrentExecutingGoals injected into a @goal_rule lists the running goal."""
    class OutputtingGoalOptions(LineOriented, GoalSubsystem):
        name = "dummy"
        help = "dummy help"
    class OutputtingGoal(Goal):
        subsystem_cls = OutputtingGoalOptions
        environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
    @goal_rule
    def output_rule(
        console: Console,
        options: OutputtingGoalOptions,
        current_executing_goals: CurrentExecutingGoals,
    ) -> OutputtingGoal:
        with options.output(console) as write_stdout:
            write_stdout(f"current goals are: {', '.join(current_executing_goals.executing)}")
        return OutputtingGoal(0)
    # Run through a real RuleRunner (not mocks) so the engine itself
    # populates CurrentExecutingGoals.
    rule_runner = RuleRunner(
        rules=collect_rules(locals()),
    )
    result = rule_runner.run_goal_rule(OutputtingGoal)
    assert result.exit_code == 0
    assert result.stdout == "current goals are: dummy"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#import sys
#sys.path.remove('/opt/ros/melodic/lib/python2.7/dist-packages')
import glob
import pickle
import cv2
import rospy
import numpy as np
from sensor_msgs.msg import Image
# Module that bridges OpenCV images to/from ROS sensor_msgs/Image.
from cv_bridge import CvBridge
bridge = CvBridge()
# (originally: empty array to hold the incoming image)
#cv_image = np.empty(shape=[0])
# NOTE(review): node name "image_pulisher" looks like a typo for
# "image_publisher", but renaming would change the ROS node name — confirm
# nothing references it before fixing.
rospy.init_node('image_pulisher', anonymous=True)
camera_pub = rospy.Publisher('/TFF/camera_topic', Image, queue_size=10)
def get_cameramat_dist(filename):
    """Load the camera matrix and distortion coefficients from a pickle file.

    The file must contain a (mat, dist, rvecs, tvecs) tuple; only
    (mat, dist) is returned.
    """
    # FIX: use a context manager so the file handle is closed even when
    # pickle.load raises (the original leaked it on error).
    with open(filename, 'rb') as f:
        mat, dist, rvecs, tvecs = pickle.load(f)
    #print("camera matrix")
    #print(mat)
    #print("distortion coeff")
    #print(dist)
    return mat,dist
def main():
    """Capture frames from the V4L2 camera, undistort them using the
    pickled calibration, and publish them as ROS Image messages."""
    mat, dist = get_cameramat_dist("/home/nohs/catkin_ws/src/enet_ros/src/cam_calib.pkl")
    # GStreamer pipeline: 640x480 YUY2 @ 30 fps from /dev/video0, converted to BGR.
    cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=640, height=480, format=(string)YUY2,framerate=30/1 ! videoconvert ! video/x-raw,width=640,height=480,format=BGR ! appsink")
    ret, frame = cap.read()
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print("width:",width,"height:",height)
    #fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    #fps = cap.get(cv2.CAP_PROP_FPS)
    # Flip both axes — presumably the camera is mounted upside down (TODO confirm).
    frame = cv2.flip(frame, -1)
    rsz = cv2.resize(frame, dsize=(640,480))
    gray = cv2.cvtColor(rsz, cv2.COLOR_BGR2GRAY)
    h, w = gray.shape[:2]
    # Compute the optimal undistortion matrix once from the first frame.
    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mat,dist,(w,h),0,(w,h))
    #out = cv2.VideoWriter('out.avi', fourcc, fps, (int(480), int(360)))
    while(True):
        ret, frame = cap.read()
        frame = cv2.flip(frame,-1)
        #print(frame.shape)
        rsz = cv2.resize(frame, dsize=(640,480))
        gray = rsz
        # undistort
        # NOTE(review): the rectify maps depend only on fixed parameters and
        # could be computed once before the loop instead of per frame.
        mapx,mapy = cv2.initUndistortRectifyMap(mat,dist,None,newcameramtx,(w,h),5)
        res = cv2.remap(gray,mapx,mapy,cv2.INTER_LINEAR)
        # crop the image
        x,y,w,h = roi
        res = res[y:y+h, x:x+w]
        res = cv2.resize(res,(480,360))
        #out.write(res)
        #cv2.imshow('res',res)
        cv_image = bridge.cv2_to_imgmsg(res,'bgr8')
        camera_pub.publish(cv_image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/11/4 10:10
# @Author : wildkid1024
# @Site :
# @File : utils.py
# @Software: PyCharm
import os
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data.sampler import SubsetRandomSampler
from libs.config import Conf
from dataset.gtrsb_db import GTSRB
def load_data(dataset_name, root_dir='./dataset/', n_worker=0, batch_size=4096, transform=transforms.ToTensor()):
    """Build (train_loader, test_loader) for a named dataset.

    Supported names (matched case-insensitively): CIFAR10, CIFAR100,
    FMNIST, MNIST, SVHN, IMAGENET (ImageFolder layout), GTSRB.

    Args:
        dataset_name: dataset identifier.
        root_dir: parent directory containing the per-dataset folders.
        n_worker: number of DataLoader worker processes.
        batch_size: batch size for both loaders.
        transform: transform used only by the SVHN branch; all other
            branches build their own pipelines.

    Note: the ``transform`` default is evaluated once at import time;
    harmless here since ToTensor() is stateless.
    """
    dataset_dict = {'CIFAR10': datasets.CIFAR10,
                    'CIFAR100': datasets.CIFAR100,
                    'FMNIST': datasets.FashionMNIST,
                    'MNIST': datasets.MNIST,
                    'SVHN': datasets.SVHN,
                    }
    data_dir = root_dir + dataset_name.upper()
    if dataset_name.upper() == 'IMAGENET':
        data_dir = root_dir + 'ImageNet/'
        traindir = os.path.join(data_dir, 'train')
        testdir = os.path.join(data_dir, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        transformer = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        train_dataset = datasets.ImageFolder(traindir, transform=transformer)
        test_dataset = datasets.ImageFolder(testdir, transform=transformer)
    elif dataset_name.upper() == 'SVHN':
        # BUG FIX: index with the upper-cased name so 'svhn' works too,
        # consistent with every other branch of this function.
        train_dataset = dataset_dict[dataset_name.upper()](root=data_dir, split='train', transform=transform)
        test_dataset = dataset_dict[dataset_name.upper()](root=data_dir, split='test', transform=transform)
    elif dataset_name.upper() == 'GTSRB':
        traindir = os.path.join(data_dir, 'GTSRB/Final_Training/Images/')
        valdir = os.path.join(data_dir, 'GTSRB/Final_Test/')
        transform = transforms.Compose([
            # FIX: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the documented drop-in replacement.
            transforms.Resize(48),
            transforms.CenterCrop((48, 48)),
            transforms.ToTensor()
        ])
        train_dataset = GTSRB(
            root=traindir,
            train=True,
            transform=transform
        )
        test_dataset = GTSRB(
            root=valdir,
            train=False,
            transform=transform
        )
    else:
        dataset_name = dataset_name.upper()
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_dataset = dataset_dict[dataset_name](
            root=data_dir, train=True, transform=transform_train, download=True)
        test_dataset = dataset_dict[dataset_name](
            root=data_dir, train=False, transform=transform_test, download=True)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=n_worker, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=n_worker, pin_memory=True)
    return train_loader, test_loader
def get_split_train_dataset(dataset_name, batch_size, n_worker, val_size, train_size=None, random_seed=1,
                            data_root='data/imagenet', for_inception=False, shuffle=True):
    """Split a training set into train/val DataLoaders.

    For 'imagenet' the split is index-based over an ImageFolder tree (the
    val subset is drawn from the TRAIN directory but uses the test-time
    transform); for 'gtsrb' a random 70/30 split is used.

    Returns:
        (train_loader, val_loader, n_class)

    Raises:
        NotImplementedError: for any other dataset name.
    """
    if shuffle:
        index_sampler = SubsetRandomSampler
    else:
        # use the same order
        class SubsetSequentialSampler(SubsetRandomSampler):
            def __iter__(self):
                return (self.indices[i] for i in torch.arange(len(self.indices)).int())
        index_sampler = SubsetSequentialSampler
    print('==> Preparing data..')
    if dataset_name == 'imagenet':
        data_root = data_root + 'ImageNet/'
        traindir = os.path.join(data_root, 'train')
        valdir = os.path.join(data_root, 'val')
        assert os.path.exists(traindir), traindir + ' not found'
        assert os.path.exists(valdir), valdir + ' not found'
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        input_size = 299 if for_inception else 224
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.Resize(int(input_size/0.875)),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize,
        ])
        trainset = datasets.ImageFolder(traindir, train_transform)
        # Deliberately the train directory: validation indices are carved
        # out of the training set below, only with eval-time transforms.
        valset = datasets.ImageFolder(traindir, test_transform)
        n_train = len(trainset)
        indices = list(range(n_train))
        # shuffle the indices deterministically so splits are reproducible
        np.random.seed(random_seed)
        np.random.shuffle(indices)
        assert val_size < n_train, 'val size should less than n_train'
        train_idx, val_idx = indices[val_size:], indices[:val_size]
        if train_size:
            train_idx = train_idx[:train_size]
        print('Data: train: {}, val: {}'.format(len(train_idx), len(val_idx)))
        train_sampler = index_sampler(train_idx)
        val_sampler = index_sampler(val_idx)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler,
                                                   num_workers=n_worker, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, sampler=val_sampler,
                                                 num_workers=n_worker, pin_memory=True)
        n_class = 1000
    elif dataset_name == 'gtsrb':
        traindir = os.path.join(data_root, 'GTSRB/Final_Training/Images/')
        valdir = os.path.join(data_root, 'val')
        transform = transforms.Compose([
            # FIX: transforms.Scale was deprecated and later removed from
            # torchvision; Resize is the documented drop-in replacement.
            transforms.Resize(48),
            transforms.CenterCrop((48, 48)),
            transforms.ToTensor()
        ])
        trainset = GTSRB(
            root=traindir,
            train=True,
            transform=transform
        )
        N = int(len(trainset) * 0.7)
        train_db, val_db = torch.utils.data.random_split(trainset, [N, len(trainset)-N])
        train_loader = torch.utils.data.DataLoader(
            train_db,
            batch_size=batch_size,
            shuffle=True,
        )
        val_loader = torch.utils.data.DataLoader(
            val_db,
            batch_size=batch_size,
            shuffle=True,
        )
        n_class = 43
    else:
        raise NotImplementedError
    return train_loader, val_loader, n_class
|
#!/usr/bin/python3
import socket
import sys
import _thread
import queue
from array import array
import time
# memory allocation
# Thread-safe queues shuttling datagrams between receive/verify/send workers.
dataFromClient = queue.Queue()
dataToSendClient = queue.Queue()
# network configuration
serverAddress = ('localhost', 4211)
clientAddress = ('localhost', 4212)
# 16-bit magic number expected at the start of every client datagram.
ProtocolID = 0x7A28
def send_data_to_client():
    """Consume outbound datagrams from dataToSendClient forever.

    NOTE(review): the actual sendto is commented out (clientConnection is
    never created), so items are currently dequeued and dropped — confirm
    whether sending is meant to be enabled.
    """
    print("1")
    while True:
        data_item = dataToSendClient.get()
        #clientConnection.sendto(data_item, clientAddress)
        print("Sent data to client...")
def receive_data_from_client():
    """Bind a UDP socket on serverAddress and enqueue every datagram
    received onto dataFromClient (runs forever)."""
    print("2")
    client_connection = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client_connection.bind(serverAddress)
    while True:
        # 2048 bytes is the maximum datagram size accepted per recvfrom.
        data_item, address = client_connection.recvfrom(2048)
        dataFromClient.put(data_item)
        print("Received data from client...")
def verify_client_data():
    """Check each queued datagram against the protocol magic (runs forever).

    NOTE(review): ProtocolID is 0x7A28 but the check reads byte[1]==0x7A and
    byte[0]==0x28, i.e. the ID is expected low byte first (little-endian) —
    confirm the client really transmits it in that order.
    """
    print("3")
    while True:
        data_item = dataFromClient.get()
        if data_item[1] == 0x7A and data_item[0] == 0x28:
            print("valid data")
        else:
            print("INVALID data")
def main():
    """Start the send/receive/verify worker threads and keep the process alive."""
    print(sys.version)
    print("Starting up server...")
    _thread.start_new_thread(send_data_to_client, ())
    _thread.start_new_thread(receive_data_from_client, ())
    _thread.start_new_thread(verify_client_data, ())
    # keep main thread running for subthreads to not exit.
    # PERF FIX: the original `while 1: pass` busy-waited and pinned a full
    # CPU core; sleeping keeps the process alive while yielding the CPU.
    while True:
        time.sleep(1)
main()
|
from Jumpscale import j
# Domains served by this 3bot deployment, taken from the Jumpscale config.
THREEBOT_DOMAIN = j.core.myenv.config.get("THREEBOT_DOMAIN")
EXPLORER_DOMAIN = j.core.myenv.config.get("EXPLORER_ADDR")
# Address of the master node hosting the gateway redis backend.
MASTERIP = "192.168.99.254"
class gateway(j.baseclasses.threebot_actor):
    # Threebot actor wrapping two redis-backed services on the master node:
    # CoreDNS record management (domain_* methods) and the TCP router
    # (tcpservice_* methods). Method docstrings contain Jumpscale ```in```
    # schemas that the framework parses — do not edit them.
    # COREDNS redis backend
    def _init(self, **kwargs):
        # QUESTION: should it work against local database or against remote one? as it's generic enough
        # NOTE(review): when the connection test fails, _gateway/explorer are
        # never set, so every later actor call raises AttributeError —
        # confirm whether failing fast here would be preferable.
        if j.sal.nettools.waitConnectionTest(MASTERIP, port=6378, timeout=1):
            redisclient = j.clients.redis.get(MASTERIP, port=6378)
            self._gateway = j.tools.tf_gateway.get(redisclient)
            self.explorer = j.clients.gedis.get(
                name="phonebook_explorer", host=EXPLORER_DOMAIN, port=8901, package_name="tfgrid.phonebook"
            )
        else:
            self._log_error(f"CONNECTION ERROR TO {MASTERIP}")
    @j.baseclasses.actor_method
    def domain_list(self, schema_out=None, user_session=None):
        return self._gateway.domain_list()
    @j.baseclasses.actor_method
    def domain_exists(self, domain, schema_out=None, user_session=None):
        """
        ```in
        domain = (S)
        ```
        """
        return self._gateway.domain_exists(domain)
    @j.baseclasses.actor_method
    def domain_dump(self, domain, schema_out=None, user_session=None):
        """
        ```in
        domain = (S)
        ```
        """
        return self._gateway.domain_dump(domain)
    @j.baseclasses.actor_method
    def subdomain_get(self, domain, subdomain):
        """
        ```in
        domain = (S)
        subdomain = (S)
        ```
        """
        return self._gateway.subdomain_get(domain, subdomain)
    @j.baseclasses.actor_method
    def domain_register_a(self, name, domain, record_ip, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        record_ip = (S)
        ```
        """
        return self._gateway.domain_register_a(name, domain, record_ip)
    @j.baseclasses.actor_method
    def domain_register_aaaa(self, name, domain, record_ip, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        record_ip = (S)
        ```
        """
        return self._gateway.domain_register_aaaa(name, domain, record_ip)
    @j.baseclasses.actor_method
    def domain_register_cname(self, name, domain, host, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        ```
        """
        return self._gateway.domain_register_cname(name, domain, host)
    @j.baseclasses.actor_method
    def domain_register_ns(self, name, domain, host, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        ```
        register NS record
        :param name: name
        :type name: str
        :param domain: str, defaults to "bots.grid.tf."
        :type domain: str, optional
        :param host: host
        :type host: str
        """
        return self._gateway.domain_register_ns(name, domain, host)
    @j.baseclasses.actor_method
    def domain_register_txt(self, name, domain, text, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        text = (S)
        ```
        """
        return self._gateway.domain_register_txt(name, domain, text)
    @j.baseclasses.actor_method
    def domain_register_mx(self, name, domain, host, priority=10, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        priority = 10
        ```
        """
        return self._gateway.domain_register_mx(name, domain, host, priority)
    @j.baseclasses.actor_method
    def domain_register_srv(
        self, name, domain, host, port, priority=10, weight=100, schema_out=None, user_session=None
    ):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        port = (I)
        priority = 10
        weight = 100
        ```
        """
        return self._gateway.domain_register_srv(name, domain, host, port, priority, weight)
    # The domain_unregister_* methods mirror the register methods above and
    # remove the corresponding record type from CoreDNS.
    @j.baseclasses.actor_method
    def domain_unregister_a(self, name, domain, record_ip, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        record_ip = (S)
        ```
        """
        return self._gateway.domain_unregister_a(name, domain, record_ip)
    @j.baseclasses.actor_method
    def domain_unregister_aaaa(self, name, domain, record_ip, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        record_ip = (S)
        ```
        """
        return self._gateway.domain_unregister_aaaa(name, domain, record_ip)
    @j.baseclasses.actor_method
    def domain_unregister_cname(self, name, domain, host, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        ```
        """
        return self._gateway.domain_unregister_cname(name, domain, host)
    @j.baseclasses.actor_method
    def domain_unregister_ns(self, name, domain, host, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        ```
        unregister NS record
        :param name: name
        :type name: str
        :param domain: str, defaults to "bots.grid.tf."
        :type domain: str, optional
        :param host: host
        :type host: str
        """
        return self._gateway.domain_unregister_ns(name, domain, host)
    @j.baseclasses.actor_method
    def domain_unregister_txt(self, name, domain, text, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        text = (S)
        ```
        """
        return self._gateway.domain_unregister_txt(name, domain, text)
    @j.baseclasses.actor_method
    def domain_unregister_mx(self, name, domain, host, priority=10, schema_out=None, user_session=None):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        priority = 10
        ```
        """
        return self._gateway.domain_unregister_mx(name, domain, host, priority)
    @j.baseclasses.actor_method
    def domain_unregister_srv(
        self, name, domain, host, port, priority=10, weight=100, schema_out=None, user_session=None
    ):
        """
        ```in
        name = (S)
        domain = (S)
        host = (S)
        port = (I)
        priority = 10
        weight = 100
        ```
        """
        return self._gateway.domain_unregister_srv(name, domain, host, port, priority, weight)
    ## TCP Router redis backend
    @j.baseclasses.actor_method
    def tcpservice_ip_register(self, domain, privateip="", schema_out=None, user_session=None):
        """
        ```in
        domain = (S)
        privateip = (S)
        ```
        """
        return self._gateway.tcpservice_register(domain, privateip)
    @j.baseclasses.actor_method
    def tcpservice_dump(self, domain):
        return self._gateway.tcpservice_dump(domain)
    @j.baseclasses.actor_method
    def tcpservice_unregister(self, domain):
        return self._gateway.tcpservice_unregister(domain)
    @j.baseclasses.actor_method
    def tcpservice_client_register(self, domain, client_secret="", schema_out=None, user_session=None):
        """
        ```in
        domain = (S)
        client_secret = (S)
        ```
        """
        return self._gateway.tcpservice_register(domain=domain, client_secret=client_secret)
    @j.baseclasses.actor_method
    def domain_tcpservice_ip_expose(self, threebot_name, privateip, signature, schema_out=None, user_session=None):
        """
        Registers a domain in coredns (needs to be authoritative)
        ```in
        threebot_name = (S) # 3bot threebot_name
        privateip = (S)
        signature = (S) #the signature of the payload "{threebot_name}"
        ```
        """
        # The signature proves the caller controls the 3bot name before we
        # expose DNS/TCP entries for it.
        result = self.explorer.actors.phonebook.validate_signature(
            name=threebot_name, payload=threebot_name, signature=signature
        )
        if not result.is_valid:
            raise j.exceptions.Value("Invalid signature")
        fqdn = f"{threebot_name}.{THREEBOT_DOMAIN}"
        self._gateway.tcpservice_register(fqdn, privateip)
        self._gateway.domain_register_cname("@", f"{threebot_name}.{THREEBOT_DOMAIN}", f"{THREEBOT_DOMAIN}.")
        self._gateway.domain_register_a(threebot_name, f"{THREEBOT_DOMAIN}", privateip)
        return True
    @j.baseclasses.actor_method
    def domain_tcpservice_client_expose(
        self, threebot_name, client_secret, signature, schema_out=None, user_session=None
    ):
        """
        Registers a domain in coredns (needs to be authoritative)
        ```in
        threebot_name = (S) # 3bot threebot_name
        client_secret = (S)
        signature = (S) #the signature of the payload "{threebot_name}"
        ```
        """
        result = self.explorer.actors.phonebook.validate_signature(
            name=threebot_name, payload=threebot_name, signature=signature
        )
        if not result.is_valid:
            raise j.exceptions.Value("Invalid signature")
        fqdn = f"{threebot_name}.{THREEBOT_DOMAIN}"
        self._gateway.tcpservice_register(fqdn, client_secret=client_secret)
        # Point the A record at the first resolved address of the 3bot domain.
        ips = j.tools.dnstools.default.namerecords_get(THREEBOT_DOMAIN)
        self._gateway.domain_register_a(threebot_name, f"{THREEBOT_DOMAIN}", ips[0])
        return True
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 18:08:52 2021
@author: osman

Streamlit front end: the user describes their company (business sector,
region, share capital) and Tools.search() lists similar "twin" companies.
"""
import streamlit as st
from Tools import *
st.title('Entreprise Jumeaux')
st.write("Vous voulez savoir quels sont les entreprises que ressemble à la vôtre ?")
st.write("**Alors remplissez le formulaire ci-dessous**")
st.header("**Formulaire**")
# Business-sector input
activite = st.text_input('Secteur d\'Activité :','Ex: Patisserie')
# Region input
region = st.text_input('Région :','Ex: île-de-france')
# Share capital
capital = st.number_input('Capital',value=1000.0)
# Look up the "twin" companies and display the result list
st.header('**Trouver les entreprise Jumeaux**')
if st.button('Chercher'):
    liste_result = search(activite, region, capital)
    st.write(liste_result)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
get_traceable_object_brand_list_dropdown_query = """
SELECT tobb.id AS id,
tobb.name AS name
FROM public.traceable_object_brand AS tobb
WHERE tobb.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
tobb.name ILIKE $1::VARCHAR || '%' OR
tobb.name ILIKE '%' || $1::VARCHAR || '%' OR
tobb.name ILIKE $1::VARCHAR || '%')
"""
|
from fastapi import FastAPI
from db import Virus, MeaslesEp, MumpsEp, RubellaEp, row2dict, result2dict
from sqlalchemy import and_, or_, func
from typing import List
from starlette.middleware.cors import CORSMiddleware
from collections import defaultdict
from Bio.Align.Applications import MuscleCommandline, ClustalOmegaCommandline
import uuid
import os
import subprocess
app = FastAPI()
# CORS: allow any origin/method/header so a separately-hosted front end can call this API.
app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"])
def runCommand(command):
    """Run *command* (argv list) and return its captured stdout decoded as UTF-8."""
    completed = subprocess.run(command, stdout=subprocess.PIPE)
    return completed.stdout.decode('utf-8')
def getUniqeValues(field_name, filter_):
    """Return the distinct values of *field_name* matching *filter_*,
    sorted with None and "N/A" entries placed last."""
    column = getattr(Virus, field_name)
    rows = Virus.query.with_entities(column).filter(filter_).distinct().all()
    values = [row[0] for row in rows]
    return sorted(values, key=lambda v: (v is None or v == "N/A", v))
def getUniqeValues2(field_name1, field_name2, filter_):
    """Return distinct (field_name1, field_name2) pairs matching *filter_*."""
    first = getattr(Virus, field_name1)
    second = getattr(Virus, field_name2)
    return Virus.query.with_entities(first, second).filter(filter_).distinct().all()
@app.get("/")
def read_root():
return row2dict(Virus.query.first())
@app.post("/algo/msa/{msa_type}")
def algo_msa(msa_type: str, seq_id: List[int], consensus: bool = None):
if len(seq_id) > 10:
return "Cannot process more than 10 sequences for MSA. Operation aborted."
result = Virus.query.with_entities("id", "fasta").filter(Virus.id.in_(seq_id))
result_dict = {}
for r in result:
result_dict[r[0]] = r[1]
fasta_file = "tmp/%s" % str(uuid.uuid4())
with open(fasta_file, "w") as fasta:
# Ensure ordering of sequences based on input
for i in seq_id:
fasta.write(result_dict[i]+ "\n\n")
msa_command = None
if msa_type == "muscle":
msa_command = MuscleCommandline("muscle", input=fasta_file, html=True, quiet=True)
ret = msa_command()
elif msa_type == "clustalo":
msa_command = ClustalOmegaCommandline(infile=fasta_file)
ret = msa_command()
else: # if msa_type == "mview":
clustal_file = "tmp/%s" % str(uuid.uuid4())
msa_command = ClustalOmegaCommandline(infile=fasta_file, outfile=clustal_file)
msa_command()
con = "on" if consensus else "off"
ret = runCommand(["mview", "--css", "on", "--pcid", "aligned", "--ruler", "on", "--width", "80",
"-coloring", "mismatch", "-colormap", "pink", "-consensus", con, "-con_threshold", "100",
"-html", "head", "-in", "fasta", clustal_file])
os.remove(clustal_file)
os.remove(fasta_file)
return ret
@app.post("/map/by_criteria/{virus_specimen}")
def read_map_criteria(virus_specimen: str, gene_symbol: str = None, host: str = None,
country: str = None, collection_date: int = None):
ftr = Virus.virus_specimen == virus_specimen
if gene_symbol != None:
ftr = and_(ftr, Virus.gene_symbol == gene_symbol)
if host != None:
ftr = and_(ftr, Virus.host == host)
if country != None:
ftr = and_(ftr, Virus.country == country)
if collection_date != None:
ftr = and_(ftr, Virus.collection_date == collection_date)
result = Virus.query.with_entities("country").filter(ftr).all()
ret = defaultdict(int)
for i in result:
ret[i[0]] += 1
return [[k,v] for k,v in ret.items()]
@app.get("/viruses/search/by_accession/{accession_number}")
def read_virus(accession_number: str):
result = Virus.query.filter(or_(Virus.genbank_genome_accession == accession_number,
Virus.genbank_protein_accession == accession_number)).all()
return result2dict(result)
@app.get("/epitopes/{specimen}")
def read_epitopes(specimen: str):
EpTable = None
if specimen == "mumps":
EpTable = MumpsEp
elif specimen == "rubella":
EpTable = RubellaEp
else:
EpTable = MeaslesEp
result = EpTable.query.all()
return result2dict(result)
@app.post("/viruses/search/by_criteria/{sequence_type}/{virus_specimen}")
def read_virus_by_criteria(virus_specimen: str, sequence_type: str, gene_symbol: List[str] = None, host: List[str] = None,
country: List[str] = None, collection_date: List[str] = None):
in_gene_symbol = True if gene_symbol is None else Virus.gene_symbol.in_(gene_symbol)
in_host = True if host is None else Virus.host.in_(host)
in_country = True if country is None else Virus.country.in_(country)
in_collection_date = True if collection_date is None else Virus.collection_date.in_(collection_date)
and_filter = and_(Virus.sequence_type == sequence_type, Virus.virus_specimen == virus_specimen,
in_gene_symbol, in_host, in_country, in_collection_date)
result = Virus.query.filter(and_filter)
ret = result2dict(result.all())
return ret
@app.post("/viruses/search_criteria/result_count/{sequence_type}/{virus_specimen}")
def read_search_criteria_count(virus_specimen: str, sequence_type: str, gene_symbol: List[str] = None, host: List[str] = None,
country: List[str] = None, collection_date: List[str] = None):
in_gene_symbol = True if gene_symbol is None else Virus.gene_symbol.in_(gene_symbol)
in_host = True if host is None else Virus.host.in_(host)
in_country = True if country is None else Virus.country.in_(country)
in_collection_date = True if collection_date is None else Virus.collection_date.in_(collection_date)
and_filter = and_(Virus.sequence_type == sequence_type, Virus.virus_specimen == virus_specimen, in_gene_symbol, in_host, in_country, in_collection_date)
result = Virus.query.filter(and_filter)
ret = result.count()
return ret
@app.post("/viruses/search_criteria/{sequence_type}/{virus_specimen}")
def read_search_criteria_ex(virus_specimen: str, sequence_type: str, gene_symbol: List[str] = None, host: List[str] = None,
country: List[str] = None, collection_date: List[str] = None):
in_gene_symbol = True if gene_symbol is None else Virus.gene_symbol.in_(gene_symbol)
in_host = True if host is None else Virus.host.in_(host)
in_country = True if country is None else Virus.country.in_(country)
in_collection_date = True if collection_date is None else Virus.collection_date.in_(collection_date)
and_filter = and_(Virus.sequence_type == sequence_type, Virus.virus_specimen == virus_specimen,
in_gene_symbol, in_host, in_country, in_collection_date)
ret = {}
ret["genes"] = getUniqeValues2("gene_symbol", "gene_product_name", and_filter)
ret["gene_symbol"] = getUniqeValues("gene_symbol", and_filter)
ret["gene_product_name"] = getUniqeValues("gene_product_name", and_filter)
ret["host"] = getUniqeValues("host", and_filter)
ret["country"] = getUniqeValues("country", and_filter)
ret["collection_date"] = getUniqeValues("collection_date", and_filter)
return ret
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-02 13:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the author -> user foreign key.
    # Django tracks applied migrations by module name; edit only via new migrations.
    dependencies = [
        ('user', '0005_auto_20160402_1936'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='author',
            name='user',
        ),
    ]
#!/usr/bin/env python3
from time import sleep
from _thread import start_new_thread
from os.path import exists
from sys import exit
from time import time
# Trading parameters: limit/stop are multiplied by the open price to set
# take-profit (+0.1%) and stop-loss (-0.05%); position size is value * p.
limit = 1.001
stop = 0.9995
value = 10
p = 1
class Instrument:
    """Tracks one FX instrument through an fxcmpy-style ``fx`` connection:
    logs every tick to a per-instrument CSV and keeps a single long
    position open at all times (polling loop runs in its own thread).
    """
    name = ""
    log_file = None
    def __init__(self, name, fx):
        self.name = name
        self.fx = fx
        self.is_opened = False
        self.init_log_file()
        start_new_thread(self.init_clandles, ())
    def init_clandles(self):
        """Poll prices forever: (re)open a trade whenever none is open and
        append every tick to the CSV log. Runs in a worker thread."""
        self.fx.subscribe_market_data(self.name, ())
        while True:
            try:
                d = self.fx.get_last_price(self.name)
                if not (self.is_opened):
                    # limit/stop are priced as fixed multiples of the bid d[0].
                    self.fx.open_trade(symbol=self.name, is_buy=True, amount=str(value*p), time_in_force='GTC', order_type='AtMarket', is_in_pips=False, limit=(d[0]*limit), stop=(d[0]*stop))
                    print("Opened trade - " + self.name + ":\topen = " + str(round(d[0], 6)) +"\tamount = " + str(round(value*p, 6)) + "\tlimit = " + str(round(d[0]*limit, 6)) + "\tstop = " + str(round(d[0]*stop, 6)))
                # Re-derive is_opened from the broker's open positions.
                currencies = self.fx.get_open_positions()['currency']
                self.is_opened = False
                for currency in currencies:
                    if (self.name == currency):
                        self.is_opened = True
                #print("Thread-" + self.name + " | ", d[0], d[1], d[2], d[3])
                self.log_file.write(str(int(time() * 1000)) + ',' + str(d[0]) + ',' + str(d[1]) + ',' + str(d[2]) + ',' + str(d[3]) + "\n")
                self.log_file.flush()
                sleep(1)
            except Exception:
                # BUG FIX: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt; catch ordinary errors only,
                # then close the connection and terminate as before.
                self.fx.close()
                exit(1)
    def init_log_file(self):
        """Create the CSV log with a header on first use, then open for append."""
        if not exists(self.get_log_path()):
            d = open(self.get_log_path(), "w+")
            d.write("Timestamp,Bid,Ask,High,Low\n")
            d.close()
        self.log_file = open(self.get_log_path(), 'a')
    def get_log_path(self):
        """Return the per-instrument log path, e.g. ./logs/EUR-USD.csv."""
        return "./logs/" + self.name.replace('/', "-") + ".csv"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.