text stringlengths 8 6.05M |
|---|
from django.conf.urls import url

from . import views

# URL namespace: reverse with 'documents:<name>'.
app_name = 'documents'

urlpatterns = [
    url(r'^$', views.index, name='index'),
    # NOTE(review): unlike its siblings, the 'list' pattern has no trailing
    # slash; left as-is since changing it would change published URLs.
    url(r'^(?P<class_id>[0-9A-Za-z]+)$', views.list, name='list'),
    url(r'^(?P<class_id>[0-9A-Za-z]+)/upload/$', views.upload, name='upload'),
    url(r'^(?P<class_id>[0-9A-Za-z]+)/(?P<document_id>[0-9]+)/$', views.detail, name='detail'),
    url(r'^(?P<class_id>[0-9A-Za-z]+)/(?P<document_id>[0-9]+)/download/$', views.download, name='download'),
    url(r'^(?P<class_id>[0-9A-Za-z]+)/(?P<document_id>[0-9]+)/delete/$', views.delete, name='delete'),
    url(r'^(?P<class_id>[0-9A-Za-z]+)/(?P<document_id>[0-9]+)/update/$', views.update, name='update'),
]
import numpy as np
import math
import pandas as pd
# BUG FIX: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
# Gaussian, Bernoulli and multinomial naive Bayes variants
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
def create_data():
    """Load iris, keep the first 100 samples (classes 0 and 1 only), and
    return (features, labels) as numpy arrays."""
    iris = load_iris()
    frame = pd.DataFrame(iris.data, columns=iris.feature_names)
    frame['label'] = iris.target
    frame.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    first_two_classes = np.array(frame.iloc[:100, :])
    return first_two_classes[:, :-1], first_two_classes[:, -1]
class NaiveBayesModel:
    """Gaussian naive Bayes classifier.

    For each class label, every feature column is summarised by its
    (mean, std) pair and treated as an independent normal distribution;
    prediction multiplies the per-feature densities and returns the label
    with the largest product.
    """

    def __init__(self):
        # label -> [(mean, std) for each feature column]
        self.model = dict()

    @staticmethod
    def compute(xs):
        """Return the (mean, std) summary of every feature column in xs."""
        return [(np.mean(column), np.std(column)) for column in zip(*xs)]

    @staticmethod
    def gaussian_probability(x, mean, std):
        """Normal probability density of x under N(mean, std**2)."""
        exponent = math.exp(-((x - mean) ** 2) / (2 * std ** 2))
        return exponent / (math.sqrt(2 * math.pi) * std)

    def fit(self, xs, ys):
        """Group the samples by label and store per-label feature summaries."""
        grouped = {}
        for sample, label in zip(xs, ys):
            grouped.setdefault(label, []).append(sample)
        self.model = {label: self.compute(samples) for label, samples in grouped.items()}

    def predict(self, xs):
        """Return the fitted label whose joint feature density at xs is highest."""
        best_label = None
        best_prob = None
        for label, summaries in self.model.items():
            prob = 1
            for i, value in enumerate(xs):
                mean, std = summaries[i]
                prob *= self.gaussian_probability(value, mean, std)
            if best_prob is None or prob >= best_prob:
                best_label, best_prob = label, prob
        return best_label

    def score(self, xs, ys):
        """Fraction of (xs, ys) samples that predict() labels correctly."""
        hits = sum(1 for sample, label in zip(xs, ys) if self.predict(sample) == label)
        return float(hits) / len(ys)
# Build the two-class iris dataset and hold out 30% for testing.
X, Y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
print('一个数据: ', X_test[0], y_test[0])

# Toggle between the hand-written model above and sklearn's implementations.
my_model = True
if my_model:
    model = NaiveBayesModel()
    model.fit(X_train, y_train)
    print('score: ', model.score(X_test, y_test))
    print('predict: ', model.predict([4.4, 3.2, 1.3, 0.2]))
else:
    # model = GaussianNB()
    model = BernoulliNB()
    model.fit(X_train, y_train)
    print('score: ', model.score(X_test, y_test))
    # sklearn's predict expects a 2-D array: one row per sample
    test_data = np.array([4.4, 3.2, 1.3, 0.2]).reshape(1, -1)
    print('test_data: ', test_data)
    print('predict: ', model.predict(test_data))
# -*- coding: utf-8 -*-
# test.py -- smoke test: crawl a single Wikipedia page to depth 1.
import hyperlink_fetch
import re
import text_manip
from bs4 import BeautifulSoup

# BUG FIX: the Windows path contained unrecognised backslash escapes
# (\W, \P, \e) which raise a DeprecationWarning and will eventually become a
# SyntaxError; a raw string keeps the path bytes identical.
hyperlink_fetch.wiki_get_all(root_link="https://en.wikipedia.org/wiki/Fury_and_Hecla_Strait", max_depth=1, input_root_folderpath=r"F:\Workspaces\Python\Wikipedia_closure\example", force_redo=True)  # this was the only one I found which had a manageable number of links
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 6 23:16:31 2019
Topic : anagram method2
"""
def anagramCheck(string1, string2):
    """Return True when string1 and string2 are anagrams of each other."""
    # Different lengths can never be anagrams; otherwise compare the
    # letters in sorted order.
    if len(string1) != len(string2):
        return False
    return sorted(string1) == sorted(string2)
# Demo: 'adf' and 'fad' contain the same letters, so this prints True.
print(anagramCheck('adf','fad'))
|
import re
def binary(data):
    """Match object when *data* starts with a binary digit ('0'/'1'); None otherwise.

    NOTE(review): only the first character is checked -- the pattern has no
    quantifier or end anchor.
    """
    return re.match(r'[01]', data)


def binary_even(data):
    """True when *data* is at least two chars, ends in '0', and starts with a
    binary digit; implicitly returns None otherwise."""
    if re.match(r'.+0$', data) and binary(data):
        return True
def hex(data):
    """Match object when *data* starts with a hex digit, then a char outside
    G-Z, then a char outside a-z; None otherwise.

    NOTE(review): shadows the builtin hex(); name kept for callers.
    """
    pattern = re.compile(r'[0-9A-F][^G-Z][^a-z]')
    return pattern.match(data)
def word(data):
    """True when *data* contains a non-digit character, does not start with
    '!' or '+', and contains no '*'; implicitly returns None otherwise."""
    has_non_digit = re.match(r'.*[^0-9].*', data)
    ok_first_char = re.match(r'[^!+]', data)
    if has_non_digit and ok_first_char and not re.search(r'[*]', data):
        return True


def words(data, **ct):
    """Return False for empty input, else (word(data) truthiness, token count).

    Tokens are counted by splitting on a space followed by a char that is not
    space/'!'/'+'. The **ct kwargs are accepted but unused (kept for callers).
    """
    if data == '':
        return False
    pieces = re.compile(r' [^ !+]').split(data)
    return (bool(word(data)), len(pieces))
def phone_number(data):
    """True when 3, 3 and 4 digit groups appear in order anywhere in *data*
    (any separators allowed); implicitly returns None otherwise."""
    found = re.search(r'.*\d{3}.*\d{3}.*\d{4}', data)
    if found:
        return True
def money(data):
    """True for dollar amounts like '$5', '$5.', '$.99' or '$123.45';
    implicitly returns None otherwise."""
    # one leading dollar sign, then digits with an optional cents part
    pattern = re.compile(r'^\${1}(\d*(\d\.?|\.\d{0,2}))$')
    if pattern.match(data):
        return True
def zipcode(data):
    """True for ZIP or ZIP+4 shapes, False for malformed long input, and
    implicitly None when no 5-digit group is found at all."""
    if len(data) > 5:
        # anything longer than 5 chars must be 'ddddd-dddd'
        if data[5] != '-':
            return False
        if len(data.split('-')[1]) != 4:
            return False
    if re.search(r'(\d{5}){1}(-\d{4})?', data):
        return True
def date(data):
    """True for 'MM-DD-YYYY'-style dates; a leading 4-digit year (ISO style
    'YYYY-MM-DD') is rotated to the end first. Implicitly None otherwise."""
    if re.match(r'\d{4}', data[:4]):
        # move the leading year to the back: YYYY-MM-DD -> MM-DD-YYYY
        data = data[5:] + '-' + data[:4]
    if re.match(r'\d{1,2}.\d{1,2}.\d{4}', data):
        return True
|
#!/usr/bin/env python
# coding=utf-8
from InceptionV3 import *
import torch
import torch.nn as nn
import torch.nn.functional as F
class HydraPlusNet(nn.Module):
    """HydraPlus-Net: an Inception-v3 main branch (MNet) optionally fused
    with three Attentive Feature branches (AF1-AF3); their outputs are
    concatenated with the MNet features before a final linear classifier.
    """

    def __init__(self, num_class, is_fusion=True):
        super(HydraPlusNet, self).__init__()
        self.is_fusion = is_fusion
        # With fusion enabled MNet returns three intermediate feature maps
        # (x, y, z); otherwise it returns the final prediction directly
        # -- presumably; confirm against Inception3's implementation.
        self.MNet = Inception3(num_class, is_fusion, False)  # MNet
        if is_fusion:
            self.AF1 = AF1()
            self.AF2 = AF2()
            self.AF3 = AF3()
            # 2048*73 input width comes from z plus the concatenated AF
            # outputs -- TODO confirm against the AF channel math.
            self.fc_ = nn.Linear(2048 * 73, num_class)

    def forward(self, x):
        if self.is_fusion:
            # x, y, z: Inception feature maps at decreasing resolutions
            x, y, z = self.MNet(x)
            F1 = self.AF1([x, y, z])
            F2 = self.AF2([x, y, z])
            F3 = self.AF3([x, y, z])
            # fuse all branches channel-wise, then pool/flatten/classify
            ret = torch.cat((z, F1, F2, F3), dim=1)
            ret = F.avg_pool2d(ret, kernel_size=8)
            ret = F.dropout(ret, training=self.training)
            ret = ret.view(ret.size(0), -1)
            ret = self.fc_(ret)
        else:
            ret = self.MNet(x)
        return ret
class AF1(nn.Module):
    """Attentive Feature branch 1: 8 attention maps are computed from the
    first Inception stage (x) and applied -- pooled to the matching
    resolution -- to all three stages; each attended map is lifted through
    the remaining Inception stages and everything is concatenated
    channel-wise.
    """

    def __init__(self):
        super(AF1, self).__init__()
        # 8 attention channels from the 288-channel stage-1 feature map
        self.att = BasicConv2d(288, 8, kernel_size=1)
        # remaining Inception-v3 stages used to lift attended maps
        self.incep2 = nn.Sequential(InceptionB(288), InceptionC(768, channels_7x7=128), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=192))
        self.incep3 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))
        self.incep3_2 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))

    def forward(self, input):
        # input: (x, y, z) feature maps at 35x35/288ch, 17x17/768ch,
        # 8x8/2048ch -- assumed from the view/expand sizes below.
        x, y, z = input
        attentive = self.att(x)
        ret = 0  # placeholder; replaced by the first attended map below
        # apply each attention channel to x at full 35x35 resolution
        for i in range(8):
            temp = attentive[:, i].clone()
            temp = temp.view(-1, 1, 35, 35).expand(-1, 288, 35, 35)
            R1 = x * temp
            R1 = self.incep2(R1)
            R1 = self.incep3(R1)
            if i == 0:
                ret = R1
            else:
                ret = torch.cat((ret, R1), dim=1)
        # pool attention 2x to match y's 17x17 resolution
        attentive2 = F.avg_pool2d(attentive, kernel_size=2, stride=2)
        for i in range(8):
            temp = attentive2[:, i].clone()
            temp = temp.view(-1, 1, 17, 17).expand(-1, 768, 17, 17)
            R2 = y * temp
            R2 = self.incep3_2(R2)
            ret = torch.cat((ret, R2), dim=1)
        # pool attention 4x to match z's 8x8 resolution
        attentive3 = F.avg_pool2d(attentive, kernel_size=4, stride=4)
        for i in range(8):
            temp = attentive3[:, i].clone()
            temp = temp.view(-1, 1, 8, 8).expand(-1, 2048, 8, 8)
            R3 = z * temp
            ret = torch.cat((ret, R3), dim=1)
        return ret
class AF2(nn.Module):
    """Attentive Feature branch 2: 8 attention maps are computed from the
    middle Inception stage (y), resized to each stage's resolution
    (upsampled for x, pooled for z), applied per channel, then lifted
    through the remaining Inception stages and concatenated channel-wise.
    """

    def __init__(self):
        super(AF2, self).__init__()
        self.att = BasicConv2d(768, 8, kernel_size=1)
        self.incep2 = nn.Sequential(InceptionB(288), InceptionC(768, channels_7x7=128), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=192))
        self.incep3 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))
        self.incep3_2 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))
        # pads the 34x34 upsampled attention to 35x35 to match x
        self.patch = nn.ReflectionPad2d((0, 1, 0, 1))

    def forward(self, input):
        # input: (x, y, z) feature maps at 35x35/288ch, 17x17/768ch,
        # 8x8/2048ch -- assumed from the view/expand sizes below.
        x, y, z = input
        attentive = self.att(y)
        # FIX: F.interpolate replaces the deprecated F.upsample (identical
        # defaults, mode='nearest'): 17x17 -> 34x34, then pad to 35x35.
        attentive1 = self.patch(F.interpolate(attentive, scale_factor=2))
        for i in range(8):
            temp = attentive1[:, i].clone()
            temp = temp.view(-1, 1, 35, 35).expand(-1, 288, 35, 35)
            R1 = x * temp
            R1 = self.incep2(R1)
            R1 = self.incep3(R1)
            if i == 0:
                ret = R1
            else:
                ret = torch.cat((ret, R1), dim=1)
        # y is already at the attention's native resolution
        attentive2 = attentive
        for i in range(8):
            temp = attentive2[:, i].clone()
            temp = temp.view(-1, 1, 17, 17).expand(-1, 768, 17, 17)
            R2 = y * temp
            R2 = self.incep3_2(R2)
            ret = torch.cat((ret, R2), dim=1)
        # pool the attention 2x down to z's 8x8 resolution
        attentive3 = F.avg_pool2d(attentive, kernel_size=2, stride=2)
        for i in range(8):
            temp = attentive3[:, i].clone()
            temp = temp.view(-1, 1, 8, 8).expand(-1, 2048, 8, 8)
            R3 = z * temp
            ret = torch.cat((ret, R3), dim=1)
        return ret
class AF3(nn.Module):
    """Attentive Feature branch 3: 8 attention maps are computed from the
    last Inception stage (z) and upsampled (8->17->35, via reflection
    padding) to match the earlier stages; each attended map is lifted
    through the remaining Inception stages and concatenated channel-wise.
    """

    def __init__(self):
        super(AF3, self).__init__()
        self.att = BasicConv2d(2048, 8, kernel_size=1)
        self.incep2 = nn.Sequential(InceptionB(288), InceptionC(768, channels_7x7=128), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=160), InceptionC(768, channels_7x7=192))
        self.incep3 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))
        self.incep3_2 = nn.Sequential(InceptionD(768), InceptionE(1280), InceptionE(2048))
        self.patch = nn.ReflectionPad2d((0, 1, 0, 1))
        # NOTE(review): patch2 is never used in forward() (patch is applied
        # twice, with identical padding); kept for state-dict compatibility.
        self.patch2 = nn.ReflectionPad2d((0, 1, 0, 1))

    def forward(self, input):
        x, y, z = input
        attentive = self.att(z)
        # FIX: F.interpolate replaces the deprecated F.upsample (identical
        # defaults). 8x8 -> 16x16 -> pad 17x17, then 17 -> 34 -> pad 35x35.
        attentive2 = self.patch(F.interpolate(attentive, scale_factor=2))
        attentive1 = self.patch(F.interpolate(attentive2, scale_factor=2))
        for i in range(8):
            temp = attentive1[:, i].contiguous().view(-1, 1, 35, 35).expand(-1, 288, 35, 35)
            R1 = x * temp
            R1 = self.incep2(R1)
            R1 = self.incep3(R1)
            if i == 0:
                ret = R1
            else:
                ret = torch.cat((ret, R1), dim=1)
        for i in range(8):
            temp = attentive2[:, i].contiguous().view(-1, 1, 17, 17).expand(-1, 768, 17, 17)
            R2 = y * temp
            R2 = self.incep3_2(R2)
            ret = torch.cat((ret, R2), dim=1)
        # z is already at the attention's native 8x8 resolution
        attentive3 = attentive
        for i in range(8):
            temp = attentive3[:, i].contiguous().view(-1, 1, 8, 8).expand(-1, 2048, 8, 8)
            R3 = z * temp
            ret = torch.cat((ret, R3), dim=1)
        return ret
|
# -*- coding: utf-8 -*-
import scrapy
import csv
import os
class RadioguideSpider(scrapy.Spider):
    """Crawl radioguide.fm: country list -> stations per country -> station
    name/description, appending each station as one row of radioguide.csv.
    """
    name = 'radioguide'
    allowed_domains = ['radioguide.fm']
    start_urls = ['https://www.radioguide.fm/countries']

    def parse(self, response):
        """Extract every country tile and follow it, carrying the country name."""
        datas = response.xpath('.//*[@class="col-md-4 col-xs-6 col-sm-6"]/a').extract()
        for data in datas:
            sel = scrapy.Selector(text=data)
            link = sel.xpath('.//a/@href').extract_first()
            country = sel.xpath('.//a/p/text()').extract_first()
            print(country)
            yield scrapy.Request(response.urljoin(link), callback=self.getstations, meta={
                'country': country
            })

    def getstations(self, response):
        """Follow every station link on a country page."""
        links = response.xpath('.//*[@class="clearfix countries"]/li/div/a/@href').extract()
        for link in links:
            yield scrapy.Request(response.urljoin(link), callback=self.getdata, meta={
                'country': response.meta.get('country')
            })

    def getdata(self, response):
        """Scrape one station page and append (country, title, description)."""
        title = response.xpath('.//*[@itemprop="name"]/text()').extract_first()
        description = response.xpath('.//*[@itemprop="description"]/text()').extract_first()
        # Write the header exactly once, when the file does not exist yet.
        # FIX: direct isfile check instead of scanning the whole directory,
        # and newline='' as required by the csv module (prevents blank rows
        # on Windows).
        if not os.path.isfile("radioguide.csv"):
            with open("radioguide.csv", "a", newline='') as f:
                writer = csv.writer(f)
                writer.writerow(['country', 'title', 'description'])
        with open("radioguide.csv", "a", newline='') as f:
            writer = csv.writer(f)
            writer.writerow([response.meta.get('country'), title, description])
        print([response.meta.get('country'), title, description])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add nullable text fields preview_en / preview_ru to Article.

    NOTE(review): preview_en's verbose_name is 'preview ru' -- looks like a
    copy-paste slip, but this migration is historical state and is left
    byte-identical.
    """

    dependencies = [
        ('frontend', '0006_auto_20150309_1937'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='preview_en',
            field=models.TextField(default=b'', null=True, verbose_name='preview ru', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='article',
            name='preview_ru',
            field=models.TextField(default=b'', null=True, verbose_name='preview ru', blank=True),
            preserve_default=True,
        ),
    ]
|
import re
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
def calibration_plot(
        scores,
        target,
        n_bins=20
):
    """Plot mean target vs. binned score with +/-1 and +/-2 SEM shading.

    Parameters
    ----------
    scores : pd.Series
        Model scores; the series name labels the x axis.
    target : pd.Series
        Outcomes aligned with *scores*; the name labels the y axis.
    n_bins : int
        Number of equal-frequency score bins.

    Returns
    -------
    matplotlib Axes holding the calibration curve plus a y=x reference line.
    """
    binned_score = pd.qcut(scores, n_bins)
    bin_centres = sorted(pd.Series(scores).groupby(binned_score).mean().values)
    # FIX: removed the unused `targets = target.groupby(binned_score).mean()`
    # (dead code -- score_qs below already carries the per-bin means).
    score_qs = target.groupby(binned_score)\
        .agg(['mean', 'sem'])\
        .unstack()
    _, axis = plt.subplots(figsize=(10, 6))
    # Plotting the line
    axis.plot(
        bin_centres,
        score_qs['mean'],
        color='blue'
    )
    # Plotting the standard error of the mean as different CI shades
    axis.fill_between(
        bin_centres,
        score_qs['mean'] + score_qs['sem'],
        score_qs['mean'] - score_qs['sem'],
        facecolor='blue',
        alpha=2**1.5 / (3 ** 1.5)
    )
    axis.fill_between(
        bin_centres,
        score_qs['mean'] + score_qs['sem'],
        score_qs['mean'] + 2 * score_qs['sem'],
        facecolor='blue',
        alpha=1**1.5 / (3 ** 1.5)
    )
    axis.fill_between(
        bin_centres,
        score_qs['mean'] - score_qs['sem'],
        score_qs['mean'] - 2 * score_qs['sem'],
        facecolor='blue',
        alpha=1**1.5 / (3 ** 1.5)
    )
    axis.set_xlabel(scores.name)
    axis.set_ylabel(target.name)
    # y = x reference: perfect calibration
    axis.plot(
        bin_centres, bin_centres, ls='--', color='black'
    )
    return axis
def plot_labelled_tree(estimator, features, shift=0, title=None, save=False, node_label=False):
    """Draw a fitted sklearn decision tree with its leaves relabelled as
    'segment #k' (k offset by *shift*), gini lines stripped, and leaf vs.
    internal nodes tinted differently.

    Parameters
    ----------
    estimator : fitted sklearn tree estimator (exposes .tree_)
    features : feature names forwarded to plot_tree
    shift : int added to every segment number
    title : optional figure title; also used as the save filename
    save : when True (and title given), write '<title>.png'
    node_label : when True keep sklearn's original node labels untouched
    """
    _, ax = plt.subplots(figsize=(18, 6))
    annotations = plot_tree(
        estimator,
        ax=ax,
        feature_names=features,
        proportion=True,
        node_ids=True,
        fontsize=10
    )
    # Leaves are the nodes whose split feature is -2 in sklearn's tree arrays.
    leaf_nodes = [i for i, x in enumerate(estimator.tree_.feature) if x == -2]
    # leaf node id -> human-readable segment label
    annotation_dict = {
        node_id: 'segment #{}'.format(segment+shift) for segment, node_id in enumerate(leaf_nodes)
    }
    n_nodes = estimator.tree_.node_count
    # NOTE(review): each annotation's text is rewritten once per node id j
    # (descending, so 'node #12' is handled before 'node #1'); every
    # 'node #j' token becomes its segment label or is stripped. O(n^2) but
    # n is only the plotted tree's node count.
    for i in range(n_nodes):
        for j in range(n_nodes-1, -1, -1):
            if node_label:
                # keep sklearn's own node labels untouched
                continue
            else:
                annotations[i].set_text(
                    re.sub(
                        r'gini = 0.\d+\n',
                        '',
                        annotations[i].get_text().replace('node #{}'.format(j), '{}'.format(
                            annotation_dict[j] if j in annotation_dict.keys() else ''
                        ))
                    )
                )
        # tint leaves (relabelled as segments) differently from internal nodes
        if 'segment #' in annotations[i].get_text():
            annotations[i].get_bbox_patch().set_facecolor('lavender')
        else:
            annotations[i].get_bbox_patch().set_facecolor('aliceblue')
    if title:
        plt.title(title, fontsize=16)
    if save:
        plt.savefig('_'.join(title.split(' '))+'.png')
    plt.show()
    print('\n')
# Alternative to_sql() *method* for DBs that support COPY FROM
import csv
from functools import partial, reduce
from getpass import getpass
from io import StringIO
from operator import add, iadd
from typing import Callable
import psycopg2
from sqlalchemy import MetaData, Table, bindparam, create_engine, func, text
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext import baked
from sqlalchemy.orm import load_only, sessionmaker
from sqlalchemy.sql import select
from utils import compose, pd
def write_rows_to_sql(df: pd.DataFrame,
                      conn,
                      schema: str,
                      table: str = 'gene_trees',
                      n=10):
    """Write *df* to *table* in batches of *n* rows, skipping any batch that
    violates an integrity constraint (duplicates); returns the number of
    rows actually written.

    NOTE: slow by design -- each batch is a separate to_sql() call so a
    duplicate batch can be dropped without aborting the rest.
    """
    wrote = 0
    # BUG FIX: the loop previously advanced by a hard-coded 2 while writing
    # n-row slices, so rows were re-inserted (n > 2) or skipped (n < 2),
    # and `wrote` was incremented by n even for short final batches.
    for i in range(0, len(df), n):
        batch = df[i:i + n]
        try:
            batch.to_sql(
                table,
                conn,
                schema=schema,
                method='multi',
                if_exists='append',
                index=False)
            wrote += len(batch)
        except IntegrityError:
            # duplicate batch: drop it and keep going
            continue
    return wrote
def psql_insert_copy(table, conn, keys, data_iter):
    """
    Execute SQL statement inserting data via postgres COPY FROM STDIN --
    the fast bulk-load path for DataFrame.to_sql(method=psql_insert_copy).

    Parameters
    ----------
    table : pandas.io.sql.SQLTable
    conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
    keys : list of str
        Column names
    data_iter : Iterable that iterates the values to be inserted
    """
    # gets a DBAPI connection that can provide a cursor
    dbapi_conn = conn.connection
    with dbapi_conn.cursor() as cur:
        # stage all rows as CSV text in memory
        s_buf = StringIO()
        writer = csv.writer(s_buf)
        writer.writerows(data_iter)
        s_buf.seek(0)
        # quote column names so mixed-case / reserved words survive
        columns = ', '.join('"{}"'.format(k) for k in keys)
        if table.schema:
            table_name = '{}.{}'.format(table.schema, table.name)
        else:
            table_name = table.name
        sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
            table_name, columns)
        # psycopg2-specific: stream the staged CSV through COPY
        cur.copy_expert(sql=sql, file=s_buf)
def make_session(args):
    """Connect to a local postgres DB (args.user / args.db) and return
    (ORM session, reflected MetaData, raw connection).

    NOTE(review): 'timeout' and 'pool_timeout' inside connect_args look
    misplaced -- psycopg2 expects 'connect_timeout', and pool_timeout is a
    create_engine() keyword; confirm before reuse (make_session_kw uses
    'connect_timeout').
    """
    url = URL("postgresql", username=args.user, host="localhost",
              database=args.db)
    engine = create_engine(url,
                           max_overflow=0,
                           connect_args={"timeout": 3000,
                                         'pool_timeout': 3000})
    conn = engine.connect()
    Session = sessionmaker(bind=engine)
    session = Session()
    # Reflect the requested schema when args carries one, else the default.
    if hasattr(args, "schema"):
        metadata = MetaData(bind=engine, schema=args.schema)
    else:
        metadata = MetaData(bind=engine)
    metadata.reflect(views=True)
    return (session, metadata, conn)
def filter_table_by(table, **kwargs):
    """Return a function that adds one equality filter per kwarg
    (column == value on *table*) to a query."""
    def apply_filters(query):
        for column, expected in kwargs.items():
            query = query.filter(table.c[column] == expected)
        return query
    return apply_filters
class Baker(object):
    """Serializable wrapper around sqlalchemy's baked-query bakery: stores
    an initial query factory plus a list of query->query transforms that
    are applied in order when the query is compiled.
    """

    def __init__(self, initial_func, yieldper=None):
        """*initial_func* must take a session object and return a query.

        *yieldper*, when given, makes compiled queries stream results in
        batches of that size via Query.yield_per().
        """
        self.initial = initial_func
        self._yieldper = yieldper
        # BUG FIX: the original bound
        #   lambda x: (x if yieldper is None else lambda q: ...)
        # (conditional inside one lambda), so with yieldper set, calling
        # yield_per(q) returned the inner lambda instead of applying it.
        if yieldper is None:
            self.yield_per = lambda q: q
        else:
            self.yield_per = lambda q: q.yield_per(yieldper)
        self.funcs = []

    def add_func(self, func):
        """add a func which takes a query and returns another query"""
        self.funcs.append(func)
        return self

    def __iadd__(self, other):
        """add a func in-place"""
        self.add_func(other)
        return self

    def __add__(self, other):
        """make a copy and add a func"""
        new = self.copy()
        new.add_func(other)
        return new

    def copy(self):
        """Independent copy preserving the transform list and yieldper
        setting (the original dropped yieldper)."""
        new = Baker(self.initial, self._yieldper)
        new.funcs = self.funcs.copy()
        return new

    def to_statement(self, session):
        """returns Baked.Result if params, else SQLAlechemy statement.
        must call statement"""
        # TODO: clean up
        bakery = baked.bakery()
        return (
            reduce(iadd, self.funcs, bakery(self.initial)
                   ).to_query(session).statement
        )

    def compile(self, session, **params):
        """Bake initial + funcs (+ yield_per) and bind *params* when given."""
        bakery = baked.bakery()
        q = reduce(iadd, self.funcs +
                   [self.yield_per], bakery(self.initial))(session)
        if params:
            return q.params(**params)
        else:
            print("q:", type(q))  # NOTE(review): debug leftover, kept as-is
            return q

    def __repr__(self):
        return str(self.initial) + " " + str(self.funcs)
class SlimBaker(Baker):
    """Functional syntax of baked objects, but returns a
    sqlalchemy.sql.expression.Selectable object; transforms are composed
    into a single function instead of kept in a list."""

    def __init__(self, initial_func, yieldper=None):
        """*initial_func* must take a session object and return a query."""
        self.func = initial_func
        self._yieldper = yieldper
        # BUG FIX: same operator-precedence fix as Baker -- the original
        # returned the inner lambda instead of calling q.yield_per().
        if yieldper is None:
            self.yield_per = lambda q: q
        else:
            self.yield_per = lambda q: q.yield_per(yieldper)

    def add_func(self, func):
        """add a func which takes a query and returns another query"""
        self.func = compose(func, self.func)
        return self

    def __iadd__(self, other):
        """add a func in-place"""
        self.add_func(other)
        return self

    def __add__(self, other):
        """make a copy and add a func"""
        new = self.copy()
        new.add_func(other)
        return new

    def copy(self):
        # BUG FIX: previously returned a Baker (whose compile/to_statement
        # expect .initial/.funcs), silently changing class and breaking the
        # copy; preserve the SlimBaker type and yieldper setting.
        return SlimBaker(self.func, self._yieldper)

    def to_statement(self, session, **params):
        """Return the SQLAlchemy statement of the composed query."""
        # TODO: clean up
        return self.yield_per(self.func(session)).statement

    def compile(self, session, **params):
        """Return the composed query, with *params* bound when given."""
        q = self.func(session)
        if params:
            q = q.params(**params)
        return self.yield_per(q)
def make_connection(schema):
    """Open a connection to the bkrosenz postgres DB for *schema* and return
    only the raw connection (no ORM session / metadata)."""
    # password is read from a private file rather than prompted interactively
    with open("/N/u/bkrosenz/BigRed3/.ssh/db.pwd") as f:
        password = f.read().strip()
    # with_metadata=False makes make_session_kw return (session, conn)
    session, conn = make_session_kw(
        username="bkrosenz_root",
        password=password,
        database="bkrosenz",
        schema=schema,
        port=5444,
        host="10.79.161.8",
        with_metadata=False,  # sasrdspp02.uits.iu.edu'
    )
    return conn
def prepare_bootstrap(conn, schema, table, columns):
    """Install a server-side prepared statement `sample(int)` that returns
    $1 randomly ordered rows of *columns* from *table* in *schema*."""
    statement = '''prepare sample(int) as
select {cols} from {table}
order by random()
limit $1;'''.format(cols=','.join(columns), table=table)
    # point the search path at the requested schema, then register the plan
    conn.execute('set search_path to {};'.format(schema))
    conn.execute(statement)
def prepare(conn):
    """Install postgres prepared statements (schema sim4) for sampling
    recombinant and non-recombinant inferred gene trees.

    TABLESAMPLE is not completely random.
    Sample > target number, then randomly shuffle and limit.
    for testing on rec/nonrec - only pick gene trees inferred from correct substitution model.
    Disallows model misspecification (LG->LG and WAG->WAG) except in sample_model_nonrec statement.
    """
    # args: length,ngenes
    nonrec_prepared = '''
prepare sids_nonrec(int,int) as
select s.* from
(select sid from nonrec_inferred
where seq_length=$1
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_nonrec(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.seq_length,
r.randomcolumn
from nonrec_inferred r
-- tablesample system (90)
where r.seq_length=$2
and position(r.sim_model in r.infer_model)>0
and r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model
from nonrec_scf s
where s.seq_length=$2
and s.sid=$1) c
order by randomcolumn
limit $3;
prepare sample_sid_model_nonrec(int,int,text,text,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.seq_length,
r.randomcolumn
from nonrec_inferred r
-- tablesample system (80)
where r.seq_length=$2
and r.sim_model=$3
and position($4 in r.infer_model)>0
and r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no
from nonrec_scf s
where s.seq_length=$2
and s.sim_model=$3
and s.sid=$1) c
order by randomcolumn
limit $5;'''
    rec_prepared = '''prepare sids_rec(int,int) as
select s.* from
(select sid from rec_inferred
where seq_length=$1
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_blocks_rec(int,int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.ds_no,
r.seq_length,
r.randomcolumn
from rec_inferred r
-- tablesample system (70)
where r.seq_length=$2
and position(r.sim_model in r.infer_model)>0
and r.sid=$1
and array_length(r.tree_no,1)=$3) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model,
s.ds_no
from rec_scf s
where s.seq_length=$2
and s.sid=$1) c
order by randomcolumn
limit $4;
prepare sample_sid_rec(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.ds_no,
r.seq_length,
r.randomcolumn
from rec_inferred r
-- tablesample system (70)
where r.seq_length=$2
and position(r.sim_model in r.infer_model)>0
and r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model,
s.ds_no
from rec_scf s
where s.seq_length=$2
and s.sid=$1) c
order by randomcolumn
limit $3;'''
    # register both statement sets on this connection in the sim4 schema
    conn.execute('set search_path to sim4;')
    conn.execute(nonrec_prepared)
    conn.execute(rec_prepared)
def prepare_heterotachy(conn):
    '''Disallows model misspecification. Only one seq length condition.

    NOTE(review): `prepare sids(int)` declares one parameter but its body
    references $2 -- looks like it should be sids(int,int); confirm against
    the callers before fixing.
    '''
    # args: length,ngenes
    prepared = '''prepare sids(int) as
select s.* from
(select sid from gene_trees_heterotachy250
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_model(int,text,text,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.randomcolumn
from heterotachy_inferred r
--tablesample system (90)
where r.sim_model=$2
and position($3 in r.infer_model)>0
and r.sid=$1
order by r.randomcolumn) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no
from heterotachy_scf s
where s.sid=$1
and s.sim_model=$2) c
limit $4;
prepare sample_sid(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.randomcolumn
from heterotachy_inferred r
--tablesample system (90)
where r.sid=$1
and position(r.sim_model in r.infer_model)>0
order by r.randomcolumn) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model
from heterotachy_scf s
where s.sid=$1) c
limit $3;'''
    conn.execute('set search_path to sim4;')
    conn.execute(prepared)
def prepare_one_rate(conn):
    '''Disallows model misspecification. Only one seq length condition.

    NOTE(review): like prepare_heterotachy, `prepare sids(int)` declares one
    parameter but references $2 in its body -- confirm before reuse.
    '''
    # args: length,ngenes
    prepared = '''prepare sids(int) as
select s.* from
(select sid from gene_trees_one_rate
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_model(int,text,text,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.randomcolumn
from one_rate_inferred r
--tablesample system (90)
where r.sim_model=$2
and position($3 in r.infer_model)>0
and r.sid=$1
order by r.randomcolumn) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no
from one_rate_scf s
where s.sid=$1
and s.sim_model=$2) c
limit $4;
prepare sample_sid(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.randomcolumn
from one_rate_inferred r
--tablesample system (90)
where r.sid=$1
and position(r.sim_model in r.infer_model)>0
order by r.randomcolumn) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model
from one_rate_scf s
where s.sid=$1) c
limit $3;'''
    conn.execute('set search_path to sim4;')
    conn.execute(prepared)
def prepare_all_lengths(conn):
    '''Allows model misspecification. Installs sampling statements over all
    sequence lengths (schema sim4), for both non-recombinant and
    recombinant inferred gene trees.'''
    # args: length,ngenes
    nonrec_prepared = '''
prepare sample_nonrec(numeric) as
select itrees.pdist,
itrees.tid,
scf.top_1,
scf.top_2,
scf.top_3,
scf.nsites,
itrees.seq_length,
itrees.randomcolumn,
strees.ebl,
strees.ibl
from
(select r.sid,
r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.seq_length,
r.randomcolumn
from nonrec_inferred r
tablesample system ($1)
where position(r.sim_model in r.infer_model)>0
) itrees
natural left join
(select s.sid,
s.top_1,
s.top_2,
s.top_3,
s.seq_length,
s.nsites,
s.tree_no,
s.sim_model
from nonrec_scf s) scf
natural inner join
(select ebl,ibl,sid from species_trees) strees
where ebl<300
-- order by randomcolumn limit $1
;
prepare sids_nonrec(int,int) as
select s.* from
(select sid from nonrec_inferred
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_nonrec(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.randomcolumn
from nonrec_inferred r
--tablesample system (90)
where r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model
from nonrec_scf s
where s.sid=$1) c
order by randomcolumn
limit $3;
prepare sample_sid_model_nonrec(int,text,text,int) as
select i.pdist,
i.tid,
c.top_1,
i.seq_length,
c.top_2,
c.top_3,
c.nsites,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.seq_length,
r.tree_no,
r.sim_model,
r.randomcolumn
from nonrec_inferred r
--tablesample system (90)
where r.sim_model=$2
and position($3 in r.infer_model)>0
and r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no
from nonrec_scf s
where s.sid=$1
and s.sim_model=$2) c
order by randomcolumn
limit $4;'''
    rec_prepared = '''prepare sids_rec(int,int) as
select s.* from
(select sid from rec_inferred
group by sid having count(1)>=$2) i
natural join
(select ebl,ibl,sid from species_trees) s;
prepare sample_sid_blocks_rec(int,int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
c.nsites,
i.seq_length,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.seq_length,
r.sim_model,
r.ds_no,
r.randomcolumn
from rec_inferred r
-- tablesample system (90)
where r.sid=$1
and array_length(r.tree_no,1)=$3) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model,
s.ds_no
from rec_scf s
where s.sid=$1) c
order by randomcolumn
limit $4;
prepare sample_sid_rec(int,int,int) as
select i.pdist,
i.tid,
c.top_1,
c.top_2,
c.top_3,
i.seq_length,
c.nsites,
i.randomcolumn
from
(select r.pdist,
r.tid,
r.tree_no,
r.sim_model,
r.ds_no,
r.seq_length,
r.randomcolumn
from rec_inferred r
where r.sid=$1) i
natural left join
(select s.top_1,
s.top_2,
s.top_3,
s.nsites,
s.tree_no,
s.sim_model,
s.ds_no
from rec_scf s
where s.sid=$1) c
order by randomcolumn
limit $3;'''
    # register both statement sets on this connection in the sim4 schema
    conn.execute('set search_path to sim4;')
    conn.execute(nonrec_prepared)
    conn.execute(rec_prepared)
def make_session_kw(username: str,
                    database: str,
                    port: int,
                    schema='public',
                    host: str = "localhost",
                    password: str = None,
                    statement_prepare: Callable = prepare,
                    with_metadata: bool = True):
    """Connect to the IU-hosted postgres DB and install prepared statements.

    Prompts for the password when none is given. Returns
    (session, metadata, conn) when with_metadata, else (session, conn).
    """
    # BUG FIX: define url before the try block -- if getpass()/URL() itself
    # raised, the except branch's print(url) raised NameError instead.
    url = None
    try:
        if password is None:
            password = getpass()
        url = URL("postgresql",
                  username=username,
                  host=host,
                  password=password,
                  port=port,
                  database=database)
        engine = create_engine(
            url,
            connect_args={'connect_timeout': 5000},
            max_overflow=5)
        conn = engine.connect()
        Session = sessionmaker(bind=engine)
        session = Session()
        # install the caller-selected prepared statements on this connection
        statement_prepare(conn)
    except Exception as e:
        print(url)
        raise e
    if with_metadata:
        metadata = MetaData(bind=engine, schema=schema)
        metadata.reflect(views=True)
        return (session, metadata, conn)
    else:
        return (session, conn)
|
# Use the print() method to display something in the terminal.
print("Hello World")
# Like Java, you can concatenate strings with +.
print("Hello" + " Tomi" + " welcome to python")
# To concatenate a number into a + expression, wrap it in str();
# str() converts an object into a string.
print("2+3 = " + str(2+3))
# print() can take in anything regardless of the data type.
# print() accepts any number of arguments, each separated by a comma: it
# calls str() on every argument and inserts a space between arguments.
print("Tomi did you know that", "2 + 3 =", 5)
# The sep argument (separator) controls how printed items are joined.
# sep goes BETWEEN elements, not around them, so account for this.
print("hello" , "world" , sep="\n")
print("home","user","documents", sep="/")
print('node', 'child', 'child', sep=' -> ')
# The end argument replaces the trailing newline; after the first print
# below, "ok" continues on the same line because the line ended with "".
print("Checking file progres....", end="")
print("ok")
# sep and end can be used together.
print('Mercury', 'Venus', 'Earth', sep=', ', end=', ')
print('Mars', 'Jupiter', 'Saturn', sep=', ', end=', ')
print('Uranus', 'Neptune', 'Pluto', sep=', ')
from django.db import models
# Create your models here.
class User(models.Model):
    """Simple contact record; every field is stored as free-form text."""
    name = models.CharField(max_length=20)
    # birthday is plain text, not a DateField -- format is up to callers
    birthday = models.CharField(max_length=20)
    phone_number = models.CharField(max_length=15)
    # optional fields: may be left blank in forms
    address = models.CharField(max_length=50, blank=True)
    mail = models.CharField(max_length = 50, blank = True)
import numpy as np
class LinearRegression:
    """Ordinary least squares fit by batch gradient descent.

    A single all-ones intercept column is prepended to the design matrix,
    so params[0] is the bias and params[1:] are the feature weights.
    Gradient descent runs immediately on construction.
    """

    def __init__(self, x, y, learning_rate=0.001, num=1000):
        # BUG FIX: np.ones(x.shape) prepended a full ones MATRIX (one ones
        # column per feature, so 2*d columns total); only a single (n, 1)
        # intercept column is wanted. Single-feature inputs are unaffected.
        x = np.append(np.ones((x.shape[0], 1)), x, axis=1)
        self.x = x
        self.y = y
        self.row = x.shape[0]   # number of samples
        self.col = x.shape[1]   # number of parameters (features + intercept)
        self.learning_rate = learning_rate
        self.num_iterations = num
        # random initialisation of the parameter vector
        self.params = np.random.randn(x.shape[1])
        self.gradient_descent_main()

    # y = m * x + c
    def sum_of_squared_errors(self, y_test, y_pred):
        """Half squared error, scaled by the TRAINING row count."""
        error = np.sum((y_test - y_pred) ** 2)
        return error / (2 * self.row)

    def gradient_descent_main(self):
        """Run batch gradient descent on the squared-error objective."""
        for _ in range(self.num_iterations):
            predicted = np.matmul(self.x, self.params)
            error = predicted - self.y
            self.params -= (self.learning_rate / self.row) * self.x.T.dot(error)

    def predict(self, x_test, y_test):
        """Return (predictions, error) for x_test evaluated against y_test."""
        # same single intercept-column fix as in __init__
        x_test = np.append(np.ones((x_test.shape[0], 1)), x_test, axis=1)
        self.y_pred = np.matmul(x_test, self.params)
        error = self.sum_of_squared_errors(y_test, self.y_pred)
        return self.y_pred, error

    def getParams(self):
        """Current parameter vector (intercept first)."""
        return self.params
|
import ee
# ======================
# Functions to calculate NDVI, NBR, SAVI bands and add band to original image
# for harmonized and merged landsat collection (all_LS) where band names have been changed
# NDVI = (NIR - RED) / (NIR + RED)
# NBR = (NIR - SWIR)/(NIR + SWIR) (Lopez, 1991; Key and Benson, 1995)
# SAVI = ((NIR - RED)/ (NIR + RED + 0.5)) * 1.5
# new indices
# B5 (landsat 5/7); B6 (Landsat 8) (Schroeder et al., 2011) = SWIR2
# Bare Soil Index = ((SWIR1 + RED) - (NIR + BLUE))/((SWIR1 + RED) + (NIR+BLUE)) (Roy et al., 2006?)
# ======================
def getNDVI(image):
    """Append an 'NDVI' band: (NIR - Red) / (NIR + Red)."""
    return image.addBands(image.normalizedDifference(['NIR', 'Red']).rename('NDVI'))
def getNBR(image):
    """Compute NBR = (NIR - SWIR2)/(NIR + SWIR2) and append it as band 'NBR'."""
    nbr = image.normalizedDifference(['NIR', 'SWIR2']).rename('NBR')
    return image.addBands(nbr)
def getSAVI(image):
    """Compute SAVI = 1.5*(NIR - Red)/(NIR + Red + 0.5) on reflectance
    (raw DN scaled by 1e-4) and append it as band 'SAVI'."""
    scaled = {
        'NIR': image.select('NIR').multiply(0.0001),
        'RED': image.select('Red').multiply(0.0001)
    }
    savi = image.expression('((NIR - RED)/(NIR + RED + 0.5))* 1.5',
                            scaled).rename('SAVI')
    return image.addBands(savi)
def getBSI(image):
    """Compute the Bare Soil Index
    ((SWIR1 + RED) - (NIR + BLUE)) / ((SWIR1 + RED) + (NIR + BLUE))
    on reflectance (raw DN scaled by 1e-4) and append it as band 'BSI'."""
    scaled = {
        'BLUE': image.select('Blue').multiply(0.0001),
        'NIR': image.select('NIR').multiply(0.0001),
        'RED': image.select('Red').multiply(0.0001),
        'SWIR1': image.select('SWIR1').multiply(0.0001)
    }
    bsi = image.expression(
        '((SWIR1 + RED) - (NIR + BLUE))/((SWIR1 + RED) + (NIR+BLUE))',
        scaled).rename('BSI')
    return image.addBands(bsi)
# def getB5(image):
# B5 =
|
import argparse
import bencodepy
import os
import tracker
import peer_node
import client_node
# ---- command-line interface ----
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-tp", "--torrent-path", dest="torrent_path", required=True, help="Path to Torrent File", metavar='\b')
parser.add_argument("-sp", "--save-path", dest="save_path", default="./", help="Destination for Downloaded File", metavar='\b')
parser.add_argument("-w", "--write-method", dest="method", default=1, type=int, help="1 -- Write File as Pieces Arrive\n2 -- Write File After all the Pieces Arrive", metavar='\b')
args = parser.parse_args()
# Decode the bencoded .torrent metainfo.
bc = bencodepy.Bencode(encoding=None)  # NOTE(review): bc appears unused — bread() is used below
with open(args.torrent_path, 'rb') as f:
    torrent = bencodepy.bread(f)
client = client_node.Client(torrent, args)
# Fetch the peer list from the tracker, then try peers one at a time
# until the client reports the download complete.
peer_list = tracker.getPeers(torrent)
print("Peer List:\n", peer_list)
for item in peer_list:
    peer = peer_node.Peer(item, torrent, args)
    peer.download(client)
    if client.is_done():
        break
if client.is_done():
    # Method 2 buffers all pieces and writes the file at the end.
    if args.method==2:
        client.write_to_file()
    client.stream.close()
# Remove session bookkeeping files once finished.
if os.path.exists("received.sav"):
    os.remove("received.sav")
if os.path.exists("pieces.sav"):
    os.remove("pieces.sav")
|
__author__ = "Narwhale"
def bubble_sort(alist):
    """Sort alist in place (ascending) with bubble sort.

    Stops early as soon as a full pass performs no swaps, i.e. the list
    is already sorted.
    """
    length = len(alist)
    for done in range(length - 1):
        swapped = False
        # Each pass bubbles the largest remaining element to the end.
        for pos in range(length - 1 - done):
            if alist[pos] > alist[pos + 1]:
                alist[pos], alist[pos + 1] = alist[pos + 1], alist[pos]
                swapped = True
        if not swapped:
            return
#j=0 i=(0,n-1)
#j=1 i=(0,n-2)
# Demo: sort a sample list in place and print the result.
li = [54,26,93,17,77,31,44,55,20]
bubble_sort(li)
print(li)
|
"""
general_plots
--------------------------------------------------------------------------------
GENERATE PLOTS FOR WORKSHOP
David T. Milodowski, 25/03/2019
"""
"""
import libraries needed
"""
import numpy as np
import matplotlib.pyplot as plt # plotting package
import seaborn as sns # another useful plotting package
import pandas as pd
from sklearn.metrics import r2_score, mean_squared_error
"""
Part 2: Random forests
"""
# Figure 1 simple cross plot of three test datasets
def plot_test_data(X1,y1,X2,y2,X3,y3,show=True):
    """Scatter the three test datasets in a 1x3 panel figure.

    Panels 0 and 2 share fixed axis limits; panel 1 autoscales.
    Returns (figure, axes).
    """
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
    for panel, (xs, ys) in zip(axes, ((X1, y1), (X2, y2), (X3, y3))):
        panel.plot(xs, ys, '.')
    for idx in (0, 2):
        axes[idx].set_xlim((0, 10))
        axes[idx].set_ylim((-5, 28))
    fig.tight_layout()
    if show:
        fig.show()
    return fig, axes
# Figure 2 adding regression results
def plot_test_data_with_regression_results(X1,y1,X2,y2,X3,y3,X_test,y1_test,y1_test_lm,y2_test,y3_test,y3_test_lm,show=True):
    """Scatter the three datasets and overlay model predictions.

    Panels 1 and 3 show both the random forest (red) and linear
    regression (blue) curves; panel 2 shows only the random forest.
    Returns (figure, axes).
    """
    fig2,axes = plt.subplots(nrows=1,ncols=3,figsize = (8,3))
    axes[0].plot(X1,y1,'.',label='data',color='0.5')
    axes[0].plot(X_test,y1_test,'-',color='red',label='naive rf model')
    axes[0].plot(X_test,y1_test_lm,'-',color='blue',label='linear regression')
    axes[1].plot(X2,y2,'.',color='0.5')
    axes[1].plot(X_test,y2_test,'-',color='red')
    axes[2].plot(X3,y3,'.',color='0.5')
    axes[2].plot(X_test,y3_test,'-',color='red')
    axes[2].plot(X_test,y3_test_lm,'-',color='blue')
    # Fixed limits on the outer panels to match plot_test_data.
    axes[0].set_xlim((0,10));axes[0].set_ylim((-5,28))
    axes[2].set_xlim((0,10));axes[2].set_ylim((-5,28))
    axes[0].legend(loc='lower right',fontsize = 8)
    fig2.tight_layout()
    if show:
        fig2.show()
    return fig2,axes
# Figure 3, basic cal-val example
def plot_cal_val(y_train,y_train_rf,y_test,y_test_rf,show=True):
    """Calibration/validation cross-plots with R^2 and RMSE annotations.

    Left panel: training (calibration) observed vs modelled.
    Right panel: test (validation) observed vs modelled.
    Returns (figure, axes).
    """
    calval_df = pd.DataFrame(data = {'val_obs': y_test,
                                     'val_model': y_test_rf,
                                     'cal_obs': y_train,
                                     'cal_model': y_train_rf})
    fig3,axes= plt.subplots(nrows=1,ncols=2,figsize=[8,4])
    sns.regplot(x='cal_obs',y='cal_model',data=calval_df,marker='+',
                truncate=True,ci=None,ax=axes[0])
    axes[0].annotate('calibration R$^2$ = %.02f\nRMSE = %.02f' %
                     (r2_score(y_train,y_train_rf),np.sqrt(mean_squared_error(y_train,y_train_rf))),
                     xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='left', verticalalignment='top')
    sns.regplot(x='val_obs',y='val_model',data=calval_df,marker='+',
                truncate=True,ci=None,ax=axes[1])
    axes[1].annotate('validation R$^2$ = %.02f\nRMSE = %.02f'
                     % (r2_score(y_test,y_test_rf),np.sqrt(mean_squared_error(y_test,y_test_rf))),
                     xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='left', verticalalignment='top')
    # Equal aspect so the 1:1 relationship is readable.
    axes[0].axis('equal')
    axes[1].axis('equal')
    fig3.tight_layout()
    if show:
        fig3.show()
    return fig3,axes
# figure 4,cal_val with regression line
def plot_cal_val_agb(y_train,y_train_rf,y_test,y_test_rf,show=True):
    """Cal/val cross-plots for dense data (e.g. AGB), with a fitted
    regression line and nearly transparent points to show density.

    Returns (figure, axes).
    """
    cal_df = pd.DataFrame(data = {'cal_obs': y_train,
                                  'cal_model': y_train_rf})
    val_df = pd.DataFrame(data = {'val_obs': y_test,
                                  'val_model': y_test_rf})
    fig4,axes= plt.subplots(nrows=1,ncols=2,figsize=[8,4])
    sns.regplot(x='cal_obs',y='cal_model',data=cal_df,marker='.',
                truncate=True,ci=None,ax=axes[0],
                scatter_kws={'alpha':0.01,'edgecolor':'none'},
                line_kws={'color':'k'})
    axes[0].annotate('calibration R$^2$ = %.02f\nRMSE = %.02f' %
                     (r2_score(y_train,y_train_rf),np.sqrt(mean_squared_error(y_train,y_train_rf))),
                     xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='left', verticalalignment='top')
    sns.regplot(x='val_obs',y='val_model',data=val_df,marker='.',
                truncate=True,ci=None,ax=axes[1],
                scatter_kws={'alpha':0.01,'edgecolor':'none'},
                line_kws={'color':'k'})
    axes[1].annotate('validation R$^2$ = %.02f\nRMSE = %.02f'
                     % (r2_score(y_test,y_test_rf),np.sqrt(mean_squared_error(y_test,y_test_rf))),
                     xy=(0.05,0.95), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='left', verticalalignment='top')
    axes[0].axis('equal')
    axes[1].axis('equal')
    fig4.tight_layout()
    if show:
        fig4.show()
    return fig4,axes
# figure 5: Importances
def plot_importances(imp_df,show=True):
    """Bar-plot permutation (left) and gini (right) variable importances.

    Expects imp_df with columns 'variable', 'permutation_importance' and
    'gini_importance'. Returns (figure, axes).
    """
    fig5,axes= plt.subplots(nrows=1,ncols=2,figsize=[8,8],sharex=True)
    sns.barplot(x='permutation_importance',y='variable',data=imp_df,ax=axes[0])
    axes[0].annotate('permutation importance',
                     xy=(0.95,0.98), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='right', verticalalignment='top')
    sns.barplot(x='gini_importance',y='variable',data=imp_df,ax=axes[1])
    axes[1].annotate('gini importance',
                     xy=(0.95,0.98), xycoords='axes fraction',backgroundcolor='none',
                     horizontalalignment='right', verticalalignment='top')
    # Variable labels are shared with the left panel; hide the duplicates.
    plt.setp(axes[1].get_yticklabels(),visible=False)
    axes[1].yaxis.set_ticks_position('left')
    fig5.tight_layout()
    if show:
        fig5.show()
    return fig5,axes
# Plot partial dependencies
def plot_partial_dependencies_simple(rf, X, x_label=None, y_label=None,
                        variable_position=0, show=True):
    """Plot the partial dependence of rf's prediction on one variable.

    Sweeps the chosen variable over its observed range while holding all
    other variables at their mean, then plots the model response.
    Returns (figure, axis).
    """
    n_variables=X.shape[1]
    # NOTE(review): the +1 extends the sweep one unit past the observed
    # max — the multiple-curve variant below omits it; confirm intent.
    var_ = np.linspace(np.min(X[:,variable_position]),np.max(X[:,variable_position])+1,200)
    X_RM = np.zeros((var_.size,n_variables))
    for i in range(0,n_variables):
        if i == variable_position:
            X_RM[:,i] = var_.copy()
        else:
            X_RM[:,i] = np.mean(X[:,i])
    # predict with rf model
    y_RM = rf.predict(X_RM)
    # now plot
    fig6,ax = plt.subplots(figsize=[8,5])
    ax.plot(var_, y_RM,'-')
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        ax.set_ylabel(y_label)
    fig6.tight_layout()
    if show:
        fig6.show()
    return fig6, ax
def plot_partial_dependencies_multiple(rf, X, x_label=None, y_label=None,
                        variable_position=0, show=True):
    """Plot the mean-response partial dependence plus 20 instance curves.

    The main curve holds all other variables at their mean; each grey
    curve holds them at the values of one randomly sampled data row.
    Returns (figure, axis).
    """
    n_variables=X.shape[1]
    var_ = np.linspace(np.min(X[:,variable_position]),np.max(X[:,variable_position]),200)
    X_RM = np.zeros((var_.size,n_variables))
    for i in range(0,n_variables):
        if i == variable_position:
            X_RM[:,i] = var_.copy()
        else:
            X_RM[:,i] = np.mean(X[:,i])
    # predict with rf model
    y_RM = rf.predict(X_RM)
    # now plot
    fig7,ax = plt.subplots(figsize=[8,5])
    N_iterations = 20
    for i in range(0,N_iterations):
        # BUG FIX: randint's upper bound is exclusive; the original used
        # X.shape[0]+1, which could sample an out-of-range row index.
        sample_row = np.random.randint(0,X.shape[0])
        X_i = np.zeros((var_.size,n_variables))
        for j in range(0,n_variables):
            if j == variable_position:
                X_i[:,j] = var_.copy()
            else:
                X_i[:,j] = (X[sample_row,j])
        # predict with rf model
        y_i = rf.predict(X_i)
        ax.plot(var_, y_i,'-',c='0.5',linewidth=0.5,alpha=0.8)
    ax.plot(var_, y_RM,'-') # also plot line from before for comparison
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        ax.set_ylabel(y_label)
    fig7.tight_layout()
    if show:
        fig7.show()
    # BUG FIX: the original returned the undefined name `axis` (NameError).
    return fig7,ax
# just add new partial dependency onto existing plot
def add_partial_dependency_instance(ax,rf,X,variable_position=1,show=True,N_iterations=1):
    """Add partial-dependence instance curve(s) onto an existing axis.

    Sweeps the chosen variable over its observed range with the other
    variables fixed at a randomly sampled row's values. Returns the
    artist list of the last curve plotted.
    """
    # Fall back to the current axes when ax is falsy (None).
    ax=ax or plt.gca()
    n_variables=X.shape[1]
    var_ = np.linspace(np.min(X[:,variable_position]),np.max(X[:,variable_position]),200)
    for i in range(0,N_iterations):
        # BUG FIX: randint's upper bound is exclusive; X.shape[0]+1 could
        # sample an out-of-range row index and raise IndexError.
        sample_row = np.random.randint(0,X.shape[0])
        X_i = np.zeros((var_.size,n_variables))
        for j in range(0,n_variables):
            if j == variable_position:
                X_i[:,j] = var_.copy()
            else:
                X_i[:,j] = (X[sample_row,j])
        # predict with rf model
        y_i = rf.predict(X_i)
    return ax.plot(var_, y_i,'-',c='0.5',linewidth=0.5,alpha=0.8)
|
from database import Database
from notifier import Notifier
import typer, arrow, dotenv
from plan import Plan
# Load configuration from the local .env file.
CONFIG = dotenv.dotenv_values('.env')
DATABASE: str = CONFIG['database']  # location/identifier of the plan store
USERNAME: str = CONFIG['username']  # credential handed to Notifier
PASSWORD: str = CONFIG['password']  # credential handed to Notifier
db = Database(DATABASE)
app = typer.Typer()
@app.command()
def add(name: str, due_date: str = None):
    """Add a plan into the database."""
    # BUG FIX: the original default was `arrow.utcnow()`, evaluated ONCE at
    # import time, so every plan added without an explicit date received the
    # process start time. Compute "now" at call time instead.
    try:
        due = arrow.utcnow() if due_date is None else arrow.get(due_date)
        plan = Plan(name, due)
        db.write(db.read() + [plan])
        typer.echo(plan)
    except arrow.ParserError:
        typer.echo('The time was not correctly formatted.')
@app.command()
def delete(name: str):
    """Remove a plan from the database by name."""
    # BUG FIX: materialize the result — the original handed db.write a lazy
    # `filter` object, which serializers generally cannot persist.
    # (verify against the Database.write implementation)
    db.write([plan for plan in db.read() if plan.name != name])
@app.command()
def list():
    """List out all active plans, one numbered line per plan."""
    plans = db.read()
    if len(plans) == 0:
        # fixed: was an f-string with no placeholders
        typer.echo('No plans set!')
    for idx, plan in enumerate(plans):
        typer.echo(f'{idx} :: {plan}')
@app.command()
def edit(idx: int, name: str = None, due_date: str = None):
    """Edit an already created plan."""
    plans = db.read()
    # Only a valid, non-negative index updates and persists anything,
    # mirroring the original enumerate-and-compare loop.
    if 0 <= idx < len(plans):
        if name is not None:
            plans[idx].name = name
        if due_date is not None:
            plans[idx].due_date = arrow.get(due_date)
        db.write(plans)
@app.command()
def listen():
    """Notify user when a plan is due. Or when the user receives an email."""
    # Blocks: delegates to Notifier's event loop with the .env credentials.
    Notifier(db, USERNAME, PASSWORD).listen()
# Run the Typer CLI when executed as a script.
if __name__ == '__main__':
    app()
|
from urllib import request
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
url = r"https://www.nvinq.jra.go.jp/jra/servlet/JRAVotereference"
dp = r"C:\driver\94\chromedriver.exe"  # local chromedriver (v94) path
options = Options()
# options.add_argument('--headless')
# NOTE(review): passing the driver path positionally and the
# find_element_by_name helpers below were removed in Selenium 4 —
# confirm the pinned selenium version before upgrading.
driver = webdriver.Chrome(dp, options=options)
driver.get(url)
# Grab the login form fields (user id, password, PARS code).
uid = driver.find_element_by_name('UID')
pwd = driver.find_element_by_name('PWD')
pars = driver.find_element_by_name('PARS')
|
# 冻结
# VGG19模型训练网络
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
from keras.applications import ResNet50,VGG19
import matplotlib.pyplot as plt
# tf.test.gpu_device_name()
model_name = "Frozen"  # model tag, used in plot titles and output file names
train_epochs0 = 10  # number of training epochs
def show_history_mse(history0):
    """Plot train/validation loss from a Keras History object and save
    the figure as drive/app/<model_name>_mse.jpg."""
    plt.plot(history0.history['loss'])
    plt.plot(history0.history['val_loss'])
    plt.title(model_name + ' mse')
    plt.ylabel('loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    # BUG FIX: the original passed `api=200`; the savefig resolution
    # keyword is `dpi`.
    plt.savefig('drive/app/' + model_name + '_mse.jpg', dpi=200)
    plt.show()
def show_history_ce(history0):
    """Plot train/validation cross-entropy from a Keras History object
    and save the figure as drive/app/<model_name>_ce.jpg."""
    for series in ('ce', 'val_ce'):
        plt.plot(history0.history[series])
    plt.title(model_name + ' ce')
    plt.ylabel('loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig('drive/app/' + model_name + '_ce.jpg')
    plt.show()
# Load pre-saved feature/target arrays.
X = np.load('drive/app/X_data.npy')
Y = np.load('drive/app/Y_data.npy')
# Bring X onto a scale comparable with Y.
X = X / 25
print("read the data")
# Slice into train/test sets (first 5000 samples train, rest test).
x_train = X[:5000]
y_train = Y[:5000]
x_test = X[5000:]
y_test = Y[5000:]
print("train data and test data")
#resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(220, 220, 3), pooling='avg')
# Pretrained VGG19 feature extractor (no classifier head, avg pooling).
vgg=VGG19(include_top=False, weights='imagenet', input_shape=(220, 220, 3), pooling='avg')
model = Sequential()
model.add(vgg)
model.add(Dense(1))  # single-output regression head
# Freeze the pretrained base so only the Dense head is trained.
# NOTE(review): the original comment said ResNet50, but the frozen layer is VGG19.
model.layers[0].trainable = False
# print(resnet.summary())
print(model.summary())
print("compile")
model.compile(loss='mean_squared_error', optimizer=Adam())
print("fit")
Hist = model.fit(x_train, y_train, epochs=train_epochs0, batch_size=64, validation_data=(x_test, y_test))
print(Hist.history)
show_history_mse(Hist)
# show_history_ce(Hist)
# loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
# print(loss_and_metrics)
# Free the large arrays once training is done.
del X
del Y
del x_train
del y_train
del x_test
del y_test
# model.save('drive/app/my_model.h5')
|
class Solution:
    def generate(self, numRows):
        """Return the first numRows rows of Pascal's triangle.

        BUG FIX: the original was missing the ':' after the def line,
        which is a syntax error.
        """
        if numRows == 0:
            return []
        elif numRows == 1:
            return [[1]]
        elif numRows == 2:
            return [[1],[1,1]]
        tangle_list = [[1],[1,1]]
        for i in range(2,numRows):
            # Each interior entry is the sum of the two entries above it.
            temp_list = []
            temp_list.append(1)
            for j in range(1,i):
                temp_list.append(tangle_list[i-1][j-1]+tangle_list[i-1][j])
            temp_list.append(1)
            tangle_list.append(temp_list)
        return tangle_list
if __name__ == '__main__':
    # Demo: print the first five rows of Pascal's triangle.
    mysolution = Solution()
    print(mysolution.generate(5))
|
from tornado.ioloop import IOLoop
from log import logger
from server import Server
from echo import Echo
class EchoServer(Server):
    """Server that echoes every received Echo message back to its sender."""
    def __init__(self):
        Server.__init__(self)
        # Route incoming Echo messages to on_echo.
        self.register(Echo, self.on_echo)
    def on_echo(self, connection, header, msg):
        """Log the message and send it straight back on the same connection."""
        logger.info('connection:{0}, msg:{1}'.format(connection, msg))
        connection.send_message(msg)
def main():
    """Start the echo server on port 1327 and run the Tornado IO loop forever."""
    server = EchoServer()
    server.listen(1327)
    IOLoop.instance().start()
if __name__ == '__main__':
    main()
|
import math
class Solution:
    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares that sum to n.

        Bottom-up DP: a[i] is the computed answer for i.
        NOTE(review): besides the standard `1 + a[i - r*r]` transition
        (largest square once), this only considers using each smaller
        square d exactly i//d times plus a[i % d]; confirm this covers
        every optimal decomposition before relying on it.
        """
        a = [0]*(n+1)
        for i in range(1,n+1):
            r = math.sqrt(i)
            if r.is_integer():
                # i is itself a perfect square
                a[i] = 1
            else:
                r = int(r)
                # Candidate 1: one copy of the largest square r*r,
                # plus the best answer for the remainder.
                ind = i - (r*r)
                v1 = 1+a[ind]
                v2 = v1
                for j in range(1,r):
                    # Candidate: i//d copies of square d plus remainder.
                    d = (r-j)*(r-j)
                    try:
                        nv = i//d + a[(i%d)]
                    except:
                        # NOTE(review): d >= 1 and i % d <= n here, so this
                        # except branch looks unreachable (dead code).
                        nv = v1
                    if v2 > nv:
                        v2 = nv
                a[i] = min(v1,v2)
        # print(a)
        return(a[-1])
# https://www.youtube.com/watch?v=CqvZ3vGoGs0&list=PL-osiE80TeTt2d9bfVyTiXJA-UTHn6WwU&index=9
import random
import sys
import math
import os
import datetime
import antigravity
import calendar
# Make the local module directory importable before the from-import below.
sys.path.append('/home/maxh/Python/My-Modules/')
from my_module import find_index, test
# from my_module import *
# import my_module as mm
courses = ['History', 'Math', 'Physics', 'Comp-Sci']
# index = mm.find_index(courses, 'Math')
index = find_index(courses, 'Math')  # locate 'Math' via the custom helper
print(index)
print(test)  # value exported by my_module
print(sys.path)
# Stdlib demos: trig on radians, random choice, dates, calendar, cwd.
rads = math.radians(90)
print(random.choice(courses))
print(math.sin(rads))
print(datetime.date.today())
print(calendar.isleap(2021))
print(os.getcwd())
print(os.__file__)  # where the os module lives on disk
|
from itertools import groupby
# Run-length encode the digits read from stdin, e.g. "1222" -> (1, 1) (3, 2).
# FIX: the original bound the input to the name `str`, shadowing the builtin.
text = input()
print(*[(len(list(group)), int(key)) for key, group in groupby(text)])
#!/usr/bin/env python
import argparse
import math
import sys
from collections import Counter
import numpy as np
import sklearn.metrics as metrics
from sklearn import cross_validation
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import codecs
import os
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from collections import Counter
def main():
    """Cross-validate each system output in `folder` against the gold
    training scores and print its mean per-fold cosine similarity.
    (Python 2 script — note the print statement below.)
    """
    #goldfile="../twokenized/train+trial.dat"
    goldfile="../twokenized/train.dat"
    goldscores=read_figsenti(goldfile)
    # NOTE(review): sklearn.cross_validation was removed in favour of
    # sklearn.model_selection in modern releases — confirm pinned version.
    cross=cross_validation.KFold(len(goldscores),n_folds=10,indices=True)
    folder="dev/"
    #folder="final/"
    trainfiles=[x for x in os.listdir(folder) if x.startswith("train.")]
    for file in trainfiles:
        scores = read_figsenti(folder+"/"+file)
        acc=[]
        for train_index, test_index in cross:
            #print("TRAIN:", len(train_index), "TEST:", len(test_index))
            y_test,y_pred = goldscores[test_index], scores[test_index]
            # Round both gold and predicted scores to the nearest integer.
            y_pred=[math.floor(x + 0.5) for x in y_pred]
            y_test=[math.floor(x + 0.5) for x in y_test]
            acc.append(cosine_similarity(y_test,y_pred)[0][0])
            #print acc[-1]
        print "{}\t{}".format(np.mean(acc),file)
def read_figsenti(fname):
    """Read a tab-separated id/score file and return the scores.

    Blank lines are skipped; the first (id) column is ignored.

    Args:
        fname: path to a UTF-8 encoded data file.

    Returns:
        numpy.ndarray of float scores, in file order.
    """
    scores = []
    for line in codecs.open(fname, encoding="utf-8"):
        if line.strip():
            fields = line.strip().split("\t")
            # fields[0] is the item id; only the score column is used.
            # (removed the original's unused `tid` local)
            scores.append(float(fields[1]))
    return np.array(scores)
# Script entry point.
if __name__=="__main__":
    main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 05:56:17 2019
@author: kanchana
"""
import pandas as pd
import numpy as np
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Load the UCI Facebook metrics dataset (semicolon-separated).
fb=pd.read_csv('dataset_Facebook.csv',sep=';')
print(fb.columns)
print(fb.isnull().sum())
columns_with_nan = fb.columns[fb.isna().any()].tolist()  # NOTE(review): computed but unused below
# Drop rows with missing Paid/like/share values.
fb2=fb[~fb['Paid'].isnull()]
fb2=fb2[~fb2['like'].isnull()]
fb2=fb2[~fb2['share'].isnull()]
# Features: every column except column 10; target: column 10
# ("Lifetime Post Consumers", per the axis labels below).
X=pd.concat([fb2.iloc[:,:10],fb2.iloc[:,11:]], axis=1)
y=fb2.iloc[:,10].values
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X['Type'] = labelencoder_X.fit_transform(X.iloc[:,1])  # encode post Type as integers
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X_train,y_train)
# Predicting a new result
y_pred=regressor.predict(X_test)
y_pred=regressor.predict(X_train)  # NOTE(review): overwrites the test predictions above
# Training-set scatter: actual (green) vs predicted (red) by sample index.
plt.figure()
X_grid = np.arange(1, X_train.shape[0]+1, 1)
plt.scatter(X_grid, y_train, c='g', label='data', zorder=1,
            edgecolors=(0, 0, 0))
plt.scatter(X_grid, y_pred, c='r', label='test', zorder=1,
            edgecolors=(0, 0, 0))
plt.title("Decision Tree Model - Training Dataset")
# Test-set scatter: actual vs predicted by sample index.
y_pred=regressor.predict(X_test)
plt.figure()
X_grid = np.arange(1, X_test.shape[0]+1, 1)
plt.scatter(X_grid, y_test, c='g', label='data', zorder=1,
            edgecolors=(0, 0, 0))
plt.scatter(X_grid, y_pred, c='r', label='test', zorder=1,
            edgecolors=(0, 0, 0))
plt.title("Decision Tree Model - Test Dataset")
# Actual-vs-predicted cross plot.
plt.figure()
plt.scatter(y_test,y_pred)
plt.xlabel("Lifetime Post Consumers - Actual")
plt.ylabel("Lifetime Post Consumers - Predicted ")
plt.title("Decision Tree Regressor Model- Actual vs Predict")
print(regressor.feature_importances_)
# Fit regression model
# Compare shallow (depth 2) vs deeper (depth 5) trees.
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X_train, y_train)
regr_2.fit(X_train, y_train)
# Predict
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
X_test_grid = np.arange(1, X_test.shape[0]+1, 1)
plt.scatter(X_test_grid, y_test, s=20, edgecolor="black",
            c="darkorange", label="data")
plt.plot(X_test_grid, y_1, color="cornflowerblue",
         label="max_depth=2", linewidth=2)
plt.plot(X_test_grid, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("X_grid")
plt.ylabel("Lifetime Post Consumers")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# Plot the results
# Same comparison zoomed to the first 10 test samples.
plt.figure()
X_test_grid = np.arange(1, 11, 1)
plt.scatter(X_test_grid, y_test[:10], s=20, edgecolor="black",
            c="darkorange", label="data")
plt.plot(X_test_grid, y_1[:10], color="cornflowerblue",
         label="max_depth=2", linewidth=2)
plt.plot(X_test_grid, y_2[:10], color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("X_grid")
plt.ylabel("'Lifetime Post Consumers'")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# Export the full tree for Graphviz visualization.
from sklearn.tree import export_graphviz
export_graphviz(regressor, out_file='tree.dot')
# AdaBoost ensemble of depth-5 trees, compared on the first 50 samples.
from sklearn.ensemble import AdaBoostRegressor
regr_3 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=5),
                          n_estimators=300, random_state=123)
regr_3.fit(X_train, y_train)
y_3 = regr_3.predict(X_test)
plt.figure()
X_test_grid = np.arange(1, 51, 1)
plt.scatter(X_test_grid, y_test[:50], s=20, edgecolor="black",
            c="darkorange", label="data")
plt.plot(X_test_grid, y_1[:50], color="cornflowerblue",
         label="max_depth=2", linewidth=2)
plt.plot(X_test_grid, y_3[:50], color="yellowgreen", label="n_estimators=300,max_depth=5", linewidth=2)
plt.xlabel("X_grid")
plt.ylabel("'Lifetime Post Consumers'")
plt.title("Decision Tree Regression with Adabooth (green line)")
plt.legend()
plt.show()
# Rank features by importance (descending).
importances = regressor.feature_importances_
indices = np.argsort(importances)[::-1]
print(dict(zip(fb.columns, regressor.feature_importances_)))
for f in range(X.shape[1]):
    # NOTE(review): fb.columns[f] is indexed by rank position, not by
    # indices[f], so the printed names may not match the importances.
    print("%d. feature %d %s (%f)" % (f + 1, indices[f], fb.columns[f],importances[indices[f]]))
|
'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 10.1
'''
# Count 'From' header lines per sender in an mbox file, then print the
# most frequent sender and its message count.
try:
    fName = input("Enter file name: ")
    if fName == "": fName = "mbox-short.txt"
    file = open(fName)
# FIX: was a bare `except:`, which also swallowed KeyboardInterrupt etc.
except OSError:
    print("Invalid file")
    exit()
emailsDict = dict()
email = None
for line in file:
    words = line.split()
    # Only 'From <address> ...' header lines are of interest.
    if len(words) == 0 or words[0] != 'From': continue
    try:
        email = words[1]
    # FIX: narrowed from a bare `except:` — only a missing address field
    # ('From' with nothing after it) can fail here.
    except IndexError:
        print("Cannot parse this '%s' line and get the email address" % line.strip())
    if email is not None: emailsDict[email] = emailsDict.get(email, 0) + 1
# Sort (count, email) pairs descending and report the top sender.
emailsList = list(sorted([(value, key) for (key, value) in emailsDict.items()], reverse=True))
print("%s %d" % (emailsList[0][1], emailsList[0][0]))
|
import numpy as np
import scipy as sp
class kernel(object):
    """Base class for covariance kernels: a callable k(x1, x2)."""
    def __init__(self):
        pass
    def __call__(self, x1, x2):
        # Default kernel is identically zero.
        return 0.


class sqexp1d(kernel):
    """
    1d squared exponential kernel
    k = A exp(-0.5(x2-x1)**2/l**2)
    dn methods are k( x1, d^n x2/dx^n) the caller is responsible for sign changes for other derivatives
    """
    def __init__(self, A, l):
        super(sqexp1d, self).__init__()
        self.hyperpara = (A, l,)  # raw hyperparameters, kept for reference
        self.A = A
        self.overl2 = 1. / l**2   # precomputed 1/l^2
        return
    # BUG FIX throughout: `sp.exp` — SciPy's deprecated NumPy alias — was
    # removed in modern SciPy releases; use numpy's exp instead.
    def __call__(self, x1, x2):
        """Kernel value k(x1, x2)."""
        return self.A*np.exp(-0.5*self.overl2*(x2-x1)**2)
    def d1(self, x1, x2):
        """First derivative k(x1, d x2/dx)."""
        return -(x2-x1)*self.overl2*self.A*np.exp(-0.5*self.overl2*(x2-x1)**2)
    def d2(self, x1, x2):
        """Second derivative k(x1, d^2 x2/dx^2)."""
        return self.overl2*(self.overl2*(x2-x1)**2 - 1)*self.A*np.exp(-0.5*self.overl2*(x2-x1)**2)
    def d3(self, x1, x2):
        """Third derivative k(x1, d^3 x2/dx^3)."""
        x = (x2 - x1)
        return (self.overl2**2)*x*(3-(x**2)*self.overl2)*self.A*np.exp(-0.5*self.overl2*(x2-x1)**2)
    def d4(self, x1, x2):
        """Fourth derivative k(x1, d^4 x2/dx^4)."""
        x = (x2-x1)
        return (self.overl2**2)*(3-6*self.overl2*(x**2)+(x**4)*self.overl2**2)*self.A*np.exp(-0.5*self.overl2*(x2-x1)**2)
#Create Flower
#Date August 22, 2018
#Udacity Python exersise
#Dipen Patel
import turtle
def draw_circle(circle):
    """Draw one circle of radius 100 with the given turtle, then turn
    the pen 120 degrees to the left."""
    circle.circle(100)
    circle.left(120)
def draw_line(line):
    """Turn the given turtle 90 degrees right, then draw a 350-px line."""
    line.right(90)
    line.forward(350)
def draw_multiple_triangle ():
    """Draw a flower: 79 overlapping circles rotated 5 degrees apart,
    plus a stem, then wait for a click to close the window."""
    window = turtle.Screen ()
    window.bgcolor ("white")
    window.screensize(400,500)
    # Turtle that draws the petals.
    circles = turtle.Turtle()
    circles.color("green")
    circles.shape("turtle")
    circles.speed(20)
    # Turtle that draws the stem.
    lines = turtle.Turtle()
    lines.color("green")
    lines.shape("turtle")
    lines.speed(5)
    # Rotate slightly between circles so they fan out into a flower.
    for a in range (1,80):
        draw_circle(circles)
        circles.right(5)
    for a in range (1,2):
        draw_line(lines)
    window.exitonclick ()
# Run the drawing when the module is executed.
draw_multiple_triangle ()
|
"""Device Group Services Classes."""
import logging
from .devicegrouprecords import DeviceGroupRecords
from .devicegrouprecords import DeviceGroups
logging.debug("In the device_group_services __init__.py file.")
# BUG FIX: the original used ':' (a bare annotation), which never assigned
# __all__, so the package's star-import export list was never defined.
__all__ = ["DeviceGroupRecords", "DeviceGroups"]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 15:44:54 2015
@author: LIght
"""
from sklearn.decomposition import FactorAnalysis
import pandas as pd
class FactAnalysis:
    """Groups movies into latent factors via scikit-learn FactorAnalysis."""
    @staticmethod
    def groupMovieGenre(user_item_matrix,item_df):
        """Decompose the user-item matrix and relate factors to genres.

        Args:
            user_item_matrix: users x items rating matrix.
            item_df: per-item DataFrame containing the one-hot genre columns.

        Returns:
            (ret_fa, movie_fa): per-factor genre indicator sums, and the
            item factor loadings concatenated with item_df.
        """
        genre_num = 5
        fa_model = FactorAnalysis(n_components=genre_num)
        # FIX: the original called fit() and then fit_transform(), fitting
        # the model twice, and never used the transformed user matrix W.
        fa_model.fit(user_item_matrix)
        ## decomposed item np array, H represents semantic item info
        H = fa_model.components_
        movie_fa = pd.DataFrame(H).T
        movie_fa = pd.concat([movie_fa,item_df],axis=1)
        movie_genre = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
        ## For every factor, sum the genre indicators of movies whose
        ## loading on that factor exceeds 0.05.
        ret_fa = pd.DataFrame(movie_fa[movie_fa[0]>0.05][movie_genre].sum(axis=0))
        for i in range(genre_num):
            ret_fa[str(i)] = pd.DataFrame(movie_fa[movie_fa[i]>0.05][movie_genre].sum(axis=0))
        return ret_fa, movie_fa
|
#!/usr/bin/env python3.6
# -*- coding: iso-8859-15 -*-
import numpy as np
# Linear map applied repeatedly to each start point.
A = np.array([[0.86, 0.08],
              [-0.12, 1.14]])
# NOTE(review): this literal is dead code — it is immediately overwritten
# by the random points two statements below.
start_points = np.array([[10, 50],
                         [100, 200],
                         [50, 100],
                         [70, 120]])
start_points = np.random.randint(0, 300, (300, 2))
max_points = 100            # iterations per start point
xmin, xmax = -300, 300      # clipping window
ymin, ymax = -300, 300
for i, start_point in enumerate(start_points):
    current_point = start_point
    next_point = current_point
    for _ in range(max_points):
        current_point = next_point
        next_point = np.dot(A, current_point)   # x_{k+1} = A @ x_k
        if (xmin <= next_point[0] <= xmax) and (ymin <= next_point[1] <= ymax):
            # Emit "x y index" while the orbit stays inside the window.
            print(' '.join(map(str, next_point)), i)
        else:
            break
|
from django.db import models
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Category(models.Model):
    """Product category with an optional display-friendly name."""
    class Meta:
        verbose_name_plural = 'Categories'
    name = models.CharField(max_length=254)
    friendly_name = models.CharField(max_length=254, null=True, blank=True)
    def __str__(self):
        return self.name
    def get_friendly_name(self):
        """Return the human-readable name (may be None when unset)."""
        return self.friendly_name
class Product(models.Model):
    """A sellable product, optionally linked to a Category."""
    # SET_NULL keeps products around when their category is deleted.
    category = models.ForeignKey('Category', null=True, blank=True,
                                 on_delete=models.SET_NULL)
    sku = models.CharField(max_length=254, null=True, blank=True)
    name = models.CharField(max_length=254)
    description = models.TextField()
    price = models.DecimalField(max_digits=6, decimal_places=2)
    rating = models.DecimalField(max_digits=6, decimal_places=2,
                                 null=True, blank=True)
    # Either an external image URL or an uploaded image may be provided.
    image_url = models.URLField(max_length=1024, null=True, blank=True)
    image = models.ImageField(null=True, blank=True)
    def __str__(self):
        return self.name
class Artworks(models.Model):
    """A single artwork listing with pricing, category and dimensions."""
    class art_category(models.TextChoices):
        # Two-letter stored value, human-readable label.
        PAINTING = 'PA', _('Painting')
        DRAWING = 'DR', _('Drawing')
        CERAMIC = 'CE', _('Ceramics')
        SCULPTURE = 'SC', _('Sculpture')
        PRINTS = 'PR', _('Prints')
        CARDS = 'CA', _('Cards')
        BOOKS = 'BO', _('Books')
        MIXED = 'MX', _('Mixed')
    title = models.CharField(max_length=254, null=False, blank=False)
    # NOTE(review): a plain integer rather than a ForeignKey — confirm the
    # artist table lives outside this app before changing.
    artist_id = models.IntegerField()
    price = models.DecimalField(max_digits=6, decimal_places=2)
    sold = models.BooleanField(default=False)
    category = models.CharField(max_length=2,
                                choices=art_category.choices,
                                default=art_category.PAINTING,
                                )
    # NOTE(review): no default/auto_now_add — callers must supply this.
    created_at = models.DateTimeField()
    image_path = models.URLField(max_length=1024, null=True, blank=True)
    # Physical dimensions; depth defaults to 0 for flat works.
    height = models.DecimalField(max_digits=5, decimal_places=2)
    width = models.DecimalField(max_digits=5, decimal_places=2)
    depth = models.DecimalField(max_digits=5, decimal_places=2, default=0)
    def __str__(self):
        return self.title
|
import numpy as np
from ..kernels.kernelsForNum import linear_kernel, polynomial_kernel, rbf_kernel
from ..kernels.kernelsForString import get_spectrum_kernel,get_mismatch_kernel
class baseKernel(object):
    """Common machinery for kernel methods.

    Maps a kernel name to its implementation, stores its hyper-parameters
    and builds Gram matrices. Subclasses implement fit_use_K/predict_use_K
    on precomputed Gram matrices; fit/predict wrap them for raw data.
    """
    # Registries of the available kernel implementations.
    dict_numeric_kernels = {'linear': linear_kernel,'polynomial': polynomial_kernel,'rbf': rbf_kernel,}
    dict_string_kernels = {'spectrum': get_spectrum_kernel,'mismatch': get_mismatch_kernel}

    def __init__(self, kernel_name='rbf', **kwargs):
        self.kernel_name_ = kernel_name
        self.build_kernel_function()
        self.get_kernel_params(**kwargs)

    def build_kernel_function(self):
        """Resolve kernel_name_ to a callable in kernel_function_."""
        if self.kernel_name_ in self.dict_numeric_kernels:
            self.kernel_function_ = self.dict_numeric_kernels[self.kernel_name_]
        elif self.kernel_name_ in self.dict_string_kernels:
            self.kernel_function_ = self.dict_string_kernels[self.kernel_name_]
        else:
            # BUG FIX: the original only printed a warning and left
            # kernel_function_ unset, which surfaced later as a confusing
            # AttributeError. Fail fast with a clear error instead.
            raise ValueError(
                "Unknown kernel {!r}: it is not implemented".format(self.kernel_name_))

    def get_kernel_params(self, **kwargs):
        """Collect the hyper-parameters relevant to the selected kernel."""
        self.kernel_params_ = {}
        # numeric kernels
        if self.kernel_name_ == 'rbf':
            self.kernel_params_['gamma'] = kwargs.get('gamma', 1)
        if self.kernel_name_ == 'polynomial':
            self.kernel_params_['degree'] = kwargs.get('degree',2)
        # string kernels
        if self.kernel_name_ == 'spectrum':
            self.kernel_params_['k'] = kwargs.get('k',5)
        if self.kernel_name_ == 'mismatch':
            self.kernel_params_['k'] = kwargs.get('k',5)
            self.kernel_params_['m'] = kwargs.get('m',1)

    def build_gram_matrix(self, X, Y=None):
        """Return the Gram matrix K(X, Y) for the selected kernel."""
        K = self.kernel_function_(X,Y,**self.kernel_params_)
        return K

    # Raw-data wrappers around the Gram-matrix interface below.
    def fit(self, X_train, y_train):
        """Fit on raw data: build K(X_train) and delegate to fit_use_K."""
        self.X_train = X_train
        K_train = self.build_gram_matrix(X_train,None)
        return self.fit_use_K(K_train,y_train)

    def predict(self, X_test):
        """Predict for raw data via K(X_test, X_train)."""
        K_test = self.build_gram_matrix(X_test,self.X_train)
        return self.predict_use_K(K_test)

    def fit_use_K(self, K_train, y_train):
        """Subclass hook: fit from a precomputed Gram matrix."""
        pass

    def predict_use_K(self, K_test):
        """Subclass hook: predict from a precomputed Gram matrix."""
        pass
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for IAM API client."""
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.cloud.security.common.gcp_api import _base_repository
from google.cloud.security.common.gcp_api import api_helpers
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.gcp_api import repository_mixins
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class IamRepositoryClient(_base_repository.BaseRepositoryClient):
    """IAM API Repository client, exposing lazily-built sub-repositories."""
    def __init__(self,
                 quota_max_calls=None,
                 quota_period=1.0,
                 use_rate_limiter=True):
        """Constructor.
        Args:
            quota_max_calls (int): Allowed requests per <quota_period> for the
                API.
            quota_period (float): The time period to limit the requests within.
            use_rate_limiter (bool): Set to false to disable the use of a rate
                limiter for this service.
        """
        # Without a quota there is nothing to rate-limit against.
        if not quota_max_calls:
            use_rate_limiter = False
        # Sub-repositories are created lazily by the properties below.
        self._projects_serviceaccounts = None
        self._projects_serviceaccounts_keys = None
        super(IamRepositoryClient, self).__init__(
            'iam', versions=['v1'],
            quota_max_calls=quota_max_calls,
            quota_period=quota_period,
            use_rate_limiter=use_rate_limiter)
    @property
    def projects_serviceaccounts(self):
        """An _IamProjectsServiceAccountsRepository instance.
        Returns:
            object: An _IamProjectsServiceAccountsRepository instance.
        """
        # Lazy initialization: build the repository on first access only.
        if not self._projects_serviceaccounts:
            self._projects_serviceaccounts = self._init_repository(
                _IamProjectsServiceAccountsRepository)
        return self._projects_serviceaccounts
    @property
    def projects_serviceaccounts_keys(self):
        """An _IamProjectsServiceAccountsKeysRepository instance.
        Returns:
            object: An _IamProjectsServiceAccountsKeysRepository instance.
        """
        # Lazy initialization, mirroring projects_serviceaccounts above.
        if not self._projects_serviceaccounts_keys:
            self._projects_serviceaccounts_keys = self._init_repository(
                _IamProjectsServiceAccountsKeysRepository)
        return self._projects_serviceaccounts_keys
class _IamProjectsServiceAccountsRepository(
        repository_mixins.GetIamPolicyQueryMixin,
        repository_mixins.ListQueryMixin,
        _base_repository.GCPRepository):
    """Implementation of Iam Projects ServiceAccounts repository."""
    def __init__(self, **kwargs):
        """Constructor.
        Args:
            **kwargs (dict): The args to pass into GCPRepository.__init__()
        """
        super(_IamProjectsServiceAccountsRepository, self).__init__(
            key_field='name', max_results_field='pageSize',
            component='projects.serviceAccounts', **kwargs)
    def get_iam_policy(self, resource, fields=None, verb='getIamPolicy',
                       include_body=False, resource_field='resource', **kwargs):
        """Get Service Account IAM Policy.
        Args:
            self (GCPRespository): An instance of a GCPRespository class.
            resource (str): The id of the resource to fetch.
            fields (str): Fields to include in the response - partial response.
            verb (str): The method to call on the API.
            include_body (bool): If true, include an empty body parameter in the
                method args.
            resource_field (str): The parameter name of the resource field to
                pass to the method.
            **kwargs (dict): Optional additional arguments to pass to the query.
        Returns:
            dict: GCE response.
        """
        # The IAM getIamPolicy does not allow the 'body' argument, so this
        # overrides the default behavior by setting include_body to False.
        return repository_mixins.GetIamPolicyQueryMixin.get_iam_policy(
            self, resource, fields=fields, verb=verb, include_body=include_body,
            resource_field=resource_field, **kwargs)
    @staticmethod
    def get_name(project_id):
        """Returns a formatted name field to pass in to the API.
        Args:
            project_id (str): The id of the project to query.
        Returns:
            str: A formatted project name.
        """
        # Normalize bare project ids to the 'projects/<id>' form the API
        # expects; already-prefixed ids are passed through unchanged.
        if not project_id.startswith('projects/'):
            project_id = 'projects/{}'.format(project_id)
        return project_id
class _IamProjectsServiceAccountsKeysRepository(
        repository_mixins.ListQueryMixin,
        _base_repository.GCPRepository):
    """Implementation of Iam Projects ServiceAccounts Keys repository."""

    def __init__(self, **kwargs):
        """Constructor.

        Args:
            **kwargs (dict): The args to pass into GCPRepository.__init__()
        """
        super(_IamProjectsServiceAccountsKeysRepository, self).__init__(
            component='projects.serviceAccounts.keys',
            key_field='name',
            **kwargs)
class IAMClient(object):
    """IAM Client."""

    DEFAULT_QUOTA_PERIOD = 1.0
    USER_MANAGED = 'USER_MANAGED'
    SYSTEM_MANAGED = 'SYSTEM_MANAGED'
    KEY_TYPES = frozenset([USER_MANAGED, SYSTEM_MANAGED])

    def __init__(self, global_configs, **kwargs):
        """Initialize.

        Args:
            global_configs (dict): Global configurations.
            **kwargs (dict): The kwargs.
        """
        max_calls = global_configs.get('max_iam_api_calls_per_second')
        self.repository = IamRepositoryClient(
            quota_max_calls=max_calls,
            quota_period=self.DEFAULT_QUOTA_PERIOD,
            use_rate_limiter=kwargs.get('use_rate_limiter', True))

    def get_service_accounts(self, project_id):
        """Get Service Accounts associated with a project.

        Args:
            project_id (str): The project ID to get Service Accounts for.

        Returns:
            list: List of service accounts associated with the project.

        Raises:
            ApiExecutionError: When the API call fails.
        """
        name = self.repository.projects_serviceaccounts.get_name(project_id)
        try:
            paged_results = self.repository.projects_serviceaccounts.list(name)
            return api_helpers.flatten_list_results(paged_results, 'accounts')
        except (errors.HttpError, HttpLib2Error) as e:
            # Logger.warn() is deprecated; warning() is the supported name.
            LOGGER.warning(api_errors.ApiExecutionError(name, e))
            raise api_errors.ApiExecutionError('serviceAccounts', e)

    def get_service_account_iam_policy(self, name):
        """Get IAM policy associated with a service account.

        Args:
            name (str): The service account name to query, must be in the format
                projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}

        Returns:
            dict: The IAM policies for the service account.

        Raises:
            ApiExecutionError: When the API call fails.
        """
        try:
            return self.repository.projects_serviceaccounts.get_iam_policy(name)
        except (errors.HttpError, HttpLib2Error) as e:
            LOGGER.warning(api_errors.ApiExecutionError(name, e))
            raise api_errors.ApiExecutionError('serviceAccountIamPolicy', e)

    def get_service_account_keys(self, name, key_type=None):
        """Get keys associated with the given Service Account.

        Args:
            name (str): The service account name to query, must be in the format
                projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}
            key_type (str): Optional, the key type to include in the results.
                Can be None, USER_MANAGED or SYSTEM_MANAGED. Defaults to
                returning all key types.

        Returns:
            list: List with a dict for each key associated with the account.

        Raises:
            ValueError: Raised if an invalid key_type is specified.
            ApiExecutionError: When the API call fails.
        """
        # Validate outside the try block: ValueError was never caught by the
        # HTTP error handler below, so behavior is unchanged but intent is
        # clearer and the try body stays minimal.
        kwargs = {}
        if key_type:
            if key_type not in self.KEY_TYPES:
                raise ValueError(
                    'Key type %s is not a valid key type.' % key_type)
            kwargs['keyTypes'] = key_type
        try:
            results = self.repository.projects_serviceaccounts_keys.list(
                name, **kwargs)
            return api_helpers.flatten_list_results(results, 'keys')
        except (errors.HttpError, HttpLib2Error) as e:
            LOGGER.warning(api_errors.ApiExecutionError(name, e))
            raise api_errors.ApiExecutionError('serviceAccountKeys', e)
|
import json
from flask import Flask
from flask import request
app = Flask(__name__)


@app.route('/', methods=['POST'])
def reg():
    """Accept a JSON POST body and respond with a fixed JSON list.

    NOTE(review): the parsed payload is currently unused; it is still
    parsed so malformed JSON fails fast.
    """
    # FIX: the local used to be named `dict`, shadowing the builtin.
    payload = json.loads(request.get_data())
    result = [1, 2, 3, 4]
    return json.dumps(result)


if __name__ == '__main__':
    app.run(debug=False)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver.core import sudo
from revolver import command, package
def install():
    """Install git-flow using nvie's upstream installer script.

    NOTE(review): this pipes a remote script straight into a root shell
    (curl | bash under sudo) — consider pinning a commit/checksum or
    vendoring the installer before trusting it.
    """
    package.ensure(["curl", "git-core"])
    url = "https://raw.github.com" \
        + "/nvie/gitflow/develop/contrib/gitflow-installer.sh"
    sudo("curl -s %s | bash" % url)
def ensure():
    """Make sure git-flow is available, installing it when missing."""
    if command.exists("git-flow"):
        return
    install()
|
"""
<Reinforcement Learning and Control>(Year 2020)
by Shengbo Eben Li
@ Intelligent Driving Lab, Tsinghua University
ADP example for lane keeping problem in a circle road
[Method]
Model predictive control(MPC) as comparison
"""
from Solver import Solver
from Config import DynamicsConfig, GeneralConfig
from matplotlib import pyplot as plt
from utils import Numpy2Torch, step_relative
import numpy as np
import time
import os
import Dynamics
import torch
class Baseline(DynamicsConfig):
    """MPC baseline for the lane-keeping problem on a circle road.

    Rolls out a receding-horizon MPC controller on the vehicle dynamics,
    records the absolute/relative state and control histories, and offers
    plotting and saving helpers for comparison against ADP.
    """

    def __init__(self, initial_state, baseline_dir):
        """Args:
            initial_state: torch tensor of shape (1, 5) — initial vehicle state.
            baseline_dir: directory where trajectory text files are written.
        """
        self.config = DynamicsConfig()
        self.solver = Solver()
        self.initial_state = initial_state
        self.baseline_dir = baseline_dir

    def mpcSolution(self):
        """Simulate closed-loop MPC and record state/control histories."""
        # initialize state model
        statemodel_plt = Dynamics.VehicleDynamics()
        state = self.initial_state
        state.requires_grad_(False)
        x_ref = statemodel_plt.reference_trajectory(state[:, -1])
        state_r = state.detach().clone()  # relative state
        state_r[:, 0:4] = state_r[:, 0:4] - x_ref
        self.state_history = state.detach().numpy()
        plot_length = self.config.SIMULATION_STEPS
        self.control_history = []
        self.state_r_history = state_r
        cal_time = 0
        plt.figure(0)
        for i in range(plot_length):
            x = state_r.tolist()[0]
            time_start = time.time()
            # Solve one receding-horizon problem from the current relative state
            temp, control = self.solver.mpcSolver(x, self.config.NP)
            plt.plot(temp[:, -1], temp[:, 0])
            cal_time += time.time() - time_start
            u = Numpy2Torch(control[0], (-1, self.config.ACTION_DIM))
            state, state_r = step_relative(statemodel_plt, state, u)
            self.state_history = np.append(self.state_history, state.detach().numpy(), axis=0)
            self.control_history = np.append(self.control_history, u.detach().numpy())
            # BUG FIX: the relative-state history previously re-appended
            # self.state_history instead of extending self.state_r_history.
            self.state_r_history = np.append(self.state_r_history, state_r.detach().numpy())
        print("MPC calculating time: {:.3f}".format(cal_time) + "s")
        self.mpcSaveTraj()

    def mpcPlot(self):
        """Plot trajectory vs reference for position and heading, plus control."""
        dy = Dynamics.VehicleDynamics()
        ref = dy.reference_trajectory(Numpy2Torch(self.state_history[:, -1], self.state_history[:, -1].shape))
        self.state_r_history = self.state_r_history.reshape([-1, 5])
        plt.figure(1)
        plt.plot(self.state_history[:, -1], self.state_history[:, 0], label="trajectory")
        plt.plot(self.state_history[:, -1], ref[:, 0], label="reference")
        plt.legend(loc="upper right")
        plt.figure(2)
        plt.plot(self.state_history[:, -1], self.state_history[:, 2], label="trajectory")
        plt.plot(self.state_history[:, -1], ref[:, 2], label="reference")
        plt.legend(loc="upper right")
        plt.figure(3)
        # One fewer control sample than states, hence the 0:-1 slice.
        plt.plot(self.state_history[0:-1, -1], self.control_history)
        plt.show()

    def mpcSaveTraj(self):
        """Persist closed-loop state and control histories as text files."""
        np.savetxt(os.path.join(self.baseline_dir, 'structured_MPC_state.txt'), self.state_history)
        np.savetxt(os.path.join(self.baseline_dir, 'structured_MPC_control.txt'), self.control_history)

    def openLoopSolution(self):
        """Solve one open-loop MPC over the whole horizon and save the result."""
        init_state = self.initial_state.detach().numpy().tolist()[0]
        state, control = self.solver.openLoopMpcSolver(init_state, self.config.NP_TOTAL)
        np.savetxt(os.path.join(self.baseline_dir, 'Open_loop_state.txt'), state)
        np.savetxt(os.path.join(self.baseline_dir, 'Open_loop_control.txt'), control)
# Script entry: run the closed-loop MPC baseline from a fixed initial state
# (0.5 m lateral offset) and plot the resulting trajectories.
if __name__ == '__main__':
    state = torch.tensor([[0.5, 0.0, 0.0, 0.0, 0.0]])
    baseline_dir = "./baseline"
    baseline = Baseline(state,baseline_dir)
    baseline.mpcSolution()
    baseline.mpcPlot()
    # baseline.openLoopSolution()
import numpy as np
from scipy import sparse
"""
Calculate spectral radius of whole weight matrix
"""
def getSpectralRadius(self, weights):
    """Return the spectral radius of the full (1/255-scaled) weight matrix.

    `weights` carries the four sparse quadrant blocks exex/inex/exin/inin.
    """
    # Assemble the complete matrix from its four quadrant blocks
    full = sparse.vstack([
        sparse.hstack([weights.exex, weights.inex]),
        sparse.hstack([weights.exin, weights.inin]),
    ])
    # Magnitude of the largest-magnitude eigenvalue of the scaled matrix
    eigval = sparse.linalg.eigs(
        full.asfptype() / 255., k=1, which='LM', return_eigenvectors=False)[0]
    return np.absolute(eigval)
"""
Recombine weight matrix from excitatory probe chunks
"""
def recombineExWeightMatrix(self, initialExWeights, exWeightProbes):
    """Rebuild full excitatory weight matrices from per-core probe chunks.

    initialExWeights: CSR-like matrix whose indices/indptr define the
        sparsity pattern (assumed unchanged by training — TODO confirm).
    exWeightProbes: nested structure indexed [coreFrom][coreTo][synapse][0]
        where each leaf probe exposes a .data list over probe times.
    Returns a list with one scipy CSR matrix per probed time step.
    """
    # Get shorthand for some variables
    init = initialExWeights
    nPerCore = self.p.neuronsPerCore
    # Array which finally contains all weight matrices
    weightMatrices = []
    # Iterate over number of probes over time
    numProbes = len(exWeightProbes[0][0][0][0].data)
    for p in range(numProbes):
        # Calculate trained weight matrix from weight probes
        weightMatrix = []
        # Iterate over connection chunks between cores
        n, m = np.shape(exWeightProbes)
        for i in range(n):
            # Define from/to indices for indexing
            ifr, ito = i*nPerCore, (i+1)*nPerCore
            chunks = []
            for j in range(m):
                # Define from/to indices for indexing
                jfr, jto = j*nPerCore, (j+1)*nPerCore
                # Get number of synapses in current probe
                numSyn = np.shape(exWeightProbes[i][j])[0]
                # Iterate over number of synapses in current probe (connections from one core to another)
                data = []
                for k in range(numSyn):
                    # Get weights data from probe index p and append to data array
                    data.append(exWeightProbes[i][j][k][0].data[p])
                # Get chunk from initial matrix for defining sparse matrix of the current chunk (need indices and index pointer)
                # NOTE(review): row slice uses [jfr:jto, ifr:ito] — chunk rows
                # come from the j (inner) index; verify this matches the probe layout.
                ic = init[jfr:jto, ifr:ito]
                # Define sparse matrix, using initial weight matrix indices and index pointerm, as well as shape of chunk
                chunks.append(sparse.csr_matrix((data, ic.indices, ic.indptr), shape=np.shape(ic)))
            # Stack list of chunks together to column
            column = sparse.vstack(chunks)
            # Append column to weight matrix
            weightMatrix.append(column)
        # Stack list of columns together to the whole trained weight matrix
        wmcsr = sparse.hstack(weightMatrix).tocsr()  # transform to csr, since stacking returns coo format
        # Add weight matrix of current
        weightMatrices.append(wmcsr)
    return weightMatrices
"""
@desc: Get mask of support weights for every cluster in the assembly
@return: Mask of the bottom-left area of the matrix
"""
def getSupportWeightsMask(self, exWeightMatrix):
    """Get mask of support weights for every cluster in the assembly.

    Returns a boolean array with one row per cluster plus a final row for
    "other" neurons, over the bottom-left area of the matrix. A neuron
    supports a cluster when (1) its mean incoming weight from that cluster
    exceeds the area mean and (2) that cluster is its strongest input.
    """
    nCs = self.p.inputNumTargetNeurons
    nEx = self.p.reservoirExSize
    nC = self.p.inputAlternatingNum
    matrix = exWeightMatrix
    # Get areas in matrix
    #left = matrix[:,:nC*nCs].toarray()  # left
    #top = matrix[:nC*nCs,:].toarray()  # top
    #bottom = matrix[nC*nCs:,:].toarray()  # bottom
    bottomLeft = matrix[nC*nCs:,:nC*nCs].toarray()  # bottom-left
    # Get single cluster colums in bottom-left area (candidates for support weights)
    cols = np.array([ bottomLeft[:,i*nCs:(i+1)*nCs] for i in range(nC)])
    # Calculate means for every column in bottom-left
    col_rowmeans = np.array([np.mean(cols[i], axis=1) for i in range(nC)])
    # Condition 1: Get only rows their mean is greater than total mean
    greaterMeanIndices = col_rowmeans > np.mean(bottomLeft)
    # Condition 2: Get for every row the column which has max value
    col_argmax = np.argmax(col_rowmeans, axis=0)
    maxRowIndices = np.array(col_argmax[:,None] == range(nC)).T
    # Get final mask in combining both conditions
    supportMasks = np.logical_and(greaterMeanIndices, maxRowIndices)
    # Create a "false" column, which is necessary if only one column (one input) exists
    falseCol = np.zeros((supportMasks.shape[1])).astype(bool)
    # Get mask for other neurons
    othersMask = np.logical_not(np.logical_or(falseCol, *supportMasks))
    # Combine masks for support neurons and other neurons
    return np.array([*supportMasks, othersMask])
"""
@desc: Get support masks for weight matrices of all trials
"""
def getSupportMasksForAllTrials(self, initialweightsExex, trainedWeightsExex):
    """Get support masks for the initial and all trained weight matrices.

    Returns an array with one mask per matrix: the initial weights first,
    followed by one entry per trial.
    """
    masks = [self.getSupportWeightsMask(initialweightsExex)]
    for trained in trainedWeightsExex:
        masks.append(self.getSupportWeightsMask(trained))
    return np.array(masks)
"""
@desc: Get turnovers of support neurons
"""
def getSupportNeuronTurnovers(self, supportMasks):
    """Get turnovers of support neurons between consecutive trials.

    For each adjacent pair of masks, counts per-cluster membership changes
    (absolute difference summed along axis 1); result is transposed so
    rows index clusters and columns index trial transitions.
    """
    turnovers = []
    for prev, curr in zip(supportMasks[:-1], supportMasks[1:]):
        delta = prev.astype(int) - curr.astype(int)
        turnovers.append(np.abs(delta).sum(axis=1))
    return np.array(turnovers).T
|
import cvxpy as cvx
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse
def show_ellipse(A, b, color, ax):
    """Draw the ellipsoid parameterized by (A, b) onto matplotlib axes *ax*.

    Assumes the ellipsoid is {A u + b : ||u|| <= 1} as produced by
    get_mve(), so its covariance-like matrix is inv(A A^T) — TODO confirm.
    NOTE(review): plt.show() is called inside this helper (and again by the
    caller), which blocks; consider leaving show() to the caller.
    """
    sigma = np.linalg.inv(A.dot(A.T))
    mu = b
    vals, vecs = np.linalg.eigh(sigma)
    # Compute "tilt" of ellipse using first eigenvector
    x, y = vecs[:, 0]
    theta = np.degrees(np.arctan2(y, x))
    # Eigenvalues give length of ellipse along each eigenvector
    w, h = 2 * np.sqrt(np.power(vals, -1))
    ellipse = Ellipse(mu, w, h, theta, color=color)  # color="k")
    ellipse.set_clip_box(ax.bbox)
    ellipse.set_alpha(0.6)
    ax.add_artist(ellipse)
    plt.show()
def get_mve(a, b):
    """Compute the maximum-volume inscribed ellipsoid of {x : a x <= b}.

    Solves max log det A s.t. ||A a_i|| + a_i^T d <= b_i via SCS; the
    ellipsoid is {A u + d : ||u|| <= 1}.
    NOTE(review): `A * a[i, :]` and `a[i].T * d` use the old cvxpy `*`
    operator; newer cvxpy versions expect `@` for matrix products.
    """
    m, n = a.shape
    # Create and solve the model
    A = cvx.Variable((n, n), symmetric=True)
    d = cvx.Variable((n))
    obj = cvx.Maximize(cvx.log_det(A))
    constrs = [cvx.norm(A * a[i, :], 2) + a[i].T * d <= b[i] for i in range(m)]
    prob = cvx.Problem(obj, constrs)
    prob.solve(solver=cvx.SCS, verbose=False, eps=1e-6)
    print("status:", prob.status)
    print("optimal value", prob.value)
    print("optimal var", A.value, d.value)
    return A.value, d.value
# Demo: maximum-volume ellipse inside the polygon a x <= b, drawn over the
# polygon outline given by the vertex loop in `x`.
_, ax = plt.subplots()
#Constraints: AX <= B
a = np.array([[1, 1], [0, -1], [-1, 0], [0, 1], [1, 0]])
b = np.array([1.5, 0, 0, 1, 1])
x = np.array([[0, 0], [1, 0], [1, 0.5], [0.5, 1], [0, 1], [0, 0]])
B, d = get_mve(a, b)
plt.plot(x[:, 0], x[:, 1])
show_ellipse(B, d, 'b', ax)
plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__author__ = """Walter Treviño"""
__email__ = 'walter.trevino@gmail.com'
__version__ = '0.1.0'
from .pycfdi import Cfdi
|
from _typeshed import Incomplete
def min_weighted_dominating_set(G, weight: Incomplete | None = None): ...  # type stub: approximation algorithm, body lives in the implementation module
def min_edge_dominating_set(G): ...  # type stub: approximation algorithm, body lives in the implementation module
|
from __future__ import division
import datetime
class Helper():
    """Date/time helpers for aligning image timestamps with synop records."""

    def __init__(self):
        # Number of keys that failed to parse in daytime_only().
        self.err_count = 0

    def daytime_only(self, data):
        """Remove entries whose '%Y%m%d%H%M' key falls outside 06:00-18:59.

        Unparsable keys are kept and counted in self.err_count. `data` is
        mutated in place and also returned.
        """
        # Iterate over a snapshot of the keys: deleting from the dict while
        # iterating its live view raises RuntimeError in Python 3.
        for k in list(data.keys()):
            try:
                # BUG FIX: `datetime` here is the module, so the original
                # `datetime.strptime(...)` raised AttributeError on every
                # key and nothing was ever filtered.
                timest = datetime.datetime.strptime(k, '%Y%m%d%H%M')
                if timest.hour not in range(6, 19):
                    del data[k]
            except (ValueError, TypeError):
                self.err_count += 1
        return data

    def round_time(self, dt=None, round_to=60):
        """Round a datetime object to any time lapse in seconds.

        dt : datetime.datetime object, default now.
        round_to : Closest number of seconds to round to, default 1 minute.
        Author: Thierry Husson 2012 - Use it as you want but don't blame me.
        """
        if dt is None:
            dt = datetime.datetime.now()
        seconds = (dt.replace(tzinfo=None) - dt.min).seconds
        rounding = (seconds + round_to / 2) // round_to * round_to
        return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)

    def standardize_date(self, img_name):
        """Extract date & time from a filename and round to the nearest hour.

        Expects 'YYYYMMDD?HHMMSS' at img_name[-22:-7]; returns a
        'YYYYMMDDHHMM' string, or None when parsing fails.
        """
        date = img_name[-22:-7]
        try:
            rounded_date = str(self.round_time(
                datetime.datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                                  int(date[9:11]), int(date[11:13]),
                                  int(date[13:15]), int('0000')),
                round_to=3600))
            # Strip separators and drop seconds, cause synops don't have 'em.
            standardized_date = rounded_date.replace('-', '').replace(' ', '').replace(':', '')[:-2]
            return standardized_date
        except (ValueError, IndexError):
            return None

    def avg_synops(self, s1, s2):
        """Average two synop dicts over their common keys.

        s1, s2: instances of Load_synops.synops (dict-like of numeric strings).
        Returns a dict over the common keys: differing values are averaged
        as ints, identical values are passed through unchanged.
        """
        common_keys = set(s1.keys()).intersection(s2.keys())
        print('common_keys for synops ' + str(len(common_keys)))
        avg_synops = dict()
        for key in common_keys:
            try:
                if s1[key] != s2[key]:
                    avg_synops[key] = (int(s1[key]) + int(s2[key])) / 2
                else:
                    avg_synops[key] = s1[key]
            except (ValueError, TypeError):
                # len of synops is not the same, 26208 vs 26214
                pass
        return avg_synops
import csv
import pdb
from datetime import date, datetime
import json
from movie_tracker.models import *
import pandas as pd
import tmdbsimple as tmdb
# Paths into the Kaggle "TMDB 5000" dataset dump.
MOVIES = 'tmdb-5000-movie-dataset/tmdb_5000_movies.csv'
CREDITS = 'tmdb-5000-movie-dataset/tmdb_5000_credits.csv'
# NOTE(review): hard-coded API key committed to source — rotate it and load
# it from an environment variable or secrets store instead.
tmdb.API_KEY = '3cfdb6d49ad9aa0736b9e2c49d6b20ab'
search = tmdb.Search()
POSTER_PATH = 'http://image.tmdb.org/t/p/w185/'
def load_data():
    """Read the TMDB CSV dumps and return parsed movie/keyword/actor series.

    Only the first 40 rows of each CSV are loaded (development limit —
    presumably to keep TMDB poster lookups fast; TODO confirm).
    Returns a DataFrame with 'movies', 'keywords' and 'actors' columns.
    """
    movies_data = pd.read_csv(MOVIES)[:40]
    # movies_data = decode_data(movies_data)
    movies = load_movies(movies_data)
    keywords = load_keywords(movies_data)
    credits = pd.read_csv(CREDITS)[:40]
    # credits = decode_data(credits)
    # Credits use 'movie_id'; mirror it as 'id' so the merge keys line up.
    credits['id'] = credits['movie_id']
    movies_data = pd.merge(movies_data, credits, on = ['id', 'title'])
    actors = load_actors(movies_data)
    return pd.DataFrame({ 'movies': movies, 'keywords': keywords, 'actors': actors })
def decode_data(data):
    """Decode every object-typed column from UTF-8 bytes and strip whitespace.

    Mutates *data* in place and returns it.
    """
    object_columns = data.select_dtypes(['object']).keys()
    for column in object_columns:
        data[column] = data[column].apply(lambda cell: cell.decode('utf-8').strip())
    return data
def load_keywords(movies):
    """Parse the JSON-encoded 'keywords' column into Python lists."""
    return movies['keywords'].apply(json.loads)
def load_actors(movies):
    """Parse the JSON 'cast' column into name/character dicts per movie."""
    return movies['cast'].apply(parse_actors)
def parse_actors(row):
    """Extract name/character pairs from a JSON-encoded cast list."""
    cast = json.loads(row)
    return [{'name': member['name'], 'character': member['character']}
            for member in cast]
def load_movies(movies):
    """Normalize the raw movies frame and build one dict per movie.

    Side effects: mutates *movies* in place (genres flattened to a comma
    string, release_date parsed, poster fetched from TMDB per title —
    one network call per row).
    Returns the Series of movie dicts.
    """
    movies['genres'] = movies['genres'].apply(
        lambda row: ', '.join([genre['name'] for genre in json.loads(row)]))
    # dropna() skips NaN release dates; those rows keep their original value.
    movies['release_date'] = movies['release_date'].dropna().apply(lambda row: format_date((row)))
    movies['poster'] = movies['title'].apply(lambda title: get_poster(title))
    movies['movie'] = movies.apply(lambda row: {
        'id': row['id'],
        'title': row['title'],
        'release_date': row['release_date'],
        'runtime': row['runtime'],
        'overview': row['overview'],
        'tagline': row['tagline'],
        'budget': row['budget'],
        'genres': row['genres'],
        'poster': row['poster'],
    }, axis = 1)
    return movies['movie']
def get_poster(title):
    """Look up a movie poster URL via the TMDB search API (network call).

    Returns the full poster URL for the first search hit, or None when
    the search has no results or the hit carries no poster path.
    """
    data = search.movie(query = title)
    if data['total_results'] == 0:
        return None
    poster = data['results'][0].get('poster_path', None)
    if poster != None:
        poster = POSTER_PATH + poster
    return poster
def format_date(raw_date):
    """Parse a 'YYYY-MM-DD' string into a datetime.date.

    Returns None for an empty string; raises ValueError for other
    malformed inputs (unchanged behavior).
    """
    if raw_date == '':
        return None
    # FIX: the local was previously named `date`, shadowing the
    # datetime.date class imported at module level.
    parsed = datetime.strptime(raw_date, '%Y-%m-%d')
    return parsed.date()
def create_objects(row):
    """Persist one parsed dataset row as Movie/Keyword/Actor ORM objects.

    Expects the row produced by load_data(): 'movies' is a kwargs dict for
    Movie, 'keywords' a list of kwargs dicts, 'actors' a list of
    name/character dicts. Creates an ActorMovie join row per actor.
    """
    movie_data = row['movies']
    keywords_data = row['keywords']
    actors_data = row['actors']
    movie = Movie(**movie_data)
    # First save so the movie has an id for the ActorMovie join rows below.
    movie.save()
    keywords = []
    for keyword_data in keywords_data:
        # get_or_create returns (object, created); keep only the object.
        keywords.append(Keyword.objects.get_or_create(**keyword_data)[0])
    movie.keywords = keywords
    actors = []
    for actor_data in actors_data:
        actor = Actor.objects.get_or_create(name = actor_data['name'])[0]
        actors.append(actor)
        ActorMovie(actor_id=actor.id, movie_id=movie.id, character=actor_data['character']).save()
    movie.actors = actors
    movie.save()
def main():
    """Load the dataset and persist each row's movie, keywords and actors."""
    for index, row in load_data().iterrows():
        print('start for {0}'.format(index))
        create_objects(row)
# Script entry point: populate the database from the TMDB CSV dump.
if __name__ == "__main__":
    main()
|
import requests
# One-shot smoke test: GET the /mark endpoint on a LAN host and print the
# Response object (shows only the status; use r.text for the body).
r = requests.get("http://192.168.1.178:4567/mark")
print(r)
__author__ = "Narwhale"
# import time
#
# for i in range(1,21):
# print(i)
#
#
# #----------------------------
#
# ls = [i for i in range(1,1000001)]
# # print(type(ls))
# # for r in ls:
# # print(r)
#
# starttime = time.time()
# s = sum(ls)
# stoptime = time.time()
# t =stoptime - starttime
# print(sum(ls))
# print(t)
#-------------------------------
#
# ls1= [i for i in range(1,20,2)]
# for i in ls1:
# print(i)
#---------------------------------
# ls=[]
# ls2 = [i for i in range(3,30) if i%3 == 0]
# print(ls2)
# print(ls2)
# for r in ls2:
# if r%3 == 0:
# ls.append(r)
#
# print(ls)
#----------------------------------
#
# ls3 = [str(i)+'**3' for i in range(1,11)]
# print(ls3)
# for i in ls3:
# print(i) |
from app import app, db
from app.model_types import GUID
from sqlalchemy.sql import func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import event
from sqlalchemy.orm.attributes import InstrumentedAttribute
from Crypto.Cipher import AES
import binascii
import uuid
import frontmatter
import re
key = app.config['DB_ENCRYPTION_KEY']
def aes_encrypt(data):
    """Encrypt *data* with AES-CFB using the app-wide DB_ENCRYPTION_KEY.

    NOTE(review): the IV is the reversed key — a fixed, key-derived IV.
    Reusing a static IV with CFB weakens confidentiality; a random
    per-message IV stored with the ciphertext would be safer.
    """
    cipher = AES.new(key, AES.MODE_CFB, key[::-1])
    return cipher.encrypt(data)
def aes_encrypt_old(data):
    """Legacy encryption: AES default mode, space-padded, hex-encoded.

    Kept only so aes_decrypt_old() can read existing rows; new writes go
    through aes_encrypt(). Pads with spaces to a 16-byte multiple, which
    is why the legacy decrypt rstrip()s the result.
    """
    cipher = AES.new(key)
    data = data + (" " * (16 - (len(data) % 16)))
    return binascii.hexlify(cipher.encrypt(data))
def aes_decrypt(data):
    """Decrypt CFB ciphertext, falling back to the legacy scheme.

    Returns '' when handed an InstrumentedAttribute, which is what a
    column access yields on a brand-new, not-yet-flushed model object.
    """
    # From a new object
    if type(data) is InstrumentedAttribute:
        return ''
    cipher = AES.new(key, AES.MODE_CFB, key[::-1])
    decrypted = cipher.decrypt(data)
    try:
        return decrypted.decode('utf-8')
    except:
        # Data is in old encryption or it is unencrypted
        return aes_decrypt_old(data)
def aes_decrypt_old(data):
    """Decrypt data written by aes_encrypt_old(); pass through plaintext.

    Undoes the hex encoding and space padding of the legacy scheme.
    """
    try:
        cipher = AES.new(key)
        return cipher.decrypt(binascii.unhexlify(data)).rstrip().decode('ascii')
    except:
        # If data is not encrypted, just return it
        return data
class User(db.Model):
    """Application user; owns notes and their derived meta rows."""

    uuid = db.Column(GUID, primary_key=True, index=True, unique=True, default=lambda: uuid.uuid4())
    username = db.Column(db.String(64), unique=True, nullable=False)
    password_hash = db.Column(db.String(128), nullable=False)
    # Whether the UI should auto-save this user's notes.
    auto_save = db.Column(db.Boolean, nullable=True)
    # Deleting a user cascades to their notes and meta rows.
    notes = db.relationship('Note', lazy='dynamic', cascade='all, delete, delete-orphan')
    meta = db.relationship('Meta', lazy='dynamic', cascade='all, delete, delete-orphan')
    def __repr__(self):
        return '<User {}>'.format(self.uuid)
class Meta(db.Model):
    """Tag/project/task extracted from a note's frontmatter or body.

    `name` is stored encrypted; the hybrid property transparently
    encrypts on write and decrypts on read. `name_compare` holds the
    last-synced encrypted value for task rows (see before_update_task).
    """

    uuid = db.Column(GUID, primary_key=True, index=True, unique=True, default=lambda: uuid.uuid4())
    user_id = db.Column(GUID, db.ForeignKey('user.uuid'), nullable=False)
    note_id = db.Column(GUID, db.ForeignKey('note.uuid'), nullable=False)
    name_encrypted = db.Column('name', db.String)
    name_compare = db.Column(db.String)
    # One of 'tag', 'project' or 'task'.
    kind = db.Column(db.String)
    @hybrid_property
    def name(self):
        return aes_decrypt(self.name_encrypted)
    @name.setter
    def name(self, value):
        self.name_encrypted = aes_encrypt(value)
    def __repr__(self):
        return '<Meta {}>'.format(self.uuid)
    @property
    def serialize(self):
        # Decrypted, JSON-friendly representation for API responses.
        return {
            'uuid': self.uuid,
            'name': self.name,
            'kind': self.kind,
            'note_id': self.note_id,
        }
class Note(db.Model):
    """Encrypted markdown note with frontmatter-derived title and meta.

    Both the body (`data`) and title are stored encrypted; the `text` and
    `name` hybrid properties handle encryption/decryption transparently.
    """

    uuid = db.Column(GUID, primary_key=True, index=True, unique=True, default=lambda: uuid.uuid4())
    user_id = db.Column(GUID, db.ForeignKey('user.uuid'), nullable=False)
    data = db.Column(db.String)
    title = db.Column(db.String(128), nullable=False)
    date = db.Column(db.DateTime(timezone=True), server_default=func.now())
    # True for date-journal notes, whose title must not be overwritten
    # from frontmatter (see before_change_note).
    is_date = db.Column(db.Boolean, default=False)
    meta = db.relationship('Meta', lazy='dynamic', cascade='all, delete, delete-orphan')
    @hybrid_property
    def text(self):
        return aes_decrypt(self.data)
    @text.setter
    def text(self, value):
        self.data = aes_encrypt(value)
    @hybrid_property
    def name(self):
        return aes_decrypt(self.title)
    @name.setter
    def name(self, value):
        self.title = aes_encrypt(value)
    def __repr__(self):
        return '<Note {}>'.format(self.uuid)
    @property
    def serialize(self):
        # Decrypted, JSON-friendly representation for API responses.
        return {
            'uuid': self.uuid,
            'data': self.text,
            'title': self.name,
            'date': self.date,
            'is_date': self.is_date,
        }
# Update title automatically
def before_change_note(mapper, connection, target):
    """Sync the note title from its frontmatter before insert/update.

    Date-journal notes (is_date) keep their fixed title.
    """
    candidate = frontmatter.loads(target.text).get('title')
    if isinstance(candidate, str) and candidate and not target.is_date:
        target.name = candidate
# Handle changes to tasks, projects, and tags
def after_change_note(mapper, connection, target):
    """Diff the note's tags/projects/tasks against Meta rows and sync them.

    Runs after insert/update. Parses frontmatter 'tags'/'projects' (list or
    comma-separated string) and markdown checkbox tasks from the body, then
    deletes stale Meta rows and inserts new ones via raw SQL on the event's
    connection (the ORM session cannot be flushed from an after_* hook).
    """
    tags = []
    projects = []
    data = frontmatter.loads(target.text)
    # List form: escape literal commas. String form: split on commas.
    if isinstance(data.get('tags'), list):
        tags = list(set([x.replace(',', '\,') for x in data.get('tags')]))
    elif isinstance(data.get('tags'), str):
        tags = list(set(map(str.strip, data['tags'].split(','))))
        tags = [x for x in tags if x]
    if isinstance(data.get('projects'), list):
        projects = list(set([x.replace(',', '\,') for x in data.get('projects')]))
    elif isinstance(data.get('projects'), str):
        projects = list(set(map(str.strip, data['projects'].split(','))))
        projects = [x for x in projects if x]
    # Markdown checkboxes: "- [ ] ..." or "- [x] ...", one per line.
    tasks = re.findall("- \[[x| ]\] .*$", data.content, re.MULTILINE)
    # Bucket the note's existing Meta rows by kind.
    existing_tags = []
    existing_projects = []
    existing_tasks = []
    metas = Meta.query.filter_by(note_id=target.uuid).all()
    for meta in metas:
        if meta.kind == 'tag':
            existing_tags.append(meta)
        elif meta.kind == 'project':
            existing_projects.append(meta)
        elif meta.kind == 'task':
            existing_tasks.append(meta)
    # Delete rows no longer present; whatever survives is removed from the
    # pending list so only genuinely new entries get inserted below.
    for tag in existing_tags:
        if tag.name not in tags:
            connection.execute(
                'DELETE FROM meta WHERE uuid = ?',
                '{}'.format(tag.uuid).replace('-', '')
            )
        else:
            tags.remove(tag.name)
    for tag in tags:
        connection.execute(
            'INSERT INTO meta (uuid, user_id, note_id, name, kind) VALUES (?, ?, ?, ?, ?)',
            '{}'.format(uuid.uuid4()).replace('-', ''),
            '{}'.format(target.user_id).replace('-', ''),
            '{}'.format(target.uuid).replace('-', ''),
            aes_encrypt(tag),
            'tag'
        )
    for project in existing_projects:
        if project.name not in projects:
            connection.execute(
                'DELETE FROM meta WHERE uuid = ?',
                '{}'.format(project.uuid).replace('-', '')
            )
        else:
            projects.remove(project.name)
    for project in projects:
        connection.execute(
            'INSERT INTO meta (uuid, user_id, note_id, name, kind) VALUES (?, ?, ?, ?, ?)',
            '{}'.format(uuid.uuid4()).replace('-', ''),
            '{}'.format(target.user_id).replace('-', ''),
            '{}'.format(target.uuid).replace('-', ''),
            aes_encrypt(project),
            'project'
        )
    for task in existing_tasks:
        if task.name not in tasks:
            connection.execute(
                'DELETE FROM meta WHERE uuid = ?',
                '{}'.format(task.uuid).replace('-', '')
            )
        else:
            tasks.remove(task.name)
    for task in tasks:
        encrypted_task = aes_encrypt(task)
        # Tasks also seed name_compare so later renames can be detected
        # (see before_update_task).
        connection.execute(
            'INSERT INTO meta (uuid, user_id, note_id, name, name_compare, kind) VALUES (?, ?, ?, ?, ?, ?)',
            '{}'.format(uuid.uuid4()).replace('-', ''),
            '{}'.format(target.user_id).replace('-', ''),
            '{}'.format(target.uuid).replace('-', ''),
            encrypted_task,
            encrypted_task,
            'task'
        )
def before_update_task(mapper, connection, target):
    """When a task Meta row is renamed, rewrite the task line in its Note.

    name_compare holds the previously synced (encrypted) task text; when it
    matches name_encrypted nothing changed and the hook bails out early.
    """
    if target.kind != 'task':
        return
    if target.name_encrypted == target.name_compare:
        return
    note = Note.query.get(target.note_id)
    if not note:
        return
    # Replace the old task text inside the decrypted note body, then
    # re-encrypt and persist via raw SQL on the event's connection.
    note_data = aes_encrypt(note.text.replace(aes_decrypt(target.name_compare), target.name))
    connection.execute(
        'UPDATE note SET data = ? WHERE uuid = ?',
        note_data,
        '{}'.format(note.uuid).replace('-', '')
    )
    # Mark the rename as synced so this hook is idempotent.
    target.name_compare = target.name_encrypted
# Register the ORM hooks: title sync before writes, meta sync after writes,
# and note-body rewrite when a task row is renamed.
event.listen(Note, 'before_insert', before_change_note)
event.listen(Note, 'before_update', before_change_note)
event.listen(Note, 'after_insert', after_change_note)
event.listen(Note, 'after_update', after_change_note)
event.listen(Meta, 'before_update', before_update_task)
|
import urwid
from diary import Diary
# Keyboard shortcuts handled by DiaryWidget.keypress (urwid key names).
shortcuts = {
    "save": "ctrl o",
    "toggle-edit": "ctrl x",
    "back": "esc",
    "discard": "ctrl r",
}
class DiaryWidget(urwid.WidgetWrap):
    """Full-screen diary view: header, editable/read-only body, status footer.

    Emits the "close" signal when the user navigates back (Esc on a clean
    diary, or Ctrl-R to discard changes).
    """

    signals = ["close"]
    def __init__(self, diary: Diary):
        self._diary = diary
        # Placeholder lets _render() swap the whole frame in place.
        w = urwid.WidgetPlaceholder(urwid.Text(""))
        super().__init__(w)
        self._render()
    def _render(self):
        """Rebuild the frame to match the diary's read-only/edit state."""
        if self._diary.is_readonly():
            content = urwid.Text(self._diary.get_contents())
        else:
            content = urwid.Edit(
                edit_text=self._diary.get_contents(),
                multiline=True,
                allow_tab=True,
            )
            # Mirror every edit into the diary so dirtiness is tracked.
            urwid.connect_signal(
                content, "change", lambda _w, val: self._diary.set_contents(val)
            )
        content = urwid.Filler(content, valign='top')
        self._body = urwid.Pile(
            [
                ("pack", HeaderWidget(self._diary)),
                ("weight", 1, urwid.LineBox(content)),
            ]
        )
        self._footer = FooterWidget(self._diary)
        frame = urwid.Frame(body=self._body, footer=self._footer)
        self._w.original_widget = frame
    def keypress(self, size, key):
        """Handle the global shortcuts; everything else goes to the frame."""
        if key == shortcuts["toggle-edit"]:
            # Flip read-only/edit and rebuild the body widget accordingly.
            if self._diary.is_readonly():
                self._diary.set_readonly(False)
            else:
                self._diary.set_readonly(True)
            self._render()
        elif key == shortcuts["back"]:
            # Esc only closes when there is nothing unsaved.
            if not self._diary.is_dirty():
                self._emit("close")
        elif key == shortcuts["discard"]:
            self._emit("close")
        elif key == shortcuts["save"]:
            if self._diary.is_dirty():
                self._diary.save()
                self._footer.update_status()
        else:
            # Delegate to the wrapped frame, then refresh the footer since
            # typing may have changed the dirty state.
            retval = super().keypress(size, key)
            self._footer.update_status()
            return retval
        return None
class HeaderWidget(urwid.WidgetWrap):
    """Title banner plus a boxed row showing the diary's filename."""

    def __init__(self, diary: Diary):
        banner = urwid.Filler(
            urwid.AttrMap(urwid.Text("Diary", align="center"), "bright"),
            top=1,
        )
        filename_row = urwid.LineBox(
            urwid.Columns([("weight", 1, urwid.Text(diary.get_filename()))])
        )
        super().__init__(urwid.Pile([(2, banner), ("pack", filename_row)]))
class FooterWidget(urwid.WidgetWrap):
    """Shortcut hints plus a save/dirty status indicator."""

    def __init__(self, diary: Diary):
        self._diary = diary
        super().__init__(urwid.WidgetPlaceholder(urwid.Text("")))
        self.update_status()

    def update_status(self):
        """Rebuild the footer to reflect the diary's current state."""
        readonly = self._diary.is_readonly()
        dirty = self._diary.is_dirty()

        hints = [urwid.Text("Ctrl-O: Save")]
        hints.append(
            urwid.Text("Ctrl-R: Discard and Go Back") if dirty
            else urwid.Text("Esc: Go Back")
        )
        hints.append(
            urwid.Text("Ctrl-X: Edit mode") if readonly
            else urwid.Text("Ctrl-X: Read-only mode")
        )

        status_text = ""
        if readonly:
            status_text += "Read-only mode. "
        status_text += "Unsaved changes." if dirty else "Saved."

        columns = [("pack", hint) for hint in hints]
        columns.append(("weight", 1, urwid.Text("")))
        columns.append(("pack", urwid.Text(status_text)))
        self._w.original_widget = urwid.Columns(columns, dividechars=3)
|
def myFunc(a=None, b=None):
    """Compare two integers and print a mood message.

    Generalized: a and b may be passed directly; when omitted they are
    read interactively, preserving the original prompt-driven behavior.
    Also returns the printed message so the function is testable.
    """
    if a is None:
        a = int(input("Введите первое число: "))
    if b is None:
        b = int(input("Введите второе число: "))
    if a < b:
        message = "Плохой негативный текст!"
    elif a > b:
        message = "Хороший позитивный текст!"
    else:
        message = "Баланс в природе не нарушен!"
    print(message)
    return message
# Run interactively when the file is executed.
myFunc()
import os
import torch
from models.resnet import resnet50, resnet101, resnet152
from models.densenet import densenet121, densenet161
from models.senet import senet154, se_resnext101_32x4d, se_resnet101
from models.inception_v4 import inceptionv4
from models.xception import xception
from models.inceptionresnetv2 import inceptionresnetv2
from models.vgg import vgg16
# Registry mapping backbone names to their constructor functions;
# model_builder() looks networks up here by name.
net_dict = {
    "vgg16" : vgg16,
    "resnet50" : resnet50,
    "resnet101" : resnet101,
    "resnet152" : resnet152,
    "densenet121" : densenet121,
    "densenet161" : densenet161,
    "inceptionv4" : inceptionv4,
    "senet154" : senet154,
    "se_resnet101" : se_resnet101,
    "se_resnext101_32x4d" : se_resnext101_32x4d,
    "xception" : xception,
    "inceptionresnetv2" : inceptionresnetv2 }
def model_builder(net_name, num_classes=100, pretrained=None, weight_path=None):
    """Instantiate a backbone by name, optionally loading pretrained weights.

    Returns None when net_name is not in the registry.
    """
    builder = net_dict.get(net_name)
    if builder is None:
        return None
    net = builder(num_classes=num_classes, pretrained=pretrained)
    if pretrained:
        load_weights(net, weight_path, net_name)
    return net
def load_weights(net, weight_path, net_name):
    """Load a pretrained checkpoint into *net*, skipping classifier layers.

    Handling differs by architecture:
    - vgg16: checkpoint is loaded into the inner `net.vgg` module directly.
    - inceptionv4 / inceptionresnetv2: copy all entries except those under
      'last_linear' (the classifier head).
    - everything else: copy all but the final two entries — presumably the
      classifier's weight and bias; this relies on the checkpoint dict's
      insertion order, so verify against each checkpoint's layout.
    strict=False lets the (differently-sized) classifier stay randomly
    initialized.
    """
    if net_name == "vgg16":
        weight = torch.load(weight_path)
        net.vgg.load_state_dict(weight)
        return
    if net_name == "inceptionv4" or net_name == "inceptionresnetv2":
        weight = torch.load(weight_path)
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in weight.items():
            head = k.split('.')[0]
            if head == 'last_linear':
                pass
            else:
                name = k
                new_state_dict[name] = v
        net.load_state_dict(new_state_dict, strict=False)
    else:
        weight = torch.load(weight_path)
        n = len(weight)
        count = 0
        print("load pretrained weight: ", weight_path, n)
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in weight.items():
            if count < n-2:
                new_state_dict[k] = v
            else:
                pass
            count += 1
        net.load_state_dict(new_state_dict, strict=False)
#Data Types
"""
Python has multiple data types
Text Type: str
Numeric Types: int, float, complex
Sequence Types: list, tuple, range
Mapping Type: dict
Set Types: set, frozenset
Boolean Type: bool
Binary Types: bytes, bytearray, memoryview
"""
#To get the data type of a variable use the type() method
x=5
print(type(x))
#Each data type has a constructor that you can use to set it
x=float(20.5)
print(type(x))
#Type conversion
#you can actually use the constructors of data types in order to convert them
x = 1 # int
y = 2.8 # float
#convert from int to float:
a = float(x)
#convert from float to int (truncates toward zero):
b = int(y)
print(type(a))
print(type(b))
#Casting
"""
Sometimes you may want to specify a variable type on a variable this canb be done with casting
This is done by using the datatype constructor
"""
x = int(1) # x will be 1
y = int(2.8) # y will be 2
z = int("3") # z will be 3
x = float(1) # x will be 1.0
y = float(2.8) # y will be 2.8
z = float("3") # z will be 3.0
w = float("4.2") # w will be 4.2
y = str(2) # y will be '2'
z = str(3.0) # z will be '3.0'
from django.contrib import admin
from django.urls import path,include
from .views import render_pdf_view,GeneratePdf, generate_pdf
# PDF generation routes.
# NOTE(review): all three routes share name='pdf'. With duplicate names,
# Django's reverse('pdf') resolves only to the LAST pattern ('new'); the
# first two names are shadowed. Consider unique names — kept as-is here
# because templates may already rely on the current resolution.
urlpatterns = [
    path('pdf', render_pdf_view, name='pdf'),
    path('newpdf', generate_pdf, name='pdf'),
    path('new', GeneratePdf.as_view(), name='pdf'),
]
|
import cv2.cv as cv
import random
# Legacy OpenCV 1.x API (cv2.cv) — a Python-2-era script; the '/' below
# relies on Python 2 integer division for the half-size thumbnail.
im = cv.LoadImage('meinv.jpg')
# NOTE(review): CreateImage's second argument is a bit depth (e.g.
# cv.IPL_DEPTH_8U); CV_8UC2 is a Mat-type constant — confirm this works
# on the target OpenCV build.
thumb = cv.CreateImage((im.width / 2, im.height / 2), cv.CV_8UC2, 3)
cv.Resize(im, thumb)
# Sprinkle 5000 randomly-colored pixels over the thumbnail ("noise").
for k in range(5000):
    i = random.randint(0, thumb.height-1)
    j = random.randint(0, thumb.width-1)
    color = (random.randrange(256), random.randrange(256), random.randrange(256))
    thumb[i, j] = color
# Iterate the pixels on the segment (0,0)-(10,10) and print channel values.
li = cv.InitLineIterator(thumb, (0, 0), (10, 10))
for (r, g, b) in li:
    print (r, g, b)
cv.ShowImage("Noize", thumb)
cv.WaitKey(0)
|
#!/bin/python3
import sys
def divisibleSumPairs(n, k, ar):
    """Count index pairs (i, j) with i < j and (ar[i] + ar[j]) % k == 0.

    Args:
        n: declared length of ``ar`` (kept for interface compatibility;
           the real length of ``ar`` is used).
        k: the divisor.
        ar: list of integers.

    Returns:
        The number of qualifying pairs.
    """
    # The original was O(n^3): full double scan plus a linear
    # ``pair not in arrPairs`` membership test, even though index pairs
    # (i, j) with i < j are unique by construction. A direct O(n^2)
    # count is equivalent and far simpler.
    count = 0
    for i in range(len(ar)):
        for j in range(i + 1, len(ar)):
            if (ar[i] + ar[j]) % k == 0:
                count += 1
    return count
# HackerRank driver: first line is "n k", second line is the n integers.
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
ar = list(map(int, input().strip().split(' ')))
result = divisibleSumPairs( n, k, ar)
print(result)
|
import cv2 as cv2
import numpy as np
image = cv2.imread('filtr.jpg')
# Use the blue channel as the single-channel input for contour detection.
# NOTE(review): findContours usually expects a thresholded/binary image;
# running it on a raw channel may yield noisy contours — confirm intent.
b, g, r = cv2.split(image.copy())
# Three-value return (image, contours, hierarchy) is the OpenCV 3.x API.
_, contours, hierarchy = cv2.findContours(b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
img = image.copy()
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    # NOTE(review): cv2.rectangle draws on `image` in place AND returns it,
    # so this adds pixel values into `img` — probably unintended, but `img`
    # is never displayed, so behavior is kept as-is.
    img += cv2.rectangle(image, (x, y), (x+w, y+h), (243, 20, 20), 2)
cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
# BUG FIX: cv2.imshow requires a window name as its first argument;
# the original cv2.imshow(image) raised a TypeError.
cv2.imshow('contours', image)
cv2.waitKey(0)
from collections import OrderedDict
from datetime import datetime
from uuid import uuid4
from django.contrib import messages
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from django.http.response import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.utils.functional import cached_property
from django.views.generic import UpdateView, CreateView, TemplateView, DetailView, DeleteView
from gim.timers import sleep
from gim.subscriptions.models import SUBSCRIPTION_STATES
from gim.core.models.projects import Card, Column, Project
from gim.core.tasks import (
IssueEditProjectsJob,
MoveCardJob, CardNoteEditJob,
ColumnEditJob, ColumnMoveJob,
ProjectEditJob,
)
from gim.front.mixins.views import LinkedToUserFormViewMixin, WithAjaxRestrictionViewMixin, WithIssueViewMixin, WithSubscribedRepositoryViewMixin, DependsOnRepositoryViewMixin, WithRepositoryViewMixin, \
LinkedToRepositoryFormViewMixin
from gim.front.repository.dashboard.views import LabelsEditor
from gim.front.utils import make_querystring, forge_request
from gim.front.repository.views import BaseRepositoryView, RepositoryViewMixin
from gim.front.repository.issues.views import IssuesView, IssueEditAssignees, IssueEditLabels, \
IssueEditMilestone, IssueEditState, IssueEditProjects, IssuesFilters, IssueEditRequestedReviewers
from .forms import (
CardNoteCreateForm, CardNoteDeleteForm, CardNoteEditForm,
ColumnCreateForm, ColumnEditForm, ColumnDeleteForm,
ProjectEditForm, ProjectDeleteForm, ProjectCreateForm
)
# Built-in "auto" boards, keyed by 'auto-<key>'. Each entry describes a
# board (mode, key, display name, description). Only the state board has
# fixed columns here; assignee / reviewer / milestone columns are filled
# per-repository in BoardMixin.get_boards().
DEFAULT_BOARDS = OrderedDict((
    ('auto-state', {
        'mode': 'auto',
        'key': 'state',
        'name': u'issue state',
        'description': u'two columns board: open and closed issues',
        'columns': OrderedDict((
            ('open', {
                'key': 'open',
                'name': u'open',
                'description': u'Open issues',
                'qs': ('state', 'open'),
            }),
            ('closed', {
                'key': 'closed',
                'name': u'closed',
                'description': u'Closed issues',
                'qs': ('state', 'closed'),
            }),
        )),
    }),
    ('auto-assigned', {
        'mode': 'auto',
        'key': 'assigned',
        'name': u'assignees',
        'description': u'one column per assignee',
    }),
    ('auto-requested-reviewer', {
        'mode': 'auto',
        'key': 'requested-reviewer',
        'name': u'requested reviewers',
        'description': u'one column per requested reviewer',
    }),
    ('auto-open-milestones', {
        'mode': 'auto',
        'key': 'open-milestones',
        'name': u'open milestones',
        'description': u'one column per open milestone',
    }),
    ('auto-all-milestones', {
        'mode': 'auto',
        'key': 'all-milestones',
        'name': u'all milestones',
        'description': u'one column per milestone',
    }),
))
class BoardMixin(object):
    """Build the boards available for the current repository and resolve
    the one targeted by the url (``board_mode``/``board_key`` kwargs).

    "Auto" boards come from ``DEFAULT_BOARDS``; boards for github projects
    and for label types are computed from the repository's data.
    """

    LIMIT_ISSUES = 30  # max issues rendered per column
    default_qs = 'state=open'  # default filter appended to board urls
    raise_if_no_current_board = True

    def __init__(self):
        super(BoardMixin, self).__init__()
        self.current_board = None
        self.current_column = None

    @cached_property
    def collaborators(self):
        # Cached per request: used to build one column per assignee/reviewer.
        return self.repository.collaborators.all()

    def get_boards(self):
        """Return an OrderedDict of all boards, columns and urls filled in."""
        boards = OrderedDict(DEFAULT_BOARDS)
        if len(self.collaborators):
            # Fill assigned columns
            boards['auto-assigned']['columns'] = OrderedDict([
                ('__none__', {
                    'key': '__none__',
                    'name': u'(No one assigned)',
                    'description': u'',
                    'qs': ('assigned', '__none__'),
                })
            ] + [
                (user.username, {
                    'key': user.username,
                    'name': user.username,
                    'description': user.full_name,
                    'qs': ('assigned', user.username),
                    'object': user,
                })
                for user in self.collaborators
            ])
            # Fill requested-reviewer columns
            boards['auto-requested-reviewer']['columns'] = OrderedDict([
                ('__none__', {
                    'key': '__none__',
                    'name': u'(No one requested)',
                    'description': u'',
                    'qs': ('requested_reviewer', '__none__'),
                })
            ] + [
                (user.username, {
                    'key': user.username,
                    'name': user.username,
                    'description': user.full_name,
                    'qs': ('requested_reviewer', user.username),
                    'object': user,
                })
                for user in self.collaborators
            ])
        # Fill milestone columns
        for milestone_filter in ('open', 'all'):
            column_name = 'auto-%s-milestones' % milestone_filter
            boards[column_name]['columns'] = OrderedDict([
                ('__none__', {
                    'key': '__none__',
                    'name': u'(No milestone)',
                    'description': u'',
                    'qs': ('milestone', '__none__'),
                })
            ] + [
                (str(milestone.number), {
                    'key': str(milestone.number),
                    'name': '#%d - %s (%s)' % (milestone.number, milestone.title, milestone.state),
                    'description': milestone.description,
                    'qs': ('milestone', str(milestone.number)),
                    'object': milestone,
                })
                for milestone in reversed(list(self.milestones))
                if milestone_filter == 'all' or milestone.is_open
            ])
            # No board on milestones if no milestones
            if len(boards[column_name]['columns']) < 2:
                del boards[column_name]
        # Add projects
        if self.repository.has_some_projects:
            # Users with write rights also see projects without any column.
            if self.subscription.state in SUBSCRIPTION_STATES.WRITE_RIGHTS:
                projects = self.projects_including_empty
            else:
                projects = self.projects
            for project in projects:
                if not project.number:
                    continue
                columns = OrderedDict([
                    ('__none__', {
                        'key': '__none__',
                        'name': u'(Not in the project)',
                        'description': u'not in this project',
                        'qs': ('project_%s' % project.number, '__none__')
                    })
                ] + [
                    (str(column.pk), {
                        'key': str(column.pk),
                        'name': column.name,
                        'description': u'',
                        'qs': ('project_%s' % project.number, column.id),
                        'object': column,
                    })
                    for column in project.columns.all()
                ])
                if len(columns) > 1 or self.subscription.state in SUBSCRIPTION_STATES.WRITE_RIGHTS:
                    boards['project-%d' % project.number] = {
                        'mode': 'project',
                        'key': str(project.number),
                        'name': project.name,
                        'description': u'Github project with all its columns',
                        'object': project,
                        'columns': columns,
                        'default_qs': 'sort=position&direction=asc'
                    }
        # Add label types
        for label_type in self.label_types:
            columns = OrderedDict([
                ('__none__', {
                    'key': '__none__',
                    'name': u'(Not set)',
                    'description': u'no labels for this type',
                    'qs': ('labels', '%s:__none__' % label_type.name)
                })
            ] + [
                (str(label.pk), {
                    'key': str(label.pk),
                    'name': label.lower_typed_name,
                    'description': u'',
                    'qs': ('labels', label.name),
                    'object': label,
                })
                for label in label_type.labels.all()
            ])
            if len(columns) > 1:
                boards['labels-%d' % label_type.pk] = {
                    'mode': 'labels',
                    'key': str(label_type.pk),
                    'name': label_type.name,
                    'description': u'one column for each label of this type',
                    'object': label_type,
                    'columns': columns
                }
        # Compute urls and visible-column counts for every board.
        for board_key, board in boards.items():
            board['board_url'] = reverse(
                'front:repository:board',
                kwargs=dict(self.repository.get_reverse_kwargs(),
                            board_mode=board['mode'],
                            board_key=board['key'])
            )
            board['base_url'] = board['board_url']
            if board.get('default_qs'):
                board['board_url'] += '?' + board['default_qs']
            elif self.default_qs:
                board['board_url'] += '?' + self.default_qs
            board['visible_count'] = len(list(c for c in board['columns'].values() if not c.get('hidden', False)))
        return boards

    def get_boards_context(self):
        """Return a context with all boards, plus the current one when the
        url designates a valid board; 404 when one is required but missing."""
        context = {'boards': self.get_boards()}
        current_board_key = self.kwargs.get('board_key')
        if current_board_key:
            # Boards are keyed '<mode>-<key>' in the dict built above.
            current_board_key = '%s-%s' % (self.kwargs.get('board_mode'), current_board_key)
            if current_board_key not in context['boards']:
                current_board_key = None
        if self.raise_if_no_current_board and not current_board_key:
            raise Http404('No board here')
        context['current_board_key'] = current_board_key
        if current_board_key:
            self.current_board = context['current_board'] = context['boards'][current_board_key]
        context['labels_editor_url'] = reverse_lazy('front:repository:%s' % LabelsEditor.url_name, kwargs=self.repository.get_reverse_kwargs())
        return context
class BoardSelectorView(BoardMixin, BaseRepositoryView):
    """Landing page listing every board available for the repository."""
    name = 'Board'
    url_name = 'board-selector'
    template_name = 'front/repository/board/base.html'
    raise_if_no_current_board = False  # no board is selected on this page
    auto_open_selector = True
    display_in_menu = True

    def get_context_data(self, **kwargs):
        context = super(BoardSelectorView, self).get_context_data(**kwargs)
        context.update(self.get_boards_context())
        return context
class BoardView(BoardMixin, IssuesFilters, BaseRepositoryView):
    """Display one board; each column's content is loaded separately
    (see ``BoardColumnView``)."""
    name = 'Board'
    url_name = 'board'
    main_url_name = 'board-selector'  # to mark the link in the main menu as current
    template_name = 'front/repository/board/board.html'
    filters_template_name = 'front/repository/board/include_filters.html'
    options_template_name = 'front/repository/board/include_options.html'
    display_in_menu = False

    def __init__(self):
        self.list_uuid = 'board-main'  # used for the filters
        super(BoardView, self).__init__()

    def _can_add_position_sorting(self, qs_parts):
        # Position sorting always makes sense on github-project boards.
        if self.current_board['mode'] == 'project':
            return True
        return super(BoardView, self)._can_add_position_sorting(qs_parts)

    @cached_property
    def base_url(self):
        # used for the filters
        return self.current_board['base_url']

    def get_pre_context_data(self, **kwargs):
        context = super(BoardView, self).get_pre_context_data(**kwargs)
        context.update(self.get_boards_context())
        return context

    def get_context_data(self, **kwargs):
        context = super(BoardView, self).get_context_data(**kwargs)
        if not self.current_board:
            # Will be redirected in ``render_to_response`` so no need for more context
            return context
        if not self.request.is_ajax():
            # Full page load: compute the ajax url of every column.
            for column_key, column in self.current_board['columns'].items():
                column['url'] = reverse_lazy(
                    'front:repository:%s' % BoardColumnView.url_name,
                    kwargs=dict(
                        self.repository.get_reverse_kwargs(),
                        board_mode=self.current_board['mode'],
                        board_key=self.current_board['key'],
                        column_key=column_key,
                    )
                )
            context.update({
                'can_add_issues': True,
                'all_metrics': list(self.repository.all_metrics()),
            })
            context.update(self.repository.get_milestones_for_select(key='number', with_graph_url=True))
        context.update({
            'list_uuid': self.list_uuid,
            'current_issues_url': self.base_url,
            'filters_title': 'Filters for all columns',
            'can_show_shortcuts': True,
            'can_multiselect': context['current_repository_edit_level'] == 'full',
            'force_display_groups_options': True,
        })
        return context

    def get_template_names(self):
        """
        Use a specific template if the request is an ajax one
        """
        if self.request.is_ajax():
            return 'front/repository/board/board_ajax.html'
        return super(BoardView, self).get_template_names()

    def render_to_response(self, context, **response_kwargs):
        # No valid board in the url: send the user back to the selector.
        if not self.current_board:
            return HttpResponseRedirect(self.repository.get_view_url('board-selector'))
        return super(BoardView, self).render_to_response(context, **response_kwargs)
class BoardColumnMixin(BoardMixin):
    """BoardMixin specialization for views targeting a single column
    (``column_key`` url kwarg)."""

    @cached_property
    def base_url(self):
        return reverse_lazy('front:repository:%s' % self.url_name, kwargs=dict(
            self.repository.get_reverse_kwargs(),
            board_mode=self.kwargs['board_mode'],
            board_key=self.kwargs['board_key'],
            column_key=self.kwargs['column_key']
        ))

    def get_column_key_from_kwarg(self, name):
        """Return (key, column-dict) for the url kwarg ``name``; 404 when
        the key is not a column of the current board."""
        column_key = self.kwargs[name]
        if column_key not in self.current_board['columns']:
            raise Http404
        return column_key, self.current_board['columns'][column_key]

    def get_boards_context(self):
        context = super(BoardColumnMixin, self).get_boards_context()
        if not context.get('current_board', None):
            raise Http404
        current_column_key, current_column = self.get_column_key_from_kwarg('column_key')
        context.update({
            'current_column_key': current_column_key,
            'current_column': current_column
        })
        self.current_column = current_column
        context['current_column']['url'] = self.base_url
        return context
class BoardMoveIssueMixin(WithAjaxRestrictionViewMixin, WithIssueViewMixin, BaseRepositoryView):
    """Base for the ajax views that move an issue between board columns."""
    allowed_rights = SUBSCRIPTION_STATES.WRITE_RIGHTS
    display_in_menu = False
    ajax_only = True

    def get_post_view_info(self):
        """Return the (edit view class, edit url) handling the field that the
        current board is based on (state / assignees / requested reviewers /
        milestone / labels). 404 for boards that don't support moves."""
        board = self.current_board
        view, url = None, None
        if board['mode'] == 'auto':
            if board['key'] == 'state':
                view = IssueEditState
                url = self.issue.edit_field_url('state')
            elif board['key'] == 'assigned':
                view = IssueEditAssignees
                url = self.issue.edit_field_url('assignees')
            elif board['key'] == 'requested-reviewer':
                view = IssueEditRequestedReviewers
                url = self.issue.edit_field_url('requested_reviewers')
            elif 'milestone' in board['key']:
                # matches both 'open-milestones' and 'all-milestones'
                view = IssueEditMilestone
                url = self.issue.edit_field_url('milestone')
        elif board['mode'] == 'labels':
            view = IssueEditLabels
            url = self.issue.edit_field_url('labels')
        if not view:
            raise Http404
        return view, url

    def render_messages(self, **kwargs):
        # The ajax response body is just the rendered django messages.
        return render(self.request, 'front/messages.html', **kwargs)
class BoardCanMoveIssueView(BoardMoveIssueMixin, BoardColumnMixin):
    """Pre-drag ajax check: respond 409 with a message when a job is
    already editing the issue's field."""
    url_name = 'board-can-move'

    def post(self, request, *args, **kwargs):
        self.get_boards_context()
        view, url = self.get_post_view_info()
        current_job, who = view.get_job_for_issue(self.issue)
        if current_job:
            if who == self.request.user.username:
                who = 'yourself'
            messages.error(request, view.get_not_editable_user_message(self.issue, who))
            return self.render_messages(status=409)
        return self.render_messages()
class BoardMoveProjectCardMixin(object):
    """Resolve the dragged object on a project board: when the ``is_note``
    url kwarg is set, ``issue_number`` holds a note Card's pk; otherwise
    the moved object is the issue itself."""

    @cached_property
    def issue_or_card(self):
        from gim.core.models import Card, Issue
        if self.kwargs.get('is_note'):
            return get_object_or_404(Card,
                pk=self.kwargs['issue_number'],
                column=self.current_column['object'],
            )
        return self.issue
class BoardCanMoveProjectCardView(BoardMoveProjectCardMixin, BoardCanMoveIssueView):
    """Pre-drag ajax check for project boards: 409 when a job is already
    moving or editing the dragged issue or note."""
    url_name = 'board-can-move-project-card'

    @classmethod
    def get_job_for_object(cls, obj):
        """Return (job, username) for a queued job on ``obj``, retrying a few
        times since the job may complete quickly; (None, None) otherwise."""
        current_job = cls.get_current_job_for_object(obj)
        if current_job:
            for i in range(0, 3):
                sleep(0.1)  # wait a little, it may be fast
                current_job = cls.get_current_job_for_object(obj)
                if not current_job:
                    break
        if current_job:
            who = current_job.gh_args.hget('username')
            return current_job, who
        return None, None

    @classmethod
    def get_current_job_for_object(cls, obj):
        """Return the first queued job concerning ``obj`` (Issue or Card),
        or None."""
        from gim.core.models import Issue
        if isinstance(obj, Issue):
            to_check = [(IssueEditProjectsJob, 'identifier'), (MoveCardJob, 'issue_id')]
        else:
            to_check = [(MoveCardJob, 'identifier')]
        for job_model, field in to_check:
            try:
                job = job_model.collection(**{field: obj.pk, 'queued': 1}).instances()[0]
            except IndexError:
                pass
            else:
                return job
        return None

    def post(self, request, *args, **kwargs):
        self.get_boards_context()
        from gim.core.models import Card, Issue
        obj = self.issue_or_card
        current_job, who = self.get_job_for_object(obj)
        if current_job:
            if who == self.request.user.username:
                who = 'yourself'
            if isinstance(current_job, IssueEditProjects):
                message = u"""The <strong>projects</strong> for the %s <strong>#%d</strong> are
                currently being updated (asked by <strong>%s</strong>), please
                wait a few seconds and retry""" % (self.issue.type, self.issue.number, who)
            else:
                if isinstance(obj, Issue):
                    message = u"""A previous move for the %s <strong>#%d</strong> is
                    currently being saved (asked by <strong>%s</strong>), please
                    wait a few seconds and retry""" % (self.issue.type, self.issue.number, who)
                else:
                    message = u"""A previous move for this note is
                    currently being saved (asked by <strong>%s</strong>), please
                    wait a few seconds and retry""" % who
            messages.error(request, message)
            return self.render_messages(status=409)
        return self.render_messages()
class BoardMoveIssueView(BoardMoveIssueMixin, BoardColumnMixin):
    """Perform a move between columns by forging a POST to the edit view
    matching the board's field (state, assignees, reviewers, milestone,
    labels). ``column_key`` is the origin column, ``to_column_key`` the
    destination."""
    url_name = 'board-move'

    def get_post_view_info(self):
        """Return (view class, post data, url) for the move, where the data
        translates the column change into the field's new value(s)."""
        view, url = super(BoardMoveIssueView, self).get_post_view_info()
        data = {}
        skip_reset_front_uuid = False
        if view == IssueEditState:
            skip_reset_front_uuid = True
            data = {'state': self.new_column['key']}
        elif view == IssueEditAssignees:
            skip_reset_front_uuid = False
            assignees = self.issue.assignees.all()
            if self.new_column['key'] != self.current_column['key']:
                assignees = list(assignees.values_list('pk', flat=True))
                if self.new_column['key'] != '__none__':
                    try:
                        # Coming from the "(No one)" column: nothing to swap.
                        if self.current_column['key'] == '__none__':
                            raise ValueError
                        # Replace the origin column's user by the target one.
                        existing_index = assignees.index(self.current_column['object'].pk)
                        assignees[existing_index] = self.new_column['object'].pk
                    except ValueError:
                        assignees.append(self.new_column['object'].pk)
            data = {'assignees': assignees}
        elif view == IssueEditRequestedReviewers:
            skip_reset_front_uuid = False
            requested_reviewers = self.issue.requested_reviewers.all()
            if self.new_column['key'] != self.current_column['key']:
                requested_reviewers = list(requested_reviewers.values_list('pk', flat=True))
                if self.new_column['key'] != '__none__':
                    try:
                        if self.current_column['key'] == '__none__':
                            raise ValueError
                        existing_index = requested_reviewers.index(self.current_column['object'].pk)
                        requested_reviewers[existing_index] = self.new_column['object'].pk
                    except ValueError:
                        requested_reviewers.append(self.new_column['object'].pk)
            data = {'requested_reviewers': requested_reviewers}
        elif view == IssueEditMilestone:
            skip_reset_front_uuid = True
            data = {'milestone': '' if self.new_column['key'] == '__none__' else self.new_column['object'].pk}
        elif view == IssueEditLabels:
            skip_reset_front_uuid = False
            labels = self.issue.labels.all()
            if self.new_column['key'] != self.current_column['key']:
                if self.new_column['key'] == '__none__':
                    # Moving out of the type: drop all labels of this type.
                    labels = labels.exclude(label_type_id=self.current_column['object'].label_type_id)
                labels = list(labels.values_list('pk', flat=True))
                if self.new_column['key'] != '__none__':
                    try:
                        if self.current_column['key'] == '__none__':
                            raise ValueError
                        existing_index = labels.index(self.current_column['object'].pk)
                        labels[existing_index] = self.new_column['object'].pk
                    except ValueError:
                        labels.append(self.new_column['object'].pk)
            data = {'labels': labels}
        else:
            raise Http404
        data['front_uuid'] = self.request.POST['front_uuid']

        # Subclass the edit view on the fly so the save keeps the front uuid
        # handling and answers a plain 'OK'.
        class InternalBoardMoveView(view):
            def form_valid(self, form):
                form.instance.skip_reset_front_uuid = skip_reset_front_uuid
                self.object = form.save()
                self.after_form_valid(form)
                return HttpResponse('OK')

        return InternalBoardMoveView, data, url

    def get_boards_context(self):
        context = super(BoardMoveIssueView, self).get_boards_context()
        # The destination column becomes the "current" one in the context.
        new_column_key, new_column = self.get_column_key_from_kwarg('to_column_key')
        context.update({
            'current_column_key': new_column_key,
            'current_column': new_column
        })
        self.new_column = new_column
        return context

    def post(self, request, *args, **kwargs):
        self.get_boards_context()
        view, data, url = self.get_post_view_info()
        # Forward the move as an ajax POST to the matching edit view.
        new_request = forge_request(path=url, method='POST', post_data=data,
                                    source_request=self.request, headers={
                                        'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'
                                    }, pass_user=True)
        response = view.as_view()(new_request, **self.issue.get_reverse_kwargs())
        if response.status_code == 200:
            return self.render_messages()
        return response
class BoardMoveProjectCardView(BoardMoveProjectCardMixin, BoardMoveIssueView):
    """Move a card (issue or note) inside a github project board by
    queueing a ``MoveCardJob``."""
    url_name = 'board-move-project-card'

    def post(self, request, *args, **kwargs):
        self.get_boards_context()
        project = self.current_board['object']
        from_column = self.current_column.get('object')
        to_column = self.new_column.get('object')
        position = self.request.POST.get('position', None)
        if position is not None:
            try:
                position = int(position)
                if position < 1:
                    raise ValueError()
            except ValueError:
                return HttpResponseBadRequest()
        # here choice is done to not update the positions in db but let the job do it
        from gim.core.models import Card, Issue
        obj = self.issue_or_card
        if isinstance(obj, Card):
            card = obj
        else:
            if to_column:
                # we're asked to move a card from a column to another, or add it to the project
                try:
                    card = self.issue.cards.get(column__project=project)
                except Card.DoesNotExist:
                    now = datetime.utcnow()
                    card = Card.objects.create(
                        type=Card.CARDTYPE.ISSUE,
                        created_at=now,
                        updated_at=now,
                        issue=self.issue,
                        column=to_column,
                        position=position,
                    )
            else:
                # the card is in a column but is asked to be removed from the project
                card = self.issue.cards.get(column__project=project)
        # Keep the front uuid so the UI can match the update to its source.
        card.front_uuid = self.request.POST['front_uuid']
        card.save(update_fields=['front_uuid'])
        job_args = {}
        if isinstance(obj, Issue):
            job_args['issue_id'] = obj.pk
        if to_column:
            job_args['column_id'] = to_column.pk
            if from_column == to_column:
                # Same column: only the direction of the move matters.
                job_args['direction'] = 1 if position > card.position else -1
            if position:
                job_args['position'] = position
        MoveCardJob.add_job(
            card.pk,
            gh=self.request.user.get_connection(),
            **job_args
        )
        return self.render_messages()
class BoardColumnView(WithAjaxRestrictionViewMixin, BoardColumnMixin, IssuesView):
    """Ajax view rendering the (filtered) issues list of one column."""
    url_name = 'board-column'
    MIN_FILTER_KEYS = 3
    display_in_menu = False
    ajax_only = True
    filters_template_name = 'front/repository/board/include_filters.html'
    options_template_name = 'front/repository/board/include_options.html'
    filters_and_list_template_name = 'front/repository/board/include_filters_and_list.html'
    template_name = filters_and_list_template_name

    def get_pre_context_data(self, **kwargs):
        context = self.get_boards_context()
        context.update(super(BoardColumnView, self).get_pre_context_data(**kwargs))
        return context

    def get_context_data(self, **kwargs):
        context = super(BoardColumnView, self).get_context_data(**kwargs)
        if not self.needs_only_queryset:
            context.update({
                'list_key': self.current_column['key'],
                'list_title': self.current_column['name'],
                'list_description': self.current_column['description'],
                'filters_title': 'Filters for this column',
                'can_show_shortcuts': False,
                'can_add_issues': False,
                'can_handle_positions': False,
                'include_board_column_icons': self.request.GET.get('with-icons', False),
            })
        return context

    def get_querystring_context(self, querystring=None):
        """Force the column's own filter (``self.current_column['qs']``) into
        the querystring so the list only holds this column's issues."""
        qs_context = super(BoardColumnView, self).get_querystring_context(querystring)
        qs_parts = qs_context['querystring_parts']
        mode = self.current_board['mode']
        qs_name, qs_value = self.current_column['qs']
        if mode in ('auto', 'project'):
            qs_parts[qs_name] = qs_value
        elif mode == 'labels':
            # Strip any user-filtered labels belonging to the column's own
            # label type before forcing the column's label.
            qs_label_names = qs_parts.get('labels', None) or []
            if qs_label_names:
                label_type = [lt for lt in self.label_types if str(lt.pk) == self.current_board['key']][0]
                label_names = {l.lower_name for l in label_type.labels.all()}
                if not isinstance(qs_label_names, list):
                    qs_label_names = [qs_label_names]
                qs_label_names = [l for l in qs_label_names if l and l.lower() not in label_names]
            qs_parts['labels'] = qs_label_names + [qs_value]
        return {
            'querystring_parts': qs_parts,
            'querystring': make_querystring(qs_parts)[1:],
        }
class BoardProjectColumnView(BoardColumnView):
    """A board column for a project column"""
    url_name = 'board-project-column'
    filters_and_list_template_name = 'front/repository/board/projects/include_filters_and_list.html'

    def can_handle_positions(self, filter_parts):
        # Drag-to-reorder only works on a real column, sorted by position,
        # without grouping.
        return self.current_column['key'] != '__none__' and \
            filter_parts.get('sort') == 'position' and \
            not filter_parts.get('group_by')

    def can_display_notes(self, filter_parts):
        """Notes can be interleaved only when sorted by position on a real
        column AND no other filter than sort/direction/project is active."""
        if self.current_column['key'] != '__none__' and \
                filter_parts.get('sort') == 'position' and \
                not filter_parts.get('group_by'):
            # we may be able to display notes, but only if no filters
            allowed_keys = {'direction', 'sort', 'project_%s' % self.current_board['object'].number}
            return set(filter_parts.keys()) == allowed_keys
        else:
            return None

    def get_context_data(self, **kwargs):
        context = super(BoardProjectColumnView, self).get_context_data(**kwargs)
        if not self.needs_only_queryset:
            context['can_handle_positions'] = self.can_handle_positions(context['issues_filter']['parts'])
            context['can_display_notes'] = self.can_display_notes(context['issues_filter']['parts'])
        return context

    def finalize_issues(self, issues, context):
        """Interleave note cards with issues (in card-position order) when
        notes can be displayed; recompute count and limit accordingly."""
        issues, total_count, limit_reached, original_queryset = \
            super(BoardProjectColumnView, self).finalize_issues(issues, context)
        if self.can_display_notes(context['issues_filter']['parts']):
            from gim.core.models import Card
            incr_order = context['issues_filter']['parts']['direction'] == 'asc'
            column = self.current_column['object']
            issues = list(issues)
            issues_by_id = {issue.id: issue for issue in issues}
            # get all the cards to display
            filters = {}
            if limit_reached:
                # Only consider cards up to the last displayed issue's position.
                max_position = issues[-1].cards.get(column=column).position
                filters['position__lte' if incr_order else 'position__gte'] = max_position
            cards = column.cards.filter(**filters).order_by('position' if incr_order else '-position')
            # compose the list from the cards
            issues = []
            for card in cards:
                if card.type == Card.CARDTYPE.ISSUE:
                    if card.issue_id in issues_by_id:
                        issues.append(issues_by_id[card.issue_id])
                else:
                    issues.append(card)
            if limit_reached:
                issues = issues[:self.LIMIT_ISSUES]
                total_count = column.cards.count()
            else:
                total_count = len(issues)
                if not context['no_limit'] and total_count > self.LIMIT_ISSUES + self.LIMIT_ISSUES_TOLERANCE:
                    issues = issues[:self.LIMIT_ISSUES]
                    limit_reached = True
        return issues, total_count, limit_reached, original_queryset

    def get_template_names(self):
        # Column not yet created on github: render a placeholder instead.
        if self.current_column['object'].github_status in Column.GITHUB_STATUS_CHOICES.NOT_READY:
            return 'front/repository/board/projects/include_not_ready_column.html'
        return super(BoardProjectColumnView, self).get_template_names()
class WithProjectViewMixin(WithRepositoryViewMixin):
    """
    A mixin that is meant to be used when a view depends on a project.
    Provides stuff provided by WithSubscribedRepositoryViewMixin, plus:
    - a "project" property that'll get the project depending on the repository and
      the "number" url params
    - a "get_project_filter_args" to use to filter a model on a repository's name,
      its owner's username, and a project number
    And finally, put the project in the context
    """
    exclude_waiting_delete = True  # hide projects pending deletion on github

    def get_project_filter_args(self, filter_root=''):
        """
        Return a dict with attribute to filter a model for a given repository's
        name, its owner's username and a project number as given in the url.
        Use the "filter_root" to prefix the filter.
        """
        if filter_root and not filter_root.endswith('__'):
            filter_root += '__'
        return {
            '%srepository_id' % filter_root: self.repository.id,
            '%snumber' % filter_root: self.kwargs['project_number']
        }

    @cached_property
    def project(self):
        """
        Return (and cache) the project. Raise a 404 if the current user is
        not allowed to use this repository, or if the project is not found
        or waiting for deletion
        """
        queryset = Project.objects.select_related('repository__owner')
        if self.exclude_waiting_delete:
            queryset = queryset.exclude(github_status=Project.GITHUB_STATUS_CHOICES.WAITING_DELETE)
        return get_object_or_404(
            queryset,
            **self.get_project_filter_args()
        )

    def get_context_data(self, **kwargs):
        """
        Put the current project in the context
        """
        context = super(WithProjectViewMixin, self).get_context_data(**kwargs)
        context['current_project'] = self.project
        context['current_project_edit_level'] = self.get_edit_level(self.project)
        return context

    def get_edit_level(self, project):
        """
        Return the edit level of the given project. It may be None (read only),
        or "full"
        """
        edit_level = None
        if project and project.number:
            if self.subscription.state in SUBSCRIPTION_STATES.WRITE_RIGHTS:
                edit_level = 'full'
        return edit_level
class DependsOnProjectViewMixin(WithProjectViewMixin, DependsOnRepositoryViewMixin):
    """
    A simple mixin to use for views when the main object depends on a project.
    Will limit entries to ones matching the project fetched using url params
    and the "allowed_rights" attribute.
    The "project_related_name" attribute is the name to use to filter only
    on the current project.
    """
    project_related_name = 'project'
    repository_related_name = 'project__repository'

    def get_queryset(self):
        """
        Return a queryset based on the current repository, project, and allowed
        rights.
        """
        return self.model._default_manager.filter(**{
            self.project_related_name: self.project
        })
class LinkedToProjectFormViewMixin(WithAjaxRestrictionViewMixin, DependsOnProjectViewMixin):
    """
    A mixin for form views when the main object depends on a project, and
    using a form which is a subclass of LinkedToProjectFormMixin, to have the
    current project passed to the form
    """
    def get_form_kwargs(self):
        # The form (LinkedToProjectFormMixin subclass) expects the project.
        kwargs = super(LinkedToProjectFormViewMixin, self).get_form_kwargs()
        kwargs['project'] = self.project
        return kwargs
class CardNoteView(WithAjaxRestrictionViewMixin, DependsOnProjectViewMixin, DetailView):
    """Ajax view rendering a single project note (a ``Card``)."""
    context_object_name = 'note'
    pk_url_kwarg = 'card_pk'
    http_method_names = ['get']
    ajax_only = True
    url_name = 'project.note'
    model = Card
    template_name = 'front/repository/board/projects/include_note.html'
    job_model = CardNoteEditJob
    project_related_name = 'column__project'
    repository_related_name = 'column__project__repository'

    def get_object(self, queryset=None):
        """Return the card for the ``card_pk`` url kwarg.

        When it doesn't exist it may have just been deleted and recreated by
        a pending 'create' edit job (dist_edit): poll briefly for the job to
        expose the recreated pk. Raise Http404 when no card can be found.
        """
        if queryset is None:
            queryset = self.get_queryset()
        pk = self.kwargs['card_pk']
        obj = None
        job = None
        try:
            obj = queryset.get(pk=pk)
        except self.model.DoesNotExist:
            # maybe the object was deleted and recreated by dist_edit
            try:
                job = self.job_model.get(identifier=pk, mode='create')
            except ValueError:
                # several jobs share this identifier: use the most recent one
                job = list(self.job_model.collection(identifier=pk, mode='create').instances())[-1]
            except self.job_model.DoesNotExist:
                pass
        if job:
            # Poll for up to ~0.3s for the job to record the recreated pk.
            to_wait = 0.3
            while to_wait > 0:
                created_pk = job.created_pk.hget()
                if created_pk:
                    obj = queryset.get(pk=created_pk)
                    break
                sleep(0.1)
                # BUG FIX: ``to_wait`` was never decremented, so this loop
                # spun forever when the job never exposed a created pk.
                to_wait -= 0.1
        if not obj:
            raise Http404("No note found matching the query")
        return obj
class CardNoteEditMixin(LinkedToUserFormViewMixin, LinkedToProjectFormViewMixin):
    """Common behavior for create/edit/delete views of project notes:
    save locally, then queue a job to propagate the change to github."""
    model = Card
    job_model = CardNoteEditJob
    project_related_name = 'column__project'
    repository_related_name = 'column__project__repository'
    ajax_only = True
    http_method_names = ['get', 'post']
    edit_mode = None  # set by subclasses: 'create' / 'update' / 'delete'

    def form_valid(self, form):
        """
        Override the default behavior to add a job to edit the note on the
        github side
        """
        response = super(CardNoteEditMixin, self).form_valid(form)
        job_kwargs = {}
        if self.object.front_uuid:
            # Forward the front uuid so the UI can match the change to its
            # originating action.
            job_kwargs = {'extra_args': {
                'front_uuid': self.object.front_uuid,
                'skip_reset_front_uuid': 1,
            }}
        self.job_model.add_job(self.object.pk,
                               mode=self.edit_mode,
                               gh=self.request.user.get_connection(),
                               **job_kwargs)
        return response
class CardNoteCreateView(CardNoteEditMixin, CreateView):
    """Ajax view asking github to create a new note in a project column."""
    edit_mode = 'create'
    verb = 'created'
    template_name = 'front/repository/board/projects/include_note_create.html'
    url_name = 'project.note.create'
    form_class = CardNoteCreateForm
    context_object_name = 'note'

    @cached_property
    def column(self):
        """
        Return (and cache) the column. Raise a 404 if the current user is
        not allowed to use this repository, or if the column is not found
        """
        return get_object_or_404(
            self.project.columns,
            pk=self.kwargs['column_id']
        )

    def get_form_kwargs(self):
        # the form needs an unsaved Card already bound to the target column
        self.object = Card(column=self.column)
        return super(CardNoteCreateView, self).get_form_kwargs()

    def get_context_data(self, **kwargs):
        context = super(CardNoteCreateView, self).get_context_data(**kwargs)
        context['current_column'] = self.column
        # reuse the posted front-end uuid if any, else generate a fresh one
        context['create_note_uuid'] = self.request.POST.get('front_uuid', None) or uuid4()
        return context
class BaseCardNoteEditView(CardNoteEditMixin, UpdateView):
    """Base ajax view for updating/deleting an existing card note."""
    context_object_name = 'note'
    pk_url_kwarg = 'card_pk'
    # fallback used when the queued job's mode cannot be read
    default_edit_mode = 'update'

    def get_object(self, queryset=None):
        """
        Early check that the user has enough rights to edit this note
        """
        obj = super(BaseCardNoteEditView, self).get_object(queryset)
        if self.subscription.state not in SUBSCRIPTION_STATES.WRITE_RIGHTS:
            raise Http404
        return obj

    @classmethod
    def get_current_job_for_card(cls, card):
        """Return the currently queued edit job for `card`, or None."""
        try:
            job = cls.job_model.collection(identifier=card.id, queued=1).instances()[0]
        except IndexError:
            return None
        else:
            return job

    @classmethod
    def get_job_for_card(cls, card):
        """Return (job, username) for a pending job on `card`, retrying a few
        times in case it finishes quickly, else (None, None)."""
        current_job = cls.get_current_job_for_card(card)
        if current_job:
            for i in range(0, 3):
                sleep(0.1)  # wait a little, it may be fast
                current_job = cls.get_current_job_for_card(card)
                if not current_job:
                    break
        if current_job:
            who = current_job.gh_args.hget('username')
            return current_job, who
        return None, None

    @classmethod
    def get_not_editable_user_message(cls, card, edit_mode, who):
        """Build the user-facing conflict message."""
        message = u"This note is currently being %sd (asked by <strong>%s</strong>)" % (edit_mode or 'update', who)
        if edit_mode != 'delete':
            message += u", please wait a few seconds and retry"
        return message

    def render_not_editable(self, request, edit_mode, who):
        """Flash the conflict message and answer 409."""
        if who == request.user.username:
            who = 'yourself'
        messages.error(request, self.get_not_editable_user_message(self.object, edit_mode, who))
        # 409 Conflict Indicates that the request could not be processed because of
        # conflict in the request, such as an edit conflict between multiple simultaneous updates.
        return self.render_messages(status=409)

    def dispatch(self, request, *args, **kwargs):
        """Short-circuit with a 409 when a job is already queued for this note."""
        self.object = self.get_object()
        current_job, who = self.get_job_for_card(self.object)
        if current_job:
            # consistency fix: read the mode defensively with a fallback, as
            # BaseColumnEditView and BaseProjectEditView already do
            try:
                mode = current_job.mode.hget()
            except Exception:
                mode = self.default_edit_mode
            return self.render_not_editable(request, mode, who)
        # bug fix: was `super(CardNoteEditMixin, self)`, which skipped every
        # class up to CardNoteEditMixin in the MRO during dispatch; the sibling
        # Base*EditView classes all anchor super() on their own class
        return super(BaseCardNoteEditView, self).dispatch(request, *args, **kwargs)
class CardNoteEditView(BaseCardNoteEditView):
    """Ajax view asking github to update an existing note."""
    edit_mode = 'update'
    verb = 'updated'
    template_name = 'front/repository/board/projects/include_note_edit.html'
    url_name = 'project.note.edit'
    form_class = CardNoteEditForm
class CardNoteDeleteView(BaseCardNoteEditView):
    """Ajax view asking github to delete an existing note."""
    edit_mode = 'delete'
    verb = 'deleted'
    template_name = 'front/repository/board/projects/include_note_delete.html'
    url_name = 'project.note.delete'
    form_class = CardNoteDeleteForm
class ColumnEditMixin(LinkedToProjectFormViewMixin):
    """Shared configuration for the ajax views that create/update/delete a column."""
    model = Column
    job_model = ColumnEditJob
    project_related_name = 'project'
    repository_related_name = 'project__repository'
    ajax_only = True
    http_method_names = ['get', 'post']
    # set by concrete subclasses: 'create', 'update' or 'delete'
    edit_mode = None

    def form_valid(self, form):
        """
        Override the default behavior to add a job to edit the column on the
        github side
        """
        response = super(ColumnEditMixin, self).form_valid(form)
        job_kwargs = {}
        if self.object.front_uuid:
            # propagate the front-end uuid so the job can notify the right widget
            job_kwargs = {'extra_args': {
                'front_uuid': self.object.front_uuid,
                'skip_reset_front_uuid': 1,
            }}
        self.job_model.add_job(self.object.pk,
                               mode=self.edit_mode,
                               gh=self.request.user.get_connection(),
                               **job_kwargs)
        return response
class ColumnInfoView(LinkedToProjectFormViewMixin, DetailView):
    """Ajax view rendering the minimal info block of a column."""
    model = Column
    ajax_only = True
    repository_related_name = 'project__repository'
    http_method_names = ['get']
    template_name = 'front/repository/board/projects/minimal_column_info.html'
    context_object_name = 'column'
    pk_url_kwarg = 'column_id'
    url_name = 'project.column.info'
class ColumnCreateView(ColumnEditMixin, CreateView):
    """Ajax view asking github to create a new column in a project."""
    edit_mode = 'create'
    verb = 'created'
    template_name = 'front/repository/board/projects/include_column_create.html'
    url_name = 'project.column.create'
    form_class = ColumnCreateForm
    context_object_name = 'column'

    def get_context_data(self, **kwargs):
        """Expose the current project to the template."""
        data = super(ColumnCreateView, self).get_context_data(**kwargs)
        data['current_project'] = self.project
        return data
class BaseColumnEditView(ColumnEditMixin, UpdateView):
    """Base ajax view for updating/deleting an existing project column."""
    context_object_name = 'column'
    pk_url_kwarg = 'column_id'
    # fallback used when the queued job's mode cannot be read
    default_edit_mode = 'update'

    def get_object(self, queryset=None):
        """Fetch the column, refusing access to users without write rights."""
        column = super(BaseColumnEditView, self).get_object(queryset)
        if self.subscription.state not in SUBSCRIPTION_STATES.WRITE_RIGHTS:
            raise Http404
        return column

    @classmethod
    def get_current_job_for_column(cls, column):
        """Return the queued edit job for `column`, or None if there is none."""
        try:
            return cls.job_model.collection(identifier=column.id, queued=1).instances()[0]
        except IndexError:
            return None

    @classmethod
    def get_job_for_column(cls, column):
        """Return (job, username) for a pending job on `column`, retrying a few
        times in case it finishes quickly, else (None, None)."""
        pending = cls.get_current_job_for_column(column)
        if pending:
            for _ in range(3):
                sleep(0.1)  # give the job a moment: it may finish quickly
                pending = cls.get_current_job_for_column(column)
                if not pending:
                    break
        if not pending:
            return None, None
        return pending, pending.gh_args.hget('username')

    @classmethod
    def get_not_editable_user_message(cls, column, edit_mode, who):
        """Build the user-facing message shown when the column is locked by a job."""
        message = u"This column is currently being %sd (asked by <strong>%s</strong>)" % (edit_mode or 'update', who)
        if edit_mode != 'delete':
            message += u", please wait a few seconds and retry"
        return message

    def render_not_editable(self, request, edit_mode, who):
        """Flash the conflict message and answer 409 (edit conflict between
        simultaneous updates)."""
        author = 'yourself' if who == request.user.username else who
        messages.error(request, self.get_not_editable_user_message(self.object, edit_mode, author))
        return self.render_messages(status=409)

    def dispatch(self, request, *args, **kwargs):
        """Short-circuit with a 409 when a job is already queued for this column."""
        self.object = self.get_object()
        pending_job, author = self.get_job_for_column(self.object)
        if not pending_job:
            return super(BaseColumnEditView, self).dispatch(request, *args, **kwargs)
        try:
            mode = pending_job.mode.hget()
        except Exception:
            mode = self.default_edit_mode
        return self.render_not_editable(request, mode, author)
class ColumnEditView(BaseColumnEditView):
    """Ajax view asking github to update an existing column."""
    edit_mode = 'update'
    verb = 'updated'
    template_name = 'front/repository/board/projects/include_column_edit.html'
    url_name = 'project.column.edit'
    form_class = ColumnEditForm

    def get_success_url(self):
        # redirect to the minimal column info block once saved
        return self.object.get_info_url()
class ColumnDeleteView(BaseColumnEditView):
    """Ajax view asking github to delete an existing column."""
    edit_mode = 'delete'
    verb = 'deleted'
    template_name = 'front/repository/board/projects/include_column_delete.html'
    url_name = 'project.column.delete'
    form_class = ColumnDeleteForm
class ColumnCanMoveView(BaseColumnEditView):
    """Ajax endpoint answering whether a column is free to be moved."""
    job_model = ColumnMoveJob
    http_method_names = ['post']
    url_name = 'project.column.can-move'
    default_edit_mode = 'move'

    def post(self, request, *args, **kwargs):
        """Answer 409 with a flash message if a move job is pending, 200 otherwise."""
        column = self.get_object()
        pending_job, author = self.get_job_for_column(column)
        if not pending_job:
            return self.render_messages()
        if author == self.request.user.username:
            author = 'yourself'
        messages.error(request, self.get_not_editable_user_message(column, 'move', author))
        return self.render_messages(status=409)
class ColumnMoveView(ColumnCanMoveView):
    """Ajax view moving a column to a new position and syncing it with github."""
    url_name = 'project.column.move'

    def get_success_url(self):
        return self.object.get_info_url()

    def post(self, request, *args, **kwargs):
        """Validate the requested position, shift the sibling columns, save the
        moved column and queue the github move job."""
        # check if we can move the column
        response = super(ColumnMoveView, self).post(request, *args, **kwargs)
        if response.status_code >= 300:
            return response
        # ok we can move
        position = self.request.POST.get('position', None)
        if position is not None:
            try:
                position = int(position)
                if position < 1:
                    raise ValueError()
            except ValueError:
                return HttpResponseBadRequest()
        # NOTE(review): if 'position' is absent from the POST, the code below
        # runs with position=None -- confirm callers always post a position
        column = self.get_object()
        old_position = column.position
        if position != old_position:
            if position > old_position:  # going right
                # we move to the left all columns between the old position and the new one
                # excluding the old position (it's the column we move) and including the new one
                # (the column we move takes its place and the old one is on the left)
                to_move = column.project.columns.filter(position__gt=old_position, position__lte=position)
                for column_to_move in to_move:
                    column_to_move.position -= 1
                    column_to_move.save(update_fields=['position'])
            else:
                # we move to the right all columns between the old position and the new one
                # including the new position (the column we move takes its place and the old one
                # is on the right) and excluding the old position (it's the column we move)
                to_move = column.project.columns.filter(position__gte=position, position__lt=old_position)
                for column_to_move in to_move:
                    column_to_move.position += 1
                    column_to_move.save(update_fields=['position'])
            # and update the column
            column.position = position
            # NOTE(review): direct indexing raises KeyError if 'front_uuid' is
            # missing from the POST -- confirm the front-end always sends it
            column.front_uuid = self.request.POST['front_uuid']
            fields = ['position']
            if column.front_uuid:
                fields.append('front_uuid')
            column.save(update_fields=fields)
            # now we can create the job
            job_kwargs = {}
            if column.front_uuid:
                job_kwargs = {'extra_args': {
                    'front_uuid': column.front_uuid,
                }}
            self.job_model.add_job(column.pk,
                                   gh=self.request.user.get_connection(),
                                   **job_kwargs)
        return HttpResponseRedirect(self.get_success_url())
class ProjectSummaryView(WithAjaxRestrictionViewMixin, DependsOnRepositoryViewMixin, DetailView):
    """Ajax view rendering a project's summary modal (looked up by its number)."""
    model = Project
    ajax_only = True
    http_method_names = ['get']
    template_name = 'front/repository/board/projects/project_modal.html'
    context_object_name = 'project'
    slug_field = 'number'
    slug_url_kwarg = 'project_number'
    url_name = 'project.summary'
    exclude_waiting_delete = False
class NewProjectSummaryView(ProjectSummaryView):
    """Variant of ProjectSummaryView looking the project up by primary key."""
    pk_url_kwarg = 'project_id'
    slug_url_kwarg = None
    slug_field = None
    url_name = 'project.summary.new'
class ProjectEditMixin(LinkedToRepositoryFormViewMixin):
    """Shared configuration for the ajax views that create/update/delete a project."""
    model = Project
    job_model = ProjectEditJob
    ajax_only = True
    http_method_names = ['get', 'post']
    # set by concrete subclasses: 'create', 'update' or 'delete'
    edit_mode = None

    def form_valid(self, form):
        """
        Override the default behavior to add a job to edit the project on the
        github side
        """
        response = super(ProjectEditMixin, self).form_valid(form)
        job_kwargs = {}
        if self.object.front_uuid:
            # propagate the front-end uuid so the job can notify the right widget
            job_kwargs = {'extra_args': {
                'front_uuid': self.object.front_uuid,
                'skip_reset_front_uuid': 1,
            }}
        self.job_model.add_job(self.object.pk,
                               mode=self.edit_mode,
                               gh=self.request.user.get_connection(),
                               **job_kwargs)
        message = u"The project <strong>%s</strong> will be updated shortly" % self.object.name
        messages.success(self.request, message)
        return response
class BaseProjectEditView(ProjectEditMixin, UpdateView):
    """Base ajax view for updating/deleting an existing project."""
    context_object_name = 'project'
    slug_field = 'number'
    slug_url_kwarg = 'project_number'
    # fallback used when the queued job's mode cannot be read
    default_edit_mode = 'update'

    def get_object(self, queryset=None):
        """Fetch the project, refusing access to users without write rights."""
        project = super(BaseProjectEditView, self).get_object(queryset)
        if self.subscription.state not in SUBSCRIPTION_STATES.WRITE_RIGHTS:
            raise Http404
        return project

    @classmethod
    def get_current_job_for_project(cls, project):
        """Return the queued edit job for `project`, or None if there is none."""
        try:
            return cls.job_model.collection(identifier=project.id, queued=1).instances()[0]
        except IndexError:
            return None

    @classmethod
    def get_job_for_project(cls, project):
        """Return (job, username) for a pending job on `project`, retrying a few
        times in case it finishes quickly, else (None, None)."""
        pending = cls.get_current_job_for_project(project)
        if pending:
            for _ in range(3):
                sleep(0.1)  # give the job a moment: it may finish quickly
                pending = cls.get_current_job_for_project(project)
                if not pending:
                    break
        if not pending:
            return None, None
        return pending, pending.gh_args.hget('username')

    @classmethod
    def get_not_editable_user_message(cls, column, edit_mode, who):
        """Build the user-facing message shown when the project is locked by a job.

        (The first parameter is historically named `column`; it receives the
        project and is unused in the message.)
        """
        message = u"This project is currently being %sd (asked by <strong>%s</strong>)" % (edit_mode or 'update', who)
        if edit_mode != 'delete':
            message += u", please wait a few seconds and retry"
        return message

    def render_not_editable(self, request, edit_mode, who):
        """Flash the conflict message and answer 409 (edit conflict between
        simultaneous updates)."""
        author = 'yourself' if who == request.user.username else who
        messages.error(request, self.get_not_editable_user_message(self.object, edit_mode, author))
        return self.render_messages(status=409)

    def dispatch(self, request, *args, **kwargs):
        """Short-circuit with a 409 when a job is already queued for this project."""
        self.object = self.get_object()
        pending_job, author = self.get_job_for_project(self.object)
        if not pending_job:
            return super(BaseProjectEditView, self).dispatch(request, *args, **kwargs)
        try:
            mode = pending_job.mode.hget()
        except Exception:
            mode = self.default_edit_mode
        return self.render_not_editable(request, mode, author)
class ProjectEditView(BaseProjectEditView):
    """Ajax view asking github to update an existing project."""
    edit_mode = 'update'
    verb = 'updated'
    template_name = 'front/repository/board/projects/include_project_edit.html'
    url_name = 'project.edit'
    form_class = ProjectEditForm

    def get_success_url(self):
        return self.object.get_summary_url()
class ProjectDeleteView(BaseProjectEditView):
    """Ajax view asking github to delete an existing project."""
    edit_mode = 'delete'
    verb = 'deleted'
    template_name = 'front/repository/board/projects/include_project_edit.html'
    # NOTE(review): this duplicates ColumnDeleteView's url_name -- looks like a
    # copy/paste slip ('project.delete' expected); confirm against the URL conf
    # before renaming, as reverse() calls elsewhere may rely on it
    url_name = 'project.column.delete'
    form_class = ProjectDeleteForm

    def get_success_url(self):
        return self.object.get_summary_url()
class ProjectCreateView(ProjectEditMixin, LinkedToUserFormViewMixin, CreateView):
    """Ajax view asking github to create a new project."""
    edit_mode = 'create'
    verb = 'created'
    template_name = 'front/repository/board/projects/include_project_create.html'
    url_name = 'project.create'
    form_class = ProjectCreateForm
    context_object_name = 'project'

    def dispatch(self, request, *args, **kwargs):
        # non-ajax requests are served by the full-page variant instead
        if not request.is_ajax() and self.__class__ != ProjectCreateHomeView:
            return ProjectCreateHomeView.as_view()(request, *args, **kwargs)
        return super(ProjectCreateView, self).dispatch(request, *args, **kwargs)

    def get_success_url(self):
        return self.object.get_summary_url()
class ProjectCreateHomeView(ProjectCreateView, BoardMixin, RepositoryViewMixin):
    """Full-page (non-ajax) variant of the project creation view."""
    template_name = 'front/repository/board/projects/project_create.html'
    ajax_only = False
    auto_open_selector = False
    raise_if_no_current_board = False

    def get_context_data(self, **kwargs):
        # NOTE(review): super() is anchored on ProjectCreateView, skipping this
        # class's own slot in the MRO; ProjectCreateView defines no
        # get_context_data so behavior is currently the same -- confirm intent
        context = super(ProjectCreateView, self).get_context_data(**kwargs)
        context.update(self.get_boards_context())
        return context
|
#!/usr/bin/env python
from asyncdnspy.dns_message_header import Header
from asyncdnspy.dns_message_question import Question
from asyncdnspy.dns_message_resourcerecord import ResourceRecord
class DNSMessage(object):
    """A DNS message: a header plus question/answer/authority/additional sections."""
    # class-level defaults backing the properties below (name-mangled)
    __header = None
    __questions = None
    __answers = None
    __authority = None
    __additional = None

    @property
    def header(self):
        """Header property of dns message."""
        return self.__header

    @header.setter
    def header(self, value):
        """Setter function of header property.

        :param value: Value of header.
        :type value: Header instance.
        """
        self.__header = value

    @property
    def questions(self):
        """Questions property of dns message."""
        return self.__questions

    @questions.setter
    def questions(self, value):
        """Setter function of questions property.

        :param value: Questions.
        :type value: List of resource record/s.
        """
        self.__questions = value

    @property
    def answers(self):
        """Answers property of dns message."""
        return self.__answers

    @answers.setter
    def answers(self, value):
        """Setter function of answers property.

        :param value: Answers.
        :type value: List of resource record/s.
        """
        self.__answers = value

    @property
    def authority(self):
        """Authority property of dns message."""
        return self.__authority

    @authority.setter
    def authority(self, value):
        """Setter function of authority property.

        :param value: Authority.
        :type value: List of resource record/s.
        """
        self.__authority = value

    @property
    def additional(self):
        """Additional property of dns message."""
        return self.__additional

    @additional.setter
    def additional(self, value):
        """Setter function of additional property.

        :param value: Additional.
        :type value: List of resource record/s.
        """
        self.__additional = value
|
#!/usr/bin/env python3
if __name__ == '__main__':
x = int(raw_input())
y = int(raw_input())
z = int(raw_input())
n = int(raw_input())
print([[i, j, k] for i in range(0, x+1) for j in range(0, y+1) for k in range(0, z+1) if i + j + k != n])
|
import math


# Demo of map/filter/sorted with small helper functions.  Per PEP 8 (E731),
# lambdas bound to names are rewritten as defs; anonymous lambdas passed
# directly to map/filter are idiomatic and kept.

def add_up(x, y):
    """Return the sum of two numbers."""
    return x + y


print(add_up(2, 5))


def first_time(my_list):
    """Return the first element of a list."""
    return my_list[0]


print(first_time(['cat', 'dog', 'mouse']))

# average name length
# (bug fix: removed a dead `lengths = []` that was immediately overwritten)
names = ['Magda', 'Jose', 'Anne']
lengths = list(map(len, names))
print(sum(lengths) / len(lengths))

# logistic (sigmoid) function applied element-wise
nums = [-3, -5, 1, 4]
print(list(map(lambda x: 1 / (1 + math.exp(-x)), nums)))

# keep only the three-letter names
names = ['Karen', 'Jin', 'Kim']
print(list(filter(lambda name: len(name) == 3, names)))

# sum of numbers below 1000 divisible by 3 or 7
nums = list(range(1000))
filtered = filter(lambda x: x % 3 == 0 or x % 7 == 0, nums)
print(sum(filtered))

# sort by name length (len itself is the key function; no lambda needed)
names = ['Ming', 'Jennifer', 'Andrew', 'Boris']
print(sorted(names, key=len))
|
# ****************************************************************** #
# ************************* Byte of Python ************************* #
# ****************************************************************** #
########################
# backup_ver1
########################
# import os
# import time
# source = '"D:\\Work\\Develop\\PythonLearning\\base\\backup source"'
# target_dir = '"D:\\Work\\Develop\\PythonLearning\\base\\backup target"'
# target = target_dir + os.sep + time.strftime("%Y%m%d%H%M%S") + ".zip"
# zip_command = "zip -qr {0} {1}".format(target, source)
# print(zip_command)
# if os.system(zip_command) == 0:
# print("Successful backup to", target)
# else:
# print("Backup FAILED")
########################
# backup_ver2
########################
# import os
# import time
# source = r"D:\Work\Develop\PythonLearning\base\backup source"
# target_dir = r"D:\Work\Develop\PythonLearning\base\backup target"
# # today = '"{0}"'.format(target_dir + os.sep + time.strftime("%Y%m%d"))
# today = target_dir + os.sep + time.strftime("%Y%m%d")
# now = time.strftime("%H%M%S")
# # print(today)
# if not os.path.exists(today):
# os.mkdir(today)
# print("Sucessfully created directory", today)
# target = today + os.sep + now + ".zip"
# # print(target)
# # print(source)
# zip_command = 'zip -r "{0}" "{1}"'.format(target, source)
# # print(zip_command)
# if os.system(zip_command) == 0:
# print("Successful backup to", target)
# else:
# print("Backup FAILED")
########################
# backup_ver3
########################
import os
import time
import subprocess

# Directory to back up and the root directory that receives the archives.
source = r"D:\Work\Develop\PythonLearning\base\backup source"
target_dir = r"D:\Work\Develop\PythonLearning\base\backup target"

# One sub-directory per day, one zip file per run (time plus optional comment).
today = target_dir + os.sep + time.strftime("%Y%m%d")
now = time.strftime("%H%M%S")

comment = input("Enter a comment --> ")
if len(comment) == 0:
    target = today + os.sep + now + ".zip"
else:
    target = today + os.sep + now + "_" + comment.replace(" ", "_") + ".zip"

if not os.path.exists(today):
    os.mkdir(today)
    print("Successfully created directory", today)

# Security fix: the zip command was previously built as a single shell string
# and run via os.system(), so quotes in the paths or the user-typed comment
# could inject arbitrary shell commands.  Passing an argument list with the
# default shell=False avoids any shell interpretation.
if subprocess.run(["zip", "-r", target, source]).returncode == 0:
    print("Successful backup to", target)
else:
    print("Backup FAILED")
|
import numpy as np
# Scenario geometry: the vehicle starts at negative x and drives toward x=0.
x_stop = -2.5    # position where the vehicle must be at a standstill (m)
x_brake = -38.5  # position where braking starts (m)


def braking_spec(time_gap, speed, x_stop=x_stop, x_brake=x_brake):
    """Return (deceleration, brake onset time, stop time) for a constant
    deceleration bringing the vehicle from `speed` at `x_brake` to a
    standstill exactly at `x_stop`.

    `time_gap` fixes the initial position: x0 = -time_gap * speed, i.e. the
    vehicle is `time_gap` seconds short of x=0 at t=0.
    """
    if x_brake >= x_stop:
        # no room to brake: never decelerate, never stop
        return 0, np.inf, np.inf
    v0 = speed
    x0 = -time_gap * v0
    brake_dist = x_stop - x_brake
    a = -v0 ** 2 / (2 * brake_dist)  # constant deceleration (a < 0)
    brake_dur = -v0 / a              # duration from brake onset to standstill
    t_brake = (x_brake - x0) / v0    # time at which the vehicle reaches x_brake
    t_stop = t_brake + brake_dur
    return a, t_brake, t_stop


def simulate_trajectory(t, time_gap, speed, braking, x_stop=x_stop, x_brake=x_brake):
    """Simulate position and speed over the time vector `t` (float array).

    Returns (x, v, (t_brake, t_stop)).  With `braking` false the vehicle
    cruises at constant `speed` forever; otherwise it decelerates according
    to braking_spec() and then stays stopped at `x_stop`.
    """
    v0 = speed
    x0 = -time_gap * v0
    if not braking:
        x = t * v0 + x0
        v = np.repeat(speed, len(t))
        return x, v, (np.inf, np.inf)
    a, t_brake, t_stop = braking_spec(time_gap, speed, x_stop=x_stop, x_brake=x_brake)
    # (fix: removed a dead `v = np.zeros(len(t))` that was immediately
    # overwritten by np.piecewise below)
    # Three phases: cruising, braking, stopped (the trailing default piece).
    v = np.piecewise(t,
        [(t <= t_brake), (t > t_brake) & (t <= t_stop)],
        [lambda t: v0, lambda t: (t - t_brake) * a + v0, 0.0],
    )
    x = np.piecewise(t,
        [(t <= t_brake), (t > t_brake) & (t <= t_stop)],
        [lambda t: (t * v0 + x0),
         lambda t: (t_brake * v0 + x0) + (t - t_brake) * v0 + a / 2 * (t - t_brake) ** 2,
         x_stop],
    )
    return x, v, (t_brake, t_stop)
def test():
    """Quick visual check: plot time-to-arrival (tau) for a braking trajectory."""
    import matplotlib.pyplot as plt
    v0 = 25/2.237  # 25 mph converted to m/s
    time_gap = 2.0
    trng = np.linspace(-3, 20, 100)
    x, v, (t_brake, t_stop) = simulate_trajectory(trng, time_gap, v0, True)
    #plt.plot(trng, v)
    #plt.plot(trng, x)
    # time-to-arrival at x=0; after t_stop v is 0, so this divides by zero
    # there (numpy yields inf/nan with a runtime warning)
    tau = -x/v
    plt.plot(trng, tau)
    plt.axvline(0, color='black')
    plt.axhline(time_gap, color='black')
    plt.show()


if __name__ == '__main__':
    test()
|
#!/usr/bin/env python
# Demonstrates that importlib does not expose an `is_package` helper at the
# top level: the call below fails (the original comment notes ImportError).
from importlib import *

is_package("os")  # fails: not part of importlib's public star-importable API
|
'''
152. Maximum Product Subarray
Medium
Given an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product.
Example 1:
Input: [2,3,-2,4]
Output: 6
Explanation: [2,3] has the largest product 6.
Example 2:
Input: [-2,0,-1]
Output: 0
Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
'''
class Solution:
    def maxProduct(self, nums):
        """Return the largest product over all contiguous subarrays of nums.

        Tracks both the largest and smallest product ending at each index,
        because multiplying by a negative value can turn the smallest product
        into the largest.  Returns 0 for an empty input.
        """
        if not nums:
            return 0
        best = hi = lo = nums[0]
        for value in nums[1:]:
            candidates = (value, hi * value, lo * value)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
|
"""Top-level project Main function."""
import sys
import pickle
import xmlSheetSearch
import xmlStaticOperators
sys.path.append('../../../runtime_data/')
import RunTimeData
def main():
    """Top-level project Main function.

    For each configured year, load the pickled word dictionary, extract the
    firm-location and page data via xmlSheetSearch, and pickle the results
    back out, printing progress via RunTimeData.
    """
    def construct_paths(lower_bound, upper_bound):
        """Construct {year: [in_file, firms_out_file, two-digit year]} for the range."""
        file_dict = {int('19' + str(year)): [
            '../../text_output/pickle_objects/full_objects_words/Industrials_19{}_dictionary.file'.format(year),
            '../../text_output/pickle_objects/firm_location_objects/Industrials_19{}_firms.file'.format(year),
            str(year)] for year in range(lower_bound, upper_bound)}
        return file_dict

    starting_data = RunTimeData.starting_print_statement()
    start_time = starting_data[0]
    time_elapsed = starting_data[1]
    file_dict = construct_paths(40, 42)
    for file_path_list in file_dict.values():
        in_file = file_path_list[0]
        firms_out_file = file_path_list[1]
        year = file_path_list[2]
        outfiles = {}
        # fix: was `time_elapsed = time_elapsed = ...` (duplicated assignment)
        time_elapsed = RunTimeData.interim_print_statement(firms_out_file, start_time, time_elapsed)
        with open(in_file, 'rb') as object_in:
            xml_data = pickle.load(object_in)
        xml_sheet = xmlSheetSearch.xmlSheetSearch(in_file, xml_data)
        firm_data = xml_sheet.location_data
        outfiles.update({firms_out_file: firm_data})
        page_data_outfile = '../../text_output/pickle_objects/page_data_objects/Industrials_19{}_data_dictionary.file'.format(year)
        page_data_dictionary = xml_sheet.page_data_dictionary
        page_line_data = xml_sheet.line_data
        outfiles.update({page_data_outfile: [page_data_dictionary, page_line_data]})
        # clear each destination, then pickle the collected results out
        for out_file, data in outfiles.items():
            xmlStaticOperators.clear_destination(out_file)
            with open(out_file, 'wb') as file_out:
                pickle.dump(data, file_out, pickle.HIGHEST_PROTOCOL)
    RunTimeData.concluding_print_statement(start_time, time_elapsed)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
import os
from amaranth import Elaboratable, Module, Signal
from usb_protocol.types import USBTransferType
from usb_protocol.emitters import DeviceDescriptorCollection
from luna import top_level_cli
from luna.usb2 import USBDevice, USBSignalInEndpoint
class USBInterruptExample(Elaboratable):
    """ Simple example of a USB device that presents an interrupt endpoint.

    This demonstrates use of the ``USBSignalInEndpoint``, which reports the value
    of a status signal when polled. Here, we'll create a 32-bit counter, and report
    its value each time our interrupt endpoint is polled.
    """

    def create_descriptors(self):
        """ Create the descriptors we want to use for our device. """
        descriptors = DeviceDescriptorCollection()

        #
        # We'll add the major components of the descriptors we want.
        # The collection we build here will be necessary to create a standard endpoint.
        #

        # We'll need a device descriptor...
        with descriptors.DeviceDescriptor() as d:
            d.idVendor = 0x16d0
            d.idProduct = 0xf3b
            d.iManufacturer = "LUNA"
            d.iProduct = "Status interrupt mechanism"
            d.iSerialNumber = "1234"
            d.bNumConfigurations = 1

        # ... and a description of the USB configuration we'll provide.
        with descriptors.ConfigurationDescriptor() as c:
            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0

                # Single in endpoint, EP1/IN.
                with i.EndpointDescriptor() as e:
                    e.bEndpointAddress = 0x81
                    e.wMaxPacketSize = 64
                    e.bmAttributes = USBTransferType.INTERRUPT

                    # Request that we be polled once per 2^3 microframes
                    # (bInterval of 4 -> interval 2^(4-1) microframes).
                    e.bInterval = 4

        return descriptors

    def elaborate(self, platform):
        m = Module()

        # Generate our domain clocks/resets.
        m.submodules.car = platform.clock_domain_generator()

        # Create the 32-bit counter we'll be using as our status signal.
        counter = Signal(32)
        m.d.usb += counter.eq(counter + 1)

        # Create our USB device interface...
        ulpi = platform.request(platform.default_usb_connection)
        m.submodules.usb = usb = USBDevice(bus=ulpi)

        # Add our standard control endpoint to the device.
        descriptors = self.create_descriptors()
        usb.add_standard_control_endpoint(descriptors)

        # Create an interrupt endpoint which will carry the value of our counter to the host
        # each time our interrupt EP is polled.
        status_ep = USBSignalInEndpoint(width=32, endpoint_number=1, endianness="big")
        usb.add_endpoint(status_ep)
        m.d.comb += status_ep.signal.eq(counter)

        # Connect our device as a high speed device by default.
        m.d.comb += [
            usb.connect          .eq(1),
            usb.full_speed_only  .eq(1 if os.getenv('LUNA_FULL_ONLY') else 0),
        ]

        return m
# Hand the design to LUNA's standard build/program command-line interface.
if __name__ == "__main__":
    top_level_cli(USBInterruptExample)
|
import unittest
from katas.kyu_8.validate_code_with_simple_regex import validate_code
class ValidateCodeTestCase(unittest.TestCase):
    """Tests for validate_code (8 kyu kata).

    From the cases below, 123/248/321 validate while 8 and 9453 do not --
    presumably the code must start with 1, 2 or 3 (confirm against the kata
    statement).
    """

    def test_true(self):
        self.assertTrue(validate_code(123))

    def test_true_2(self):
        self.assertTrue(validate_code(248))

    def test_true_3(self):
        self.assertTrue(validate_code(321))

    def test_false(self):
        self.assertFalse(validate_code(8))

    def test_false_2(self):
        self.assertFalse(validate_code(9453))
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo addon manifest for the TrendAV base customisation module.
{
    'name': 'Base [TrendAV]',
    'version': '1.0.1',
    'category': 'Hidden',
    'author': 'Ing. Rigoberto Martínez',
    'maintainer': 'TrendAV',
    'website': 'http://www.trendav.com',
    # load very early: other TrendAV modules depend on this one
    'sequence': 1,
    'description': """
Update Odoo - TrendAV, needed for all installation.
===================================================
""",
    'depends': ['base','product','account','board','base_vat'],
    'data': [
        'static/src/xml/base_static.xml',
        'views/base_menu.xml',
    ],
    'demo': [
    ],
    'test': [
    ],
    'qweb': [],
    'installable': True,
    'auto_install': False,
    'application': False,
}
|
import os
import sys
import logging
import datetime
import collections
from api import ApiClient
import auth
from utils import *
from .remotecontent import get_remote_content_loader
import db
from flask import Flask, render_template, request, \
abort, g, session
# Application factory-less setup: configure the Flask app, logging, and
# register all blueprints according to the feature flags in configuration.
app = Flask("BlockedFrontend", subdomain_matching=True)
app.config.from_object('BlockedFrontend.default_settings')
if 'BLOCKEDFRONTEND_SETTINGS' in os.environ:
    # optional override file pointed at by the environment
    app.config.from_envvar('BLOCKEDFRONTEND_SETTINGS')

if app.config.get('SITE_THEME'):
    # a theme prepends its own template directory so its templates win
    searchpath = app.jinja_loader.searchpath
    app.jinja_loader.searchpath.insert(0, searchpath[0] + '/' + app.config['SITE_THEME'])

# app.secret_key = app.config['SESSION_KEY']

logging.basicConfig(
    level=getattr(logging, app.config.get('LOGLEVEL', 'INFO')),
    datefmt="[%Y-%m-%dT%H:%M:%S]",
    format="%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s"
)
logging.info("API_EMAIL: %s", app.config['API_EMAIL'])
logging.info("REMOTE_SRC: %s", app.config['REMOTE_SRC'])

# blueprints
www_domain = app.config['SUBDOMAIN_MAIN']
app.url_map.default_subdomain = www_domain

# optional admin module and its sub-sections
if app.config['MODULE_ADMIN']:
    from admin import admin_pages
    app.register_blueprint(admin_pages, subdomain=www_domain)
    from admin_rightsholder import admin_rightsholder_pages
    app.register_blueprint(admin_rightsholder_pages, subdomain=www_domain)
    from admin_savedlists import admin_savedlist_pages
    app.register_blueprint(admin_savedlist_pages, subdomain=www_domain)
    from admin_ispreports import admin_ispreport_pages
    app.register_blueprint(admin_ispreport_pages, subdomain=www_domain)

# the '451' theme uses its own page set instead of the CMS routing
if app.config.get('SITE_THEME') == '451':
    from err451 import err451_pages
    app.register_blueprint(err451_pages)
else:
    from cms import cms_pages, custom_routing
    custom_routing(app.config['SITE_THEME'])
    app.register_blueprint(cms_pages, subdomain=www_domain)

from site_results import site_pages
app.register_blueprint(site_pages, subdomain=www_domain)

if app.config['MODULE_CATEGORY']:
    from category import category_pages
    app.register_blueprint(category_pages, subdomain=www_domain)

if app.config['MODULE_UNBLOCK']:
    from unblock import unblock_pages
    app.register_blueprint(unblock_pages, subdomain=www_domain)

if app.config['MODULE_SAVEDLIST']:
    from savedlists import list_pages
    app.register_blueprint(list_pages, subdomain=www_domain)

from stats import stats_pages
app.register_blueprint(stats_pages, subdomain=www_domain)

from registry import registry_pages
app.register_blueprint(registry_pages, subdomain=app.config['SUBDOMAIN_NOMINET'])

# CMS assets are shared across the main and satellite subdomains
from cmsassets import cms_assets
app.register_blueprint(cms_assets, subdomain=www_domain)
app.register_blueprint(cms_assets, subdomain=app.config['SUBDOMAIN_INJUNCTIONS'])
app.register_blueprint(cms_assets, subdomain=app.config['SUBDOMAIN_NOMINET'])
@app.before_first_request
def setup_db():
    """Initialise the database layer once, before the first request is served."""
    db.setup()
@app.before_request
def open_db():
    """Attach a pooled DB connection to the request context (skipped for static files)."""
    if request.path.startswith('/static'):
        return
    g.conn = db.db_connect_pool()
@app.teardown_appcontext
def close_db(error):
    """Return the request's DB connection, if one was opened by open_db()."""
    if hasattr(g, 'conn'):
        db.db_disconnect(g.conn)
@app.before_request
def hook_api():
    """Attach an authenticated API client to the request context."""
    g.api = ApiClient(
        app.config['API_EMAIL'],
        app.config['API_SECRET']
    )
    # allow the API endpoint itself to be overridden from configuration
    if 'API' in app.config:
        g.api.API = app.config['API']
@app.before_request
def hook_miscdata():
    """Load the 'misc' resource data onto the request context (not for static files)."""
    if request.path.startswith('/static'):
        return
    from resources import load_data
    g.miscvars = load_data('misc')
@app.template_filter('fmtime')
def fmtime(s):
    """Format a datetime (or a 'YYYY-MM-DD HH:MM:SS' string) as
    '02 January, 2020 at 03:04'; falsy input yields ''."""
    if not s:
        return ''
    when = s if isinstance(s, datetime.datetime) \
        else datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
    return when.strftime('%d %B, %Y at %H:%M')
@app.template_filter('fmdate')
def fmdate(s):
    """Format a date (or a 'YYYY-MM-DD' string) as '02 January, 2020';
    falsy input yields ''."""
    if not s:
        return ''
    when = s if isinstance(s, datetime.date) \
        else datetime.datetime.strptime(s, '%Y-%m-%d')
    return when.strftime('%d %B, %Y')
@app.template_filter('null')
def null(s, default=''):
    """Template filter: replace None or blank/whitespace-only strings with `default`."""
    if s is None:
        return default
    # NOTE: `unicode` is Python 2 only -- this module predates Python 3
    if isinstance(s, (str, unicode)) and not s.strip():
        return default
    return s
@app.template_filter('strip')
def strip(s, chars):
    """Template filter: str.strip with an explicit character set."""
    return s.strip(chars)
@app.template_filter('join_en')
def join_en(ls, markup=False):
    """Join a list into English prose: "a, b and c".

    With markup=True every item is wrapped in a <span> tag.  An empty list
    yields ''.
    """
    if markup:
        wrap = lambda item: "<span>{0}</span>".format(item)
    else:
        wrap = lambda item: item
    if not ls:
        return ''
    if len(ls) == 1:
        return wrap(ls[0])
    head = ", ".join(wrap(item) for item in ls[:-1])
    return head + " and " + wrap(ls[-1])
@app.template_filter('domain')
def domain(url):
    """Shorten a URL to just the domain"""
    import urlparse  # Python 2 module (urllib.parse in Python 3)
    try:
        parts = urlparse.urlparse(url)
        return parts.netloc
    except Exception as exc:
        # best effort: fall back to the unmodified URL on any parse failure
        logging.warn("filter.domain exception: %s", repr(exc))
        return url
@app.template_filter('customgrouper')
def customgrouper(values, keys):
    """Group `values` (dicts) by the compound key built from `keys`.

    Used by the legal-blocks template in court-orders mode.  Assumes the
    input is already sorted by the same compound key (itertools.groupby
    only groups consecutive runs).
    """
    import itertools
    return itertools.groupby(values, key=lambda row: [row[k] for k in keys])
@app.template_filter('noproto')
def filter_noproto(url):
    """Strip a leading http:// or https:// scheme from a URL; None passes through."""
    import re
    if url is None:
        return None
    return re.sub(r'^https?://', '', url)
@app.template_filter('stripstyletag')
def filter_strip_style(s):
    """Remove <style ...>...</style> blocks from an HTML fragment.

    Fixes: '[^>]+' required at least one attribute character, so a bare
    '<style>' tag never matched; without DOTALL the pattern missed multi-line
    style blocks; and the greedy '.*' would delete everything between the
    first '<style>' and the LAST '</style>' when several blocks were present.
    """
    import re
    return re.sub(r'<style[^>]*>.*?</style>', '', s, flags=re.DOTALL)
@app.template_filter('lpad')
def lpad(s, width=20):
    """Pad *s* with trailing spaces to *width* columns.

    NOTE(review): despite the name, str.ljust left-justifies (pads on the
    right) — confirm templates expect this.
    """
    return s.ljust(width)


@app.template_filter('rmtasset')
def rmtasset(path):
    """Convert a relative path to a CMS asset URL; absolute URLs pass through."""
    return path if path.startswith(('http:', 'https:')) else '/cms/assets' + path
@app.template_filter('strip_email_phone')
def strip_email_phone(value):
    """Redact phone-number-like and e-mail-like substrings from free text."""
    import re
    redacted = re.sub(r'\+?\d{4,6}\s*[ \-\d]{5,7}', '<phone removed>', value)
    redacted = re.sub(r'\S+@([\S\.])+\.\S+', '<email removed>', redacted)
    return redacted
@app.errorhandler(Exception)
def on_error(error):
    """Log any unhandled exception; re-raise under DEBUG, else show the error page."""
    logging.warn("Exception: %s", repr(error))
    if app.config['DEBUG']:
        raise
    return render_template('error.html'), 500


@app.before_request
def check_user():
    """Stash the session's admin flags and level-check helper on flask.g."""
    g.admin = session.get('admin', False)
    g.admin_level = session.get('admin_level', 'admin' if g.admin else 'user')
    g.is_level = auth.is_level
@app.before_request
def load_remote_content():
    """Populate g.remote_content / g.remote_chunks from the configured remote CMS.

    Skipped for static/asset requests and for the '451' site theme.
    """
    if request.path.startswith('/static') or request.path.startswith('/assets'):
        return
    if app.config.get('SITE_THEME') == '451':
        return
    # Defaults so templates can read these even when no remote is configured.
    g.remote_content = collections.defaultdict(dict)
    g.remote_chunks = collections.defaultdict(lambda: None)
    if app.config.get('REMOTE_SRC'):
        loaderclass = get_remote_content_loader(app.config.get('REMOTE_TYPE'))
        g.remote = loaderclass(
            app.config['REMOTE_SRC'],
            app.config['REMOTE_AUTH'],
            app.config['CACHE_PATH'],
            app.config['REMOTE_RELOAD'] and g.admin,  # remote reload only available to admin users
        )
        logging.debug("Loading chunks")
        g.remote_chunks = g.remote.get_content('chunks')
        logging.debug("Got chunks: %s", g.remote_chunks.keys())
def run():
    """Start the development server, listening on every interface."""
    app.run(host='0.0.0.0')
|
class house():
    """A simple dwelling with a name, build material, floor area and storey count."""
    # Class-level defaults shared by all houses.
    house_type = 'dwelling'
    location = 'terrestrial'
    def __init__(self, name, material, size, storeys):
        self.name = name
        self.material = material
        self.size = size        # floor area; printed as sq. metres by callers
        self.storeys = storeys
    def get_mat(self):
        """Return the build material."""
        return self.material
    def get_size(self):
        """Return the floor area."""
        return self.size
    def get_storeys(self):
        """Return the number of storeys."""
        return self.storeys
# Demonstrate the class with one small and one large example house.
small = house('small', 'wood', 500, 2)
big = house('big', 'brick', 10000, 3)
for dwelling in (small, big):
    print(dwelling.get_mat())
    print(dwelling.get_size(), 'sq. metres')
    print(dwelling.get_storeys(), 'storeys')
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Root URL configuration: admin site, auth system at '/', home app at '/home/'.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('authsystem.urls', namespace = 'auth')),
    path('home/', include('home.urls', namespace = 'home'))
]
# Serve uploaded media through Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
# Generated by Django 3.1.7 on 2021-03-24 13:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Rework SystemUser audit fields: rename is_delete -> is_deleted, drop the
    old add_at/change_at/delete_at timestamps and two unused flags, and add
    created_at/changed_at/deleted_at in their place."""
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='systemuser',
            options={'ordering': ['-created_at'], 'verbose_name': 'account', 'verbose_name_plural': 'accounts'},
        ),
        migrations.RenameField(
            model_name='systemuser',
            old_name='is_delete',
            new_name='is_deleted',
        ),
        migrations.RemoveField(
            model_name='systemuser',
            name='add_at',
        ),
        migrations.RemoveField(
            model_name='systemuser',
            name='change_at',
        ),
        migrations.RemoveField(
            model_name='systemuser',
            name='delete_at',
        ),
        migrations.RemoveField(
            model_name='systemuser',
            name='is_organization_admin',
        ),
        migrations.RemoveField(
            model_name='systemuser',
            name='is_password_change',
        ),
        migrations.AddField(
            model_name='systemuser',
            name='changed_at',
            field=models.DateTimeField(auto_now=True, null=True, verbose_name='changed at'),
        ),
        # One-off default (timezone.now) backfills existing rows; hence
        # preserve_default=False.
        migrations.AddField(
            model_name='systemuser',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='created at'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='systemuser',
            name='deleted_at',
            field=models.DateTimeField(blank=True, editable=False, null=True, verbose_name='deleted at'),
        ),
    ]
|
from django.db import models
# Create your models here.
from django.db import models
from django.contrib.auth.models import AbstractUser
class DiskUser(AbstractUser):
    """Project user model; currently identical to Django's AbstractUser."""
    class Meta(AbstractUser.Meta):
        # Inherit AbstractUser's Meta unchanged; kept as an extension hook.
        pass
#!/usr/bin/env python
from lab_mc import experiments, null_experiment
from lab_defs import teaching_length
from weeks import all_dates
from loadstore import load_pairs
from assign_students import get_students, match_students
from get_students_by_week import get_students_by_pair, get_pairs_by_week
from sys import argv
from itertools import zip_longest
from datetime import timedelta, date
def get_dates(argv):
    """Return (to_date, from_date) parsed from command-line arguments.

    No date args: the window ends on the Thursday before the next Friday and
    starts a week earlier (weekday trick borrowed from
    http://stackoverflow.com/questions/8801084/how-to-calculate-next-friday-in-python).
    One y/m/d triple: it is from_date; to_date defaults as above.
    Two triples: from_date then to_date, both explicit.
    """
    def default_to_date():
        # Thursday before the next Friday, relative to today.
        today = date.today()
        return today + timedelta((4 - today.weekday()) % 7 - 1)

    if len(argv) >= 6:
        from_date = date(int(argv[1]), int(argv[2]), int(argv[3]))
        to_date = date(int(argv[4]), int(argv[5]), int(argv[6]))
    elif len(argv) >= 3:
        from_date = date(int(argv[1]), int(argv[2]), int(argv[3]))
        to_date = default_to_date()
    else:
        to_date = default_to_date()
        from_date = to_date - timedelta(7)
    return to_date, from_date
if __name__ == "__main__":
to_date, from_date = get_dates(argv)
pairs = load_pairs("schedule.dat")
students = get_students("students.csv")
match_students(students, pairs)
experiments_by_week = list(zip_longest(*pairs, fillvalue=null_experiment))
for experiment in experiments.values():
found_pairs = get_pairs_by_week(from_date, experiment.acronym, all_dates, experiments_by_week)
found_students = get_students_by_pair(students.values(), found_pairs)
print("{}:".format(experiment.title))
for student in sorted(found_students):
student_experiments = student.tb1_experiments + student.tb2_experiments
print(" - {} {}\t{}".format(student.number, student.name,
student_experiments[all_dates.index(to_date)].acronym))
|
def merge(line):
    """Merge one row/column of a 2048 board toward the front.

    Nonzero tiles slide together; equal adjacent tiles (ignoring zeros)
    combine into one tile of double value, and each tile merges at most once.
    Zeros pad the result back to the input length.

    Bug fix: the previous version only flushed the pending tile when the LAST
    input element started a fresh run, so a trailing tile that did not merge
    was silently dropped (merge([2, 4]) returned [2, 0]).  The pending tile
    is now appended after the loop.
    """
    result = []
    pending = -1  # tile waiting to merge; -1 means none held
    for tile in line:
        if tile == 0:
            continue
        if pending < 0:
            pending = tile
        elif tile == pending:
            result.append(tile * 2)
            pending = -1
        else:
            result.append(pending)
            pending = tile
    if pending > 0:
        result.append(pending)
    return result + [0] * (len(line) - len(result))
# def merge(line):
# begin = 0
# length = len(line)
# result = []
# for i, a in enumerate(line):
# if i >= begin:
# for b in xrange(i + 2, length + 1):
# total = sum(line[i:b])
# if total == a * 2 and total > 0:
# result.append(total)
# begin = b
# break
# else:
# if a > 0:
# result.append(a)
# return result + [0] * (length - len(result))
|
from rest_framework import serializers
from .models import Comments
class CommentsSerializer(serializers.ModelSerializer):
    """Serializer for Comments objects."""

    class Meta:
        model = Comments
        fields = ('id', 'content', 'name', 'email', 'created_at', 'parent', 'user', 'post')
        read_only_fields = ('id',)

    def validate(self, data):
        """Anonymous comments (no user) must carry both a name and an email."""
        has_user = bool(data.get('user'))
        has_name = bool(data.get('name'))
        has_email = bool(data.get('email'))
        if not has_user and not (has_name and has_email):
            raise serializers.ValidationError("Please provide name and email or please login.")
        return data
|
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
from collections import OrderedDict
from flask_restful import fields, marshal_with
app = Flask(__name__)
api = Api(app)

# Seed data, keyed by todo id.
TODOS = {
    'todo1': {'task': 'build an API'},
    'todo2': {'task': '???'},
    'todo3': {'task': 'profit!'},
}


def abort_if_todo_doesnt_exist(todo_id):
    """Abort the request with a 404 when *todo_id* is unknown."""
    if todo_id not in TODOS:
        abort(404, message="Todo {} doesn't exist".format(todo_id))


# Shared request parser; only 'task' is accepted from the request body.
parser = reqparse.RequestParser()
parser.add_argument('task')
class TodoDao(object):
    '''
    In the [QuickStart: Data Formatting example]
    (https://flask-restful.readthedocs.io/en/0.3.5/quickstart.html#data-formatting)
    fields.Url('todolist') is a function that takes an endpoint name and generates
    a URI for that endpoint. Unused in this example
    '''
    # Marshalling schema: only these attributes appear in serialized responses.
    TODO_RESOURCE_FIELDS = {
        'task': fields.String,
        'uri': fields.Url('todolist')
    }
    def __init__(self, todo_id, task):
        self.todo_id = todo_id
        self.task = task
        # This field will not be sent in the response
        self.status = 'active'
class Todo(Resource):
    """Single-todo endpoint returning a fixed demo item."""
    @marshal_with(TodoDao.TODO_RESOURCE_FIELDS)
    def get(self, **kwargs):
        # marshal_with filters the DAO through TODO_RESOURCE_FIELDS, so the
        # 'status' attribute is omitted from the response.
        return TodoDao(todo_id='my_todo', task='Remembers the milk')
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
    """Shows a list of all todos, and lets you POST to add new tasks."""

    def get(self):
        return TODOS

    def post(self):
        args = parser.parse_args()
        # Derive the next id numerically.  Bug fix: max() on the string keys
        # compared lexicographically, so after 'todo9' it would pick 'todo9'
        # forever and every POST overwrote 'todo10'.
        next_id = max(int(k.lstrip('todo')) for k in TODOS) + 1
        todo_id = 'todo{}'.format(next_id)
        TODOS[todo_id] = {'task': args['task']}
        return TODOS[todo_id], 201
##
# Actually setup the API resource routing here
##
api.add_resource(TodoList, '/todolist')
api.add_resource(Todo, '/todo')  # Todo.get accepts (and ignores) URL kwargs
if __name__ == '__main__':
    app.run(debug=True)  # development server only
|
import pygame
print("Program Starting...")
pygame.init()
display_height = 720
display_width = 1280
displayWindow = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Test")
clock = pygame.time.Clock()
black = [0, 0, 0]
white = [255, 255, 255]
grey = [20, 20, 20]
running = True
pressed = False
finaltext = ""
def addText(event):
    """Append the typed character to finaltext when the textbox has focus.

    Bug fix: the old code extracted the character via str(event)[30], which
    depends on pygame's repr layout, and tested membership against
    str(list-of-one-string) — so characters such as quotes, brackets and
    commas from the list's repr were accepted too.  event.unicode is the
    supported way to read the typed character.
    """
    global finaltext
    if not pressed:
        return
    char = event.unicode
    if char in "qwertyuiopasdfghjklzxcvbnm ":
        finaltext += char
def text(text, color, size, x, y):
    """Render *text* in Myriad Pro at *size* and blit it at (x, y)."""
    font = pygame.font.SysFont("Myriad Pro", size)
    surface = font.render(text, 2, color)
    displayWindow.blit(surface, (x, y))
def visuals():
    """Redraw one frame: background, input box, and the typed text."""
    global Textbox
    displayWindow.fill(grey)
    # The white input box; global so the event loop can hit-test mouse clicks.
    Textbox = pygame.draw.rect(displayWindow, white, pygame.Rect(200, 200, 800, 50))
    TextFont = text(finaltext, black, 40, 200, 225)
    pygame.display.update()
    clock.tick(30)
    # NOTE(review): this draw happens after update(), so it only becomes
    # visible on the next frame — confirm it is intentional.
    text(finaltext, black, 20, 20, 20)
# Main event/draw loop.
while running:
    for event in pygame.event.get():
        mouse = pygame.mouse.get_pos()
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if pressed:
                addText(event)
            # Backspace clears the whole buffer, not just one character.
            if event.key == pygame.K_BACKSPACE:
                finaltext = ""
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Focus follows clicks on the textbox.
            # NOTE(review): Textbox is first created inside visuals(), so a
            # click processed before the first visuals() call would raise
            # NameError — confirm ordering.
            if Textbox.collidepoint(mouse):
                pressed = True
            if not Textbox.collidepoint(mouse):
                pressed = False
            print(finaltext)
    visuals()
print("Program Ending...")
pygame.quit()
|
import fileinput
class KeyMapper(object):
    """Phone-keypad (T9-style) word lookup.

    Builds a trie keyed by keypad digits from 'brit-a-z.txt'; print_options()
    prints every dictionary word whose keypad encoding starts with the given
    digit string.
    """

    def __init__(self):
        self._build_keymap()
        self._build_wordtrie()

    def _build_keymap(self):
        """Map each lowercase letter to its keypad digit ('a' -> '2', ...)."""
        self.keymapping = {}
        keys = {'2': 'ABC',
                '3': 'DEF',
                '4': 'GHI',
                '5': 'JKL',
                '6': 'MNO',
                '7': 'PQRS',
                '8': 'TUV',
                '9': 'WXYZ'}
        for digit, letters in keys.items():
            for letter in letters:
                self.keymapping[letter.lower()] = digit

    def _build_wordtrie(self):
        """Build the digit-keyed trie; each word hangs off its end node under 'words'."""
        self.trie = {}
        # Fix: use a context manager so the dictionary file is closed.
        with open('brit-a-z.txt') as df:
            for line in df:
                triepoint = self.trie  # current location in the trie
                for char in line:
                    encoded = self.keymapping.get(char, None)
                    if encoded:
                        if encoded not in triepoint:
                            triepoint[encoded] = dict()
                        triepoint = triepoint[encoded]
                triepoint.setdefault('words', []).append(line.strip())

    def _print_options_at_point(self, triepoint):
        """Recursively print every word stored at or below this trie node."""
        for word in triepoint.get('words', []):
            print(word)
        # Fix: dict.iteritems() is Python-2-only and breaks under Python 3
        # (which this file otherwise targets — it uses print() as a function);
        # items() works on both.
        for key, subtree in triepoint.items():
            if key != 'words':
                self._print_options_at_point(subtree)

    def print_options(self, numstring):
        """Print all words whose keypad digits start with *numstring*."""
        triepoint = self.trie
        for digit in numstring:
            if digit in triepoint:
                triepoint = triepoint[digit]
            else:
                print('No Matches')
                return
        self._print_options_at_point(triepoint)
if __name__ == '__main__':
    km = KeyMapper()
    # Each input line is treated as a digit string to look up.
    for entry in fileinput.input():
        km.print_options(entry.strip())
|
import ex12_2
from tabulate import tabulate
# Mixed test input: single addresses plus two range notations.
ipaddress_test_list = ['10.0.4.1','10.0.1.68-10.0.1.72', '10.0.2.2', '192.168.2.14-20']


def ip_table(reachable_list, unreachable_list):
    """Print reachable and unreachable addresses as a two-column table."""
    table = {'Reachable': list(reachable_list),
             'Unreachable': list(unreachable_list)}
    print(tabulate(table, headers='keys'))


ip_table(*ex12_2.ping_ip_address(ipaddress_test_list))
import asyncio
from unittest.mock import Mock
import pytest
from sc2.ids.unit_typeid import UnitTypeId
from sc2.unit import Unit
from .context import add_unit_to_bot, initial_bot_state
# Build-order behaviour tests for the bot's on_step.
# NOTE(review): bot.do / bot.build / bot.expand_now are stubbed via
# asyncio.coroutine(), which was removed in Python 3.11 — these will need
# unittest.mock.AsyncMock on newer interpreters; confirm the target version.
@pytest.mark.asyncio
async def test_does_nothing_on_empty_bo():
    # An empty build order makes on_step a no-op.
    bot = initial_bot_state([])
    await bot.on_step(0)
@pytest.mark.asyncio
async def test_build_first_unit_from_bo():
    # The first affordable unit is trained from a larva and removed from the BO.
    bot = initial_bot_state([UnitTypeId.DRONE, UnitTypeId.OVERLORD])
    larva: Unit = add_unit_to_bot(UnitTypeId.LARVA, bot)
    larva.train = Mock(return_value="returnVal")
    bot.can_afford = Mock(return_value=True)
    do_stub = Mock(return_value=None)
    bot.do = asyncio.coroutine(do_stub)
    await bot.on_step(0)
    larva.train.assert_called_once_with(UnitTypeId.DRONE)
    do_stub.assert_called_once_with(larva.train("any"))
    assert UnitTypeId.DRONE not in bot.build_order
@pytest.mark.asyncio
async def test_dont_build_unit_if_cant_afford():
    # Unaffordable units stay in the build order and nothing is trained.
    bot = initial_bot_state([UnitTypeId.DRONE, UnitTypeId.OVERLORD])
    larva: Unit = add_unit_to_bot(UnitTypeId.LARVA, bot)
    larva.train = Mock(return_value="returnVal")
    bot.can_afford = Mock(return_value=False)
    do_stub = Mock(return_value=None)
    bot.do = asyncio.coroutine(do_stub)
    await bot.on_step(0)
    larva.train.assert_not_called()
    do_stub.assert_not_called()
    assert UnitTypeId.DRONE in bot.build_order
@pytest.mark.asyncio
async def test_find_supply_for_first_unit_from_bo():
    # An overlord at the head of the BO is trained like any other unit.
    bot = initial_bot_state([UnitTypeId.OVERLORD])
    larva: Unit = add_unit_to_bot(UnitTypeId.LARVA, bot)
    larva.train = Mock(return_value="returnVal")
    bot.can_afford = Mock(return_value=True)
    do_stub = Mock(return_value=None)
    bot.do = asyncio.coroutine(do_stub)
    await bot.on_step(0)
    larva.train.assert_called_once_with(UnitTypeId.OVERLORD)
    do_stub.assert_called_once_with(larva.train("any"))
    assert UnitTypeId.OVERLORD not in bot.build_order
@pytest.mark.asyncio
async def test_build_structure_from_bo():
    # Structures are placed near the hatchery when a drone is available.
    bot = initial_bot_state([UnitTypeId.SPAWNINGPOOL])
    add_unit_to_bot(UnitTypeId.DRONE, bot)
    hatch: Unit = add_unit_to_bot(UnitTypeId.HATCHERY, bot)
    bot.can_afford = Mock(return_value=True)
    build_stub = Mock(return_value=None)
    bot.build = asyncio.coroutine(build_stub)
    await bot.on_step(0)
    build_stub.assert_called_once_with(UnitTypeId.SPAWNINGPOOL, hatch)
    assert UnitTypeId.SPAWNINGPOOL not in bot.build_order
@pytest.mark.asyncio
async def test_dont_build_structure_from_bo_without_drones():
    # Without a drone to morph, the structure stays queued.
    bot = initial_bot_state([UnitTypeId.SPAWNINGPOOL])
    add_unit_to_bot(UnitTypeId.HATCHERY, bot)
    bot.can_afford = Mock(return_value=True)
    build_stub = Mock(return_value=None)
    bot.build = asyncio.coroutine(build_stub)
    await bot.on_step(0)
    build_stub.assert_not_called()
    assert UnitTypeId.SPAWNINGPOOL in bot.build_order
@pytest.mark.asyncio
async def test_expand_if_hatchery_is_in_bo():
    # A queued hatchery triggers an expansion when building is possible.
    bot = initial_bot_state([UnitTypeId.HATCHERY])
    add_unit_to_bot(UnitTypeId.HATCHERY, bot)
    bot.can_build_building = Mock(return_value=True)
    expand_stub = Mock(return_value=None)
    bot.expand_now = asyncio.coroutine(expand_stub)
    await bot.on_step(0)
    expand_stub.assert_called()
    assert UnitTypeId.HATCHERY not in bot.build_order
@pytest.mark.asyncio
async def test_not_expand_if_hatchery_is_in_bo_but_cant_build():
    # If building is impossible, the hatchery remains in the build order.
    bot = initial_bot_state([UnitTypeId.HATCHERY])
    add_unit_to_bot(UnitTypeId.HATCHERY, bot)
    bot.can_build_building = Mock(return_value=False)
    expand_stub = Mock(return_value=None)
    bot.expand_now = asyncio.coroutine(expand_stub)
    await bot.on_step(0)
    expand_stub.assert_not_called()
    assert UnitTypeId.HATCHERY in bot.build_order
|
from . import character
from . import alliance
from . import corporation
|
def download():
    """ Downloads ESA list from the ESRD site via anonymous FTP and saves it
    locally as ESA-data.txt (one CSV line per row)."""
    import ftplib
    # Connect to FTP Client and cd to directory containing brownfields`
    ftp = ftplib.FTP('ftp.gov.ab.ca')
    ftp.login()  # anonymous login
    ftp.cwd('/env/ESAR')
    # Create a list of all ESA entries
    # Note: Each index in the list is a string (one CSV line, newline stripped)
    EsaList = []
    ftp.retrlines('RETR CompleteEsaSiteList.csv', EsaList.append )
    # Politely log out
    ftp.quit()
    # Print Length of List to confirm Download Worked
    print('There are ' + str(len( EsaList )) + ' items in the file' )
    # Re-add newlines and persist the raw CSV text locally.
    with open( './../../data/ESA-data.txt', 'w' ) as file_object:
        for item in EsaList:
            file_object.write( str( item ) +"\n" )
    print('Saving to: ESA-data.txt')
# Create Function to move CSV to List
def csvToList(infile, rmRow=''):
    """Read a CSV file into a list of row-lists; optionally delete row *rmRow*
    (used when the file contains a '----' separator row)."""
    import csv
    with open(infile) as handle:
        rows = list(csv.reader(handle))
    if rmRow:
        del rows[rmRow]
    return rows
# ----- Process ESA -----#
# Iterate Through Full ESA list to identify:
# 1) ATLA Numbers (7 digits), 2) Edmonton (3rd digit==2)
def decomposeESA(rawESA):
    """ Decomposes the ESA into a single 17 digit PBL number. The raw ESA file has the following column headers:
    0. Esrd File Number
    1. Operation Name
    2. File Classification
    3. LLD ( Some PBL's and some ATLA numbers)
    4. Documents in File
    5. Linc
    6. 10TM Point Coordinates

    Returns a list of 'planblocklot' strings (plan zero-padded to 7,
    block to 4, lot to 6 digits) for Edmonton rows only.
    """
    # Keep only Edmonton rows: a 7-digit LLD with '2' in the third position.
    inProcessESA = []
    for row in rawESA:
        lld = row[3].split(';')[0]
        if len(lld) == 7 and lld[2] == '2':
            inProcessESA.append(row)
    # Decompose each LLD into plan/block/lot, padding with zeros.
    ESA_PBL = []
    for row in inProcessESA:
        pbl = row[3].split(';')
        plan = pbl[0].zfill(7)
        # Only use first token if the row has multiple LLD's.
        if len(pbl) >= 2 and pbl[1]:
            block = pbl[1].split(' ')[0].zfill(4)
        else:
            block = '0000'
        if len(pbl) >= 3 and pbl[2]:
            workingLot = pbl[2].split(' ')[0]
        else:
            workingLot = '000000'
        # Some LLD's list several lot numbers ('12,13' or '12-14'); keep the
        # first.  Bug fix: the old code compared str.find() results, which
        # mishandled the single-separator cases — '12,13' fell through to the
        # '-' split and kept the comma (and vice versa for '12-13').
        lot = workingLot.replace('-', ',').split(',')[0].zfill(6)
        ESA_PBL.append(''.join([plan, block, lot]))
    return ESA_PBL
# Decomepose Line of EDMONTON to PBL
def decomposeEdmLine( cell ):
    """ Decomposes a LLD in the Edmonton Property Data to a tuple of its
    (pbl, plan, block, lot), where pbl is the 17-digit zero-padded
    concatenation of plan (7), block (4) and lot (6).

    Missing block/lot tokens default to '0000' / '000000'.
    """
    # Drop any alternate description after '/', then split on spaces.
    # Bug fix: the result of split('/') used to be discarded because the next
    # line re-split the ORIGINAL cell; the two steps are now chained.
    parts = cell.split('/')[0].split(' ')
    plan = parts[1]
    try:
        block = parts[4]
    except IndexError:
        block = '0000'
    try:
        lot = parts[7]
    except IndexError:
        lot = '000000'
    pbl = plan.zfill(7) + block.zfill(4) + lot.zfill(6)
    return( pbl, plan, block, lot )
def findContaminatedSites(rawEdm, ESA_PBL):
    """Cross-reference Edmonton property rows against the ESA PBL list and
    return every match as a Contaminated_Site instance."""
    class Contaminated_Site():
        """An individual contaminated site"""
        def __init__(self, esrdFileNum, house, street, plan, block, lot, lat, lng):
            self.esrdFileNum = esrdFileNum
            self.house = house
            self.street = street
            self.plan = plan
            self.block = block
            self.lot = lot
            self.lat = lat
            self.lng = lng
    contaminatedSites = []
    for row in rawEdm:
        # Not LLD's are PBL's and will produce an IndexError - ignore them and move
        # to next row
        try:
            row[4].split(' ')[1]
        except IndexError:
            continue
        # Break the rawEdm into components, including its unique PBL number
        else:
            esrdFileNum = row[0]
            pbl = decomposeEdmLine( row[4] )
            house = row[2]
            street = row[3]
            plan = pbl[1]
            block = pbl[2]
            lot = pbl[3]
            lat = row[8]
            lng = row[9]
            # Test the unique PBL numbers for the current with for a match in the ESA
            # list. If the match exists, create a new Contaminated_Site instace and add
            # it to the list of contaminated sites
            # NOTE(review): this appends one site per matching entry, so duplicates
            # in ESA_PBL yield duplicate sites; converting to a set membership test
            # would change that behaviour - confirm before optimizing the O(n*m) scan.
            for entry in ESA_PBL:
                if pbl[0] == entry:
                    site = Contaminated_Site( esrdFileNum, house, street, plan, block, lot, lat, lng )
                    contaminatedSites.append( site )
    return contaminatedSites
|
########### sample of calling FaceAPI from Python 3.6 #############
## @fujute : March4,2018
## This is modified version from original code from " https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236 "
##
import http.client, urllib.request, urllib.parse, urllib.error, base64
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': 'PLEASE REPLACE THIS TEXT WITH YOUR FACE API KEY',
}
# https://www.microsoft.com/cognitive-services/en-us/face-api
# reference https://westus.dev.cognitive.microsoft.com/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236
params = urllib.parse.urlencode({
    # Request parameters
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,smile,facialHair,glasses,headPose,emotion',
})
# Each entry is a JSON request body pointing at a publicly hosted image.
mypictures = ["{ 'url': 'https://raw.githubusercontent.com/fuju9w/cognitive/master/man-crazy-funny-dude-45882.jpg' }","{ 'url': 'https://raw.githubusercontent.com/fuju9w/cognitive/master/peam-m1-2017.jpg' }","{ 'url': 'https://raw.githubusercontent.com/fuju9w/cognitive/master/pexels-photo-372042.jpg' }"]
# POST each picture to the Face API detect endpoint and print the raw reply.
try:
    conn = http.client.HTTPSConnection('southeastasia.api.cognitive.microsoft.com')
    for body in mypictures:
        print(body)
        conn.request("POST", "/face/v1.0/detect?%s" % params, body, headers)
        response = conn.getresponse()
        data = response.read()
        print(data)
        print ("------------------------------------------")
    conn.close()
except Exception as e:
    # Bug fix: only OSError carries errno/strerror; formatting those on a
    # generic Exception raised AttributeError inside the handler itself.
    print("Request failed: {0}".format(repr(e)))
####################################
|
from script.base_api.service_user.masterKey import *
from script.base_api.service_user.permission_groups import *
from script.base_api.service_user.verificationCode import *
from script.base_api.service_user.teachers import *
from script.base_api.service_user.metrics import *
from script.base_api.service_user.ding import *
from script.base_api.service_user.permissionGroup import *
from script.base_api.service_user.dingAuth import *
from script.base_api.service_user.employees import *
from script.base_api.service_user.roles import *
from script.base_api.service_user.permission import *
from script.base_api.service_user.departments import *
from script.base_api.service_user.batchEmp import *
from script.base_api.service_user.auth import *
from script.base_api.service_user.permissionGroups import *
from script.base_api.service_user.company import *
from script.base_api.service_user.workbench import *
from script.base_api.service_user.app import *
from script.base_api.service_user.user_api import *
from script.base_api.service_user.plaso import *
from script.base_api.service_user.menuTree import *
from script.base_api.service_user.permissions import *
from script.base_api.service_user.wxMini import *
from script.base_api.service_user.user import *
from script.base_api.service_user.jst_app import *
from script.base_api.service_user.token import *
from script.base_api.service_user.users import *
|
from django.db import models
from mongoengine import Document, StringField, DictField
class Researcher(Document):
    """MongoDB document holding a researcher's CV, keyed by Lattes id."""
    lattes_id = StringField()    # external Lattes platform identifier
    researcher_cv = DictField()  # parsed CV payload; schema not enforced here
    def __unicode__(self):
        # NOTE(review): __unicode__ is a Python 2 convention; on Python 3
        # define __str__ instead — confirm the target runtime.
        return self.lattes_id
"""
Convenience class to help accomplish common tasks with the Roomba.
Created on Oct 13, 2015.
Written by: Valerie Galluzzi, Mark Hays, and Muqing Zheng.
"""
# TODO 1: write your name above
import safest_create as create
import time
def main():
    """ Calls the TEST functions in this module. """
    # TODO 2: add calls to your NEW test functions here.
    test_init()
    # The remaining tests are opt-in; each one drives the simulator
    # (they sleep for several seconds while the robot moves).
    # test_move_forward_by_time()
    # test_get_distance_sensor()
    # test_move_forward_by_distance()
    # test_spin()
    # test_distance_traveled()
# TODO 3: add NEW test functions in this space. Add one test function per method below.
# I've already written one for you
def test_init():
    """MyRobot should connect on a valid port and re-prompt on a bad one."""
    print('test valid port; should NOT ask for port')
    bot = MyRobot('sim')
    bot.shutdown()
    print('test invalid port; should ask for port')
    bot = MyRobot('bad')
    bot.shutdown()
# TODO: write your NEW test functions for the other methods below
def test_move_forward_by_time():
    """Drive at speed 10 for 2s and compare the raw distance sensor reading."""
    myrobot = MyRobot('sim')
    myrobot.move_forward_by_time(10, 2)
    print('Expected', 100 * 2, 'Actual', myrobot.robot.getSensor(create.Sensors.distance))
    myrobot.shutdown()
def test_get_distance_sensor():
    """Two successive moves; each call should report that move's distance."""
    myrobot = MyRobot('sim')
    myrobot.move_forward_by_time(10, 2)
    print('Expected', 100 * 2, 'Actual', myrobot.get_distance_sensor())
    myrobot.move_forward_by_time(20, 2)
    print('Expected', 200 * 2, 'Actual', myrobot.get_distance_sensor())
    myrobot.shutdown()
def test_move_forward_by_distance():
    """Fixed-distance moves should register the requested distance."""
    myrobot = MyRobot('sim')
    myrobot.move_forward_by_distance(20, 200)
    print('Expected', 200, 'Actual', myrobot.get_distance_sensor())
    myrobot.move_forward_by_distance(50, 300)
    print('Expected', 300, 'Actual', myrobot.get_distance_sensor())
    myrobot.shutdown()
def test_spin():
    """Spins should register the requested angle on the angle sensor."""
    myrobot = MyRobot('sim')
    myrobot.spin(60)
    print('Expected', 60, 'Actual', myrobot.robot.getSensor(create.Sensors.angle))
    myrobot.spin(120)
    print('Expected', 120, 'Actual', myrobot.robot.getSensor(create.Sensors.angle))
    myrobot.shutdown()
def test_distance_traveled():
    """distance_traveled should accumulate across moves."""
    myrobot = MyRobot('sim')
    myrobot.move_forward_by_distance(20, 20)
    print('Expected', 20, 'Actual', myrobot.distance_traveled())
    myrobot.move_forward_by_distance(20, 20)
    print('Expected', 40, 'Actual', myrobot.distance_traveled())
    myrobot.shutdown()
class MyRobot:
    """Convenience wrapper around a create.Create (Roomba) connection that
    tracks commanded distance and exposes simple move/spin helpers."""
    def __init__(self, port):
        """
        INPUT: port to connect to the Roomba
        OUTPUT: nothing
        SIDE EFFECTS: the program connects to the Roomba
        DESCRIPTION:
        Connects to the Roomba on the given port.
        If the Roomba fails to connect (HINT: try-except),
        ASK the user to enter a NEW port UNTIL it connects.
        Initializes other instance variables as needed.
        """
        # DONE 4: write a NEW test function for this method.
        # Write two tests in the function: one with a valid port, one with an invalid port.
        # TODO 5: implement this method.
        while True:
            try:
                self.robot = create.Create(port)
                break
            # NOTE(review): bare except also swallows KeyboardInterrupt —
            # consider narrowing to Exception.
            except:
                port = input('Enter your port')
        # Running total of commanded travel (speed*time or explicit distance).
        self.totaldis = 0
    def shutdown(self):
        """Shuts down the Roomba"""
        # TODO: implement this method.
        self.robot.shutdown()
    def move_forward_by_time(self, speed, sec):
        """
        Moves the robot forward at the given speed for the given number of
        seconds, then stops.
        """
        # TODO 6: write a NEW test function for this method.
        # TODO 7: implement this method.
        self.robot.go(speed)
        time.sleep(sec)
        self.robot.stop()
        self.totaldis += speed * sec
    def get_distance_sensor(self):
        """
        Returns the distance that the robot has traveled since the last time
        get_distance_sensor was called.
        """
        # TODO 8: write a NEW test function for this method.
        # TODO 9: implement this method.
        # NOTE(review): initialdis is reset to 0 on every call, so diff always
        # equals the raw reading — this matches the docstring only if the
        # underlying distance sensor resets on each read; confirm against the
        # create API.  The /10 presumably converts sensor units — verify.
        self.initialdis = 0
        self.finaldis = self.robot.getSensor(create.Sensors.distance)
        diff = self.finaldis - self.initialdis
        self.initialdis = self.finaldis
        return diff / 10
    def move_forward_by_distance(self, speed, distance):
        """
        Move the robot forward a fixed distance, then stop.
        """
        # TODO 10: write a NEW test function for this method.
        # TODO 11: implement this method.
        self.robot.go(speed)
        time.sleep(distance / speed)
        self.robot.stop()
        self.totaldis += distance
    def spin(self, deg):
        """
        Spin the robot the given number of degrees at a rate of 30 degrees/second.
        """
        # TODO 12: write a NEW test function for this method.
        # TODO 13: implement this method.
        self.robot.go(0, 30)
        time.sleep(deg / 30)
        self.robot.stop()
    def distance_traveled(self):
        """
        Return the total distance the robot has traveled.
        Hint: you already wrote this code in a previous exercise.
        """
        # TODO 14: write a NEW test function for this method.
        # TODO 15: implement this method.
        return self.totaldis
#-----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
#-----------------------------------------------------------------------
if __name__ == '__main__':
    main()  # run the enabled test functions
|
import re
# open paragraph
# NOTE(review): the handle is never closed; consider a 'with' block.
f = open("paragraph_2.txt", "r")
# breaks into sentences (also splits on ';', so clauses count as sentences;
# any text after the final terminator is dropped by the len-1 loops below)
sentences = re.split(r"[.!?;]", f.read())
# initialize lists & variables
sentence = []
words = []
word_count = 0
character_count = 0
# split into words: words[i] is the list of words of sentence i
for i in range(0, len(sentences) - 1):
    sentence.append(sentences[i])
    words.append(sentence[i].split())
# count number of sentences
sentence_count = len(sentences) - 1
# count total words in paragraph ('word' is one sentence's word list)
for word in words:
    word_count += len(word)
    # get character count
    for character in word:
        character_count += len(character)
# calc average number of words in sentence
average_words = word_count / sentence_count
# calc average number of characters in words
average_character = character_count / word_count
# print results
print("Approximate Word Count: " + str(word_count))
print("Approximate Sentence Count: " + str(sentence_count))
print("Average Letter Count: " + str(round(average_character, 1)))
print("Average Sentence Length: " + str(round(average_words, 1)))
import pandas as pd
from datetime import date
from os import path
from tkinter import *
from tkinter import ttk
from tkcalendar import DateEntry
from Echo import Echo
from util import *
class Client(ttk.Frame):
    def __init__(self, parent):
        """Build the client-entry form: labeled fields (left and right columns),
        action buttons, and a scrollable table of existing entries."""
        Frame.__init__(self, parent)
        self.parent = parent
        self.loadData()
        self.loadRDVMs()
        f1 = Frame(self)
        f1.pack(fill = X)
        # Labels for each field
        LEFT_LABEL_WIDTH = 16
        RIGHT_LABEL_WIDTH = 18
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "Case/Client #").grid(row = 0, column = 0)
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "Client").grid(row = 1, column = 0)
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "Species / Breed / Sex").grid(row = 2, column = 0)
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "Birth / Death").grid(row = 3, column = 0)
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "LA / FU Date").grid(row = 4, column = 0)
        Label(f1, anchor = "w", width = LEFT_LABEL_WIDTH,
              text = "Clinician").grid(row = 5, column = 0)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "Client Last / First").grid(row = 0, column = 4)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "City / ZIP / State").grid(row = 1, column = 4)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "Hm / Wk / Alt Phones").grid(row = 2, column = 4)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "RDVM Last / First").grid(row = 3, column = 4)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "Practice").grid(row = 4, column = 4)
        Label(f1, anchor = "w", width = RIGHT_LABEL_WIDTH,
              text = "RDVM Ph1 / Ph2 / FAX").grid(row = 5, column = 4)
        # Left column fields
        self.caseNum = Text(f1, width = 12, height = 1)
        self.caseNum.grid(row = 0, column = 1, padx = 2)
        self.clientNum = Text(f1, width = 12, height = 1)
        self.clientNum.grid(row = 0, column = 2, padx = 2)
        self.patient = Text(f1, width = 12, height = 1)
        self.patient.grid(row = 1, column = 1, padx = 2)
        self.species = Text(f1, width = 12, height = 1)
        self.species.grid(row = 2, column = 1, padx = 2)
        self.breed = Text(f1, width = 12, height = 1)
        self.breed.grid(row = 2, column = 2, padx = 2)
        self.sex = Text(f1, width = 4, height = 1)
        self.sex.grid(row = 2, column = 3, padx = 2)
        self.birth = DateEntry(f1)
        self.birth.grid(row = 3, column = 1, padx = 2)
        self.death = DateEntry(f1)
        self.death.grid(row = 3, column = 2, padx = 2)
        self.la = DateEntry(f1)
        self.la.grid(row = 4, column = 1, padx = 2)
        self.fu = DateEntry(f1)
        self.fu.grid(row = 4, column = 2, padx = 2)
        self.clinicianText = StringVar()
        self.clinician = ttk.Combobox(f1, width = 12, text = self.clinicianText, values = CLINICIANS)
        self.clinician.grid(row = 5, column = 1, padx = 2)
        # Right column fields; fields sharing a grid cell are offset via padx.
        self.clientLast = Text(f1, width = 12, height = 1)
        self.clientLast.grid(row = 0, column = 5, sticky = "w")
        self.clientFirst = Text(f1, width = 12, height = 1)
        self.clientFirst.grid(row = 0, column = 5,
                              sticky = "w", padx = (105, 0))
        self.city = Text(f1, width = 12, height = 1)
        self.city.grid(row = 1, column = 5, sticky = "w")
        self.zipCode = Text(f1, width = 12, height = 1)
        self.zipCode.grid(row = 1, column = 5, sticky = "w", padx = (105, 0))
        self.state = Text(f1, width = 4, height = 1)
        self.state.grid(row = 1, column = 5, sticky = "w", padx = (210, 0))
        self.homePhone = Text(f1, width = 12, height = 1)
        self.homePhone.grid(row = 2, column = 5, sticky = "w")
        self.workPhone = Text(f1, width = 12, height = 1)
        self.workPhone.grid(row = 2, column = 5, sticky = "w", padx = (105, 0))
        self.altPhone = Text(f1, width = 12, height = 1)
        self.altPhone.grid(row = 2, column = 5, sticky = "w", padx = (210, 0))
        self.rdvmLast = Text(f1, width = 12, height = 1)
        self.rdvmLast.grid(row = 3, column = 5, sticky = "w")
        self.rdvmFirst = Text(f1, width = 12, height = 1)
        self.rdvmFirst.grid(row = 3, column = 5, sticky = "w", padx = (105, 0))
        self.practiceText = StringVar()
        self.practice = ttk.Combobox(f1, width = 31, text = self.practiceText, values = list(self.rdvms.iloc[:,0]))
        self.practice.grid(row = 4, column = 5, sticky = "w")
        self.practice.bind("<<ComboboxSelected>>", self.onPracticeSelection)
        self.rdvmPhone1 = Text(f1, width = 12, height = 1)
        self.rdvmPhone1.grid(row = 5, column = 5, sticky = "w")
        self.rdvmPhone2 = Text(f1, width = 12, height = 1)
        self.rdvmPhone2.grid(row = 5, column = 5, sticky = "w", padx = (105, 0))
        self.rdvmFAX = Text(f1, width = 12, height = 1)
        self.rdvmFAX.grid(row = 5, column = 5, sticky = "w", padx = (210, 0))
        # Submit button
        self.submitButton = Button(f1, text = "Submit", fg = "Black",
                                   command = self.submit)
        self.submitButton.grid(row = 6, column = 5)
        self.clearButton = Button(f1, text = "New", fg = "Black",
                                  command = self.clearFields)
        self.clearButton.grid(row = 6, column = 4)
        self.deleteButton = Button(f1, text = "Delete", fg = "Black",
                                   state = "disabled")
        self.deleteButton.grid(row = 6, column = 3)
        # Frame 2 contains the information on pets
        # Fills with the form information.
        f2 = ScrollableFrame(self, height = 150)
        # Listbox contains all the basic pet information
        self.numEntries = len(self.data.values)
        self.listBox = ttk.Treeview(f2.scrollable_frame, height = max(self.numEntries, 6), columns = list(self.data.columns), show = 'headings')
        i = 0
        for col in self.data.columns:
            self.listBox.heading(i, text = col)
            self.listBox.column(i, width = columnWidth(len(self.data.columns)))
            i += 1
        for item in self.data.values:
            self.listBox.insert("", "end", values = list(item))
        # Get info on click
        self.listBox.bind("<Double-1>", self.onTableSelection)
        self.listBox.pack(fill = BOTH, expand = True)
        f2.pack(fill = BOTH, expand = True, pady = 10)
def loadData(self):
# Read data
if path.exists(DATA_PATH_CLIENT):
self.data = pd.read_pickle(DATA_PATH_CLIENT)
else:
self.initializeData()
def initializeData(self):
d = {'Case number': [],
'Client number': [],
'Patient': [],
'Species': [],
'Breed': [],
'Sex': [],
'Birthdate': [],
'Deathdate': [],
'LAdate': [],
'FUdate': [],
'Client last': [],
'Client first': [],
'Clinician': [],
'City': [],
'State': [],
'ZIP': [],
'Home phone': [],
'Work phone': [],
'Alt phone': [],
'RDVM last': [],
'RDVM first': [],
'Practice': [],
'RDVM phone 1': [],
'RDVM phone 2': [],
'RDVM fax': []}
self.data = pd.DataFrame(d)
def submit(self):
newData = {
'Case number': self.caseNum.get("1.0", "end-1c"),
'Client number': self.clientNum.get("1.0", "end-1c"),
'Patient': self.patient.get("1.0", "end-1c"),
'Species': self.species.get("1.0", "end-1c"),
'Breed': self.breed.get("1.0", "end-1c"),
'Sex': self.sex.get("1.0", "end-1c"),
'Birthdate': self.birth.get(),
'Deathdate': self.death.get(),
'LAdate': self.la.get(),
'FUdate': self.fu.get(),
'Client last': self.clientLast.get("1.0", "end-1c"),
'Client first': self.clientFirst.get("1.0", "end-1c"),
'Clinician': self.clinician.get(),
'City': self.city.get("1.0", "end-1c"),
'State': self.state.get("1.0", "end-1c"),
'ZIP': self.zipCode.get("1.0", "end-1c"),
'Home phone': self.homePhone.get("1.0", "end-1c"),
'Work phone': self.workPhone.get("1.0", "end-1c"),
'Alt phone': self.altPhone.get("1.0", "end-1c"),
'RDVM last': self.rdvmLast.get("1.0", "end-1c"),
'RDVM first': self.rdvmFirst.get("1.0", "end-1c"),
'Practice': self.practice.get(),
'RDVM phone 1': self.rdvmPhone1.get("1.0", "end-1c"),
'RDVM phone 2': self.rdvmPhone2.get("1.0", "end-1c"),
'RDVM fax': self.rdvmFAX.get("1.0", "end-1c")}
self.listBox.insert("", "end", values = list(newData.values()))
self.data = self.data.append(newData, ignore_index = True)
self.clearFields()
self.numEntries += 1
self.listBox.configure(height = max(self.numEntries, 6))
self.saveData()
def clearFields(self):
setText(self.caseNum, "")
setText(self.clientNum, "")
setText(self.patient, "")
setText(self.species, "")
setText(self.breed, "")
setText(self.sex, "")
self.birth.set_date(date.today())
self.death.set_date(date.today())
self.la.set_date(date.today())
self.fu.set_date(date.today())
setText(self.clientLast, "")
setText(self.clientFirst, "")
self.clinicianText.set("")
setText(self.city, "")
setText(self.state, "")
setText(self.zipCode, "")
setText(self.homePhone, "")
setText(self.workPhone, "")
setText(self.altPhone, "")
setText(self.rdvmLast, "")
setText(self.rdvmFirst, "")
self.practiceText.set("")
setText(self.rdvmPhone1, "")
setText(self.rdvmPhone2, "")
setText(self.rdvmFAX, "")
self.parent.updateClient([])
self.submitButton.configure(text = "Submit", command = self.submit)
self.deleteButton.configure(state = "disabled")
def onTableSelection(self, event = None):
if self.listBox.selection():
item = self.listBox.selection()[0]
values = self.listBox.item(item, "values")
self.fillFields(values)
self.submitButton.configure(text = "Modify", command = lambda: self.modify(item))
self.deleteButton.configure(state = "normal", command = lambda: self.deleteDataItem(item))
self.parent.updateClient(values)
def fillFields(self, values):
setText(self.caseNum, values[0])
setText(self.clientNum, values[1])
setText(self.patient, values[2])
setText(self.species, values[3])
setText(self.breed, values[4])
setText(self.sex, values[5])
self.birth.set_date(values[6])
self.death.set_date(values[7])
self.la.set_date(values[8])
self.fu.set_date(values[9])
setText(self.clientLast, values[10])
setText(self.clientFirst, values[11])
self.clinicianText.set(values[12])
setText(self.city, values[13])
setText(self.state, values[14])
setText(self.zipCode, values[15])
setText(self.homePhone, values[16])
setText(self.workPhone, values[17])
setText(self.altPhone, values[18])
setText(self.rdvmLast, values[19])
setText(self.rdvmFirst, values[20])
self.practiceText.set(values[21])
setText(self.rdvmPhone1, values[22])
setText(self.rdvmPhone2, values[23])
setText(self.rdvmFAX, values[24])
def modify(self, item):
moddedData = {
'Case number': self.caseNum.get("1.0", "end-1c"),
'Client number': self.clientNum.get("1.0", "end-1c"),
'Patient': self.patient.get("1.0", "end-1c"),
'Species': self.species.get("1.0", "end-1c"),
'Breed': self.breed.get("1.0", "end-1c"),
'Sex': self.sex.get("1.0", "end-1c"),
'Birthdate': self.birth.get(),
'Deathdate': self.death.get(),
'LAdate': self.la.get(),
'FUdate': self.fu.get(),
'Client last': self.clientLast.get("1.0", "end-1c"),
'Client first': self.clientFirst.get("1.0", "end-1c"),
'Clinician': self.clinician.get(),
'City': self.city.get("1.0", "end-1c"),
'State': self.state.get("1.0", "end-1c"),
'ZIP': self.zipCode.get("1.0", "end-1c"),
'Home phone': self.homePhone.get("1.0", "end-1c"),
'Work phone': self.workPhone.get("1.0", "end-1c"),
'Alt phone': self.altPhone.get("1.0", "end-1c"),
'RDVM last': self.rdvmLast.get("1.0", "end-1c"),
'RDVM first': self.rdvmFirst.get("1.0", "end-1c"),
'Practice': self.practice.get(),
'RDVM phone 1': self.rdvmPhone1.get("1.0", "end-1c"),
'RDVM phone 2': self.rdvmPhone2.get("1.0", "end-1c"),
'RDVM fax': self.rdvmFAX.get("1.0", "end-1c")}
self.listBox.item(item, values = list(moddedData.values()))
row = int(item[-1]) - 1
self.data.loc[row] = list(moddedData.values())
self.clearFields()
self.parent.modifiedData(list(moddedData.values()))
self.saveData()
def loadRDVMs(self):
# Read data
if path.exists(DATA_PATH_RDVMS):
self.rdvms = pd.read_pickle(DATA_PATH_RDVMS)
else:
self.initializeRDVMs()
def initializeRDVMs(self):
d = {'Practice': [],
'Address': [],
'City': [],
'ST': [],
'Zip': [],
'Ph1': [],
'Ph2': [],
'FAX': []}
self.rdvms = pd.DataFrame(d)
    def updateRDVMs(self):
        """Called after the RDVM master list changes: reload it, re-sync the
        client table, refresh the combobox choices and the phone/fax fields."""
        self.loadRDVMs()
        self.updateClientRDVMs()
        # first column of the RDVM table is the practice name
        self.practice.configure(values = list(self.rdvms.iloc[:,0]))
        self.onPracticeSelection()
def rebuildTable(self):
self.numEntries = len(self.data.values)
self.listBox.delete(*self.listBox.get_children())
for item in self.data.values:
self.listBox.insert("", "end", values = list(item))
self.listBox.configure(height = max(self.numEntries, 6))
    def updateClientRDVMs(self):
        """Re-derive the RDVM phone/fax columns of the client table from the
        RDVM master list, then persist and redraw the table."""
        # Drop the denormalized RDVM columns and re-join them from the master list.
        # NOTE(review): a left merge on 'Practice' can duplicate client rows if
        # two practices share the same name - confirm names are unique upstream.
        clientData = self.data.drop(columns = ['RDVM phone 1', 'RDVM phone 2', 'RDVM fax'])
        rdvmData = self.rdvms[['Practice', 'Ph1', 'Ph2', 'FAX']]
        clientData = clientData.merge(rdvmData, how = 'left', on = 'Practice')
        clientData = clientData.rename(columns = {"Ph1" : "RDVM phone 1", "Ph2": "RDVM phone 2", "FAX": "RDVM fax"})
        # Clients whose practice no longer exists get blank RDVM fields
        noRDVM = clientData["RDVM phone 1"].isnull()
        clientData.loc[noRDVM, 'Practice'] = ''
        clientData.loc[noRDVM, 'RDVM phone 1'] = ''
        clientData.loc[noRDVM, 'RDVM phone 2'] = ''
        clientData.loc[noRDVM, 'RDVM fax'] = ''
        self.data = clientData
        self.saveData()
        self.rebuildTable()
def onPracticeSelection(self, event = None):
# The if exists to handle if the user deletes the rdvm currently selected
if len(self.rdvms.loc[self.rdvms['Practice'] == self.practiceText.get()]) > 0:
# In the case of multiple practice with the exact same name, just get the first one
selection = self.rdvms.loc[self.rdvms['Practice'] == self.practiceText.get()].iloc[0, :]
setText(self.rdvmPhone1, selection['Ph1'])
setText(self.rdvmPhone2, selection['Ph2'])
setText(self.rdvmFAX, selection['FAX'])
else:
self.practiceText.set('')
setText(self.rdvmPhone1, '')
setText(self.rdvmPhone2, '')
setText(self.rdvmFAX, '')
    def saveData(self):
        """Persist the client table via the module-level saveData helper."""
        saveData(self.data, DATA_PATH_CLIENT)
def deleteDataItem(self, item):
row = self.listBox.index(item)
self.listBox.delete(item)
self.numEntries -= 1
self.listBox.configure(height = max(self.numEntries, 6))
self.data = self.data.drop(self.data.index[row])
self.clearFields()
self.saveData() |
from .forum_views import bp_forum
from app.models import User
from app import db
IDENTIFIER = "forum"
NAME = "Forum Plugin"
VERSION = "v0.1"
AUTHOR = "Asyks @ EU-Blackhand"
def init():
    """Plugin startup hook: attach the forum_replies relationship to User."""
    setattr(User, "forum_replies", db.relationship("ForumReply", backref="user", lazy="dynamic"))
def install():
    """Plugin one-time install hook.

    The install behavior is identical to the startup behavior (attach the
    forum_replies relationship to User), so delegate instead of duplicating
    the setattr call.
    """
    init()
def get_blueprint():
    """Return the Flask blueprint the plugin host should register."""
    return bp_forum
|
def sayhi():
    """Print a fixed greeting for the user."""
    print("Hello User")
# Demonstrate that the function call runs between the two surrounding prints
print("Top")
sayhi()
print("Bottom")
def say_hi(name):
    """Print a greeting addressed to *name* (expects a string)."""
    greeting = "Hello " + name
    print(greeting)
# Greet two different names with the parameterized function
say_hi("Mike")
say_hi("Steve")
def say_hi_with_age(name, age):
    """Print a greeting that includes the caller's age.

    Generalized: *age* may now be a string or a number — the original
    concatenated it directly and raised TypeError for non-string ages.
    """
    print("Hello " + name + ", you are " + str(age))
# Ages are passed as strings because the function concatenates them directly
say_hi_with_age("Mike", "35")
say_hi_with_age("Steve", "70")
from ixnetwork_restpy import SessionAssistant
# Open a REST session to the IxNetwork API server; ClearConfig wipes any
# configuration left over from a previous run.
session_assistant = SessionAssistant(IpAddress='172.31.194.141',
                                     LogLevel=SessionAssistant.LOGLEVEL_INFO,
                                     ClearConfig=True)
ixnetwork = session_assistant.Ixnetwork
#create vport to physical port mapping using PortMapAssistant
port_map = session_assistant.PortMapAssistant()
chassis_ip = '172.21.86.100'
# Two physical ports (card 10, ports 17 and 18) that will form the LAG side
lag_ports = [
    dict(Arg1=chassis_ip, Arg2=10, Arg3=17),
    dict(Arg1=chassis_ip, Arg2=10, Arg3=18)
]
# Receive side: card 10 port 19, named so it can be found again below
port_map.Map('172.21.86.100', 10, 19, Name='VpcRx')
# NOTE(review): chained .add().add() creates TWO vports - presumably one per
# LAG member port above; confirm this is intentional and not a typo.
vports_1 = ixnetwork.Vport.add().add()
connected_ports = ixnetwork.AssignPorts(lag_ports, [], vports_1, True)
lag_1 = ixnetwork.Lag.add(Name='Lag 1', Vports=vports_1)
lag_1.ProtocolStack.add().Ethernet.add().Lagportlacp.add()
# find() takes a regex; '^VpcRx' anchors to the name assigned in Map() above
vports_2 = ixnetwork.Vport.find(Name='^VpcRx')
ethernet1 = ixnetwork.Topology.add(Ports=lag_1).DeviceGroup.add(Multiplier = 1).Ethernet.add()
ethernet2 = ixnetwork.Topology.add(Ports=vports_2).DeviceGroup.add(Multiplier = 1).Ethernet.add()
traffic_item = ixnetwork.Traffic.TrafficItem.add(Name='Lag Traffic pyats', TrafficType='raw')
endpoint_set = traffic_item.EndpointSet.add(Destinations=vports_2.Protocols.find(), Sources=lag_1)
|
class Queue:
    """Simple FIFO queue backed by a Python list.

    New items are inserted at index 0 (the tail); dequeue removes the last
    list element (the head), so items leave in arrival order.
    """

    def __init__(self):
        # underlying storage: head of the queue is the last list element
        self.queue = []

    def enqueue(self, item):
        # insert at the tail (front of the backing list)
        self.queue.insert(0, item)

    def dequeue(self):
        # remove and return the head, or None when the queue is empty
        if not self.queue:
            return None
        return self.queue.pop()

    def size(self):
        # number of items currently queued
        return len(self.queue)
"""
This module documents how to query your Wallet balance via the GreyDot API. If you enable notify-by-email, you will receive an email every time the URL is called with your APP Key.
URL :
https://greydotapi.me/?k=[APP Key]&do=[FID]
[**APP Key**] Your APP Key
[**FID**]The function ID for Wallet balance is 2
Example url :
https://greydotapi.me/?k=abcdefghijklmnopqrst&do=2
Example reply :
<?xml version="1.0" encoding="utf-8" ?>
<query>
<query_result>
<status>Success</status>
<function>Digit balance</function>
<amount>10.00</amount>
</query_result>
<query_status>DONE</query_status>
<query_code>D0003</query_code>
"""
# Canonical example of a successful Wallet-balance reply, kept as test data.
# Fix: the sample was missing the closing </query> tag (as is the example in
# the upstream docs), which made it unparseable as XML; it is added here so
# the sample is well-formed.
SAMPLE = """
<?xml version="1.0" encoding="utf-8" ?>
<query>
<query_result>
<status>Success</status>
<function>Digit balance</function>
<amount>10.00</amount>
</query_result>
<query_status>DONE</query_status>
<query_code>D0003</query_code>
</query>
"""
|
# @Time :2019/8/4 10:41
# @Author :jinbiao
from Python_0802_job.ddt import ddt, data
import unittest
from Python_0802_job.operation_log import log
from Python_0802_job import operation_config
from Python_0802_job import operation_excel
from Python_0802_job import send_request
@ddt
class TestRegister(unittest.TestCase):
    """Data-driven tests for the register API: each Excel row is one case.

    Results are written back to the source workbook (PASS/FAIL plus the
    actual response body).
    """
    config = operation_config.do_conifg
    excel_path = config.get_value(section="PATH", option="excelpath")
    oe = operation_excel.OperationExcel(excel_path, sheet_name="register")
    test_data = oe.get_data()

    @classmethod
    def setUpClass(cls) -> None:
        log.info("{:-^50}".format("测试用例开始执行"))

    @classmethod
    def tearDownClass(cls) -> None:
        log.info("{:-^50}".format("测试用例结束执行"))

    @data(*test_data)
    def test_register(self, case):
        # Renamed parameter from `data` to `case`: it shadowed ddt's `data`
        # decorator imported at module level.
        # Strip newlines/spaces so Excel cell formatting can't cause a
        # false mismatch against the raw response text.
        expect = case["expected"].replace("\n", "").replace(" ", "")
        request = send_request.SendRequest()
        actual = request.send_request(method=case["method"], url=case["url"],
                                      data=case["data"].encode("utf-8")).text
        try:
            self.assertEqual(expect, actual)
            log.info("用例执行通过")
            self.oe.write_data(row=case["case_id"]+1, actual=actual, result="PASS")
        except Exception as e:
            # Broad catch is intentional: record the FAIL in the workbook,
            # then re-raise so unittest still reports the failure.
            log.error(f"用例执行失败{e}")
            self.oe.write_data(row=case["case_id"]+1, actual=actual, result="FAIL")
            # bare raise preserves the original traceback
            raise
if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
|
import time
import datetime
from huey import RedisHuey, crontab
from extractors import data_collector
from model import Crypto, db
huey = RedisHuey('jobs', host='localhost', port=6379)
@huey.periodic_task(crontab(minute='*/1'))
def request_coin_price():
    """Huey periodic task (every minute): collect current coin prices and
    bulk-insert them into the Crypto table."""
    coin_prices = data_collector()
    # single transaction so a partial batch is never committed
    with db.atomic():
        Crypto.insert_many(coin_prices).execute()
    print(coin_prices)
|
from decimal import Decimal, getcontext
from django.utils.text import slugify
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from apps.basic import dictionary
mm_ft = 304.8
cm_inch = 0.39370079
class ProfileMaster(models.Model):
    """Profile group master record: a group id/name plus its sales target."""
    group_id = models.CharField(max_length=10, primary_key=True, verbose_name='Group ID')
    group_name = models.CharField(max_length=200, verbose_name='Group Name')
    sales_target = models.IntegerField(default=0, verbose_name='Sales Target')
    # audit trail: who created/last updated the record and when
    created_by = models.ForeignKey(User, related_name='profile_master_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='profile_master_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    def __str__(self):
        return self.group_name
    def get_update_url(self):
        """Return the URL of this record's edit view."""
        return reverse("profile_master_update", args=(self.pk,))
    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        # normalize the primary key to upper case before storing
        self.group_id = self.group_id.upper()
        return super(ProfileMaster, self).save(force_insert, force_update, *args, **kwargs)
class Profile(models.Model):
    """Moulding profile: cross-section dimensions plus derived per-foot
    volume (CBM_Feet) and surface-area (M2_Feet) figures computed on save."""
    # label typo fixed: 'Gourp master' -> 'Group master'
    group_master = models.ForeignKey(ProfileMaster, related_name='group_master', verbose_name='Group master', on_delete=models.DO_NOTHING)
    profile_group = models.CharField(max_length=100, verbose_name='Profile Group', blank=True, null=True)
    profile_group_description = models.CharField(max_length=200, verbose_name='Group description')
    profile_id = models.CharField(max_length=5, verbose_name='Profile ID', primary_key=True)
    profile_sym = models.CharField(max_length=50, verbose_name='Sym', blank=True, null=True)
    description = models.CharField(max_length=255, verbose_name='Description')
    image = models.ImageField(upload_to='upload/images/master_file', blank=True, null=True, verbose_name='Image')
    width = models.DecimalField(max_digits=10, decimal_places=2, verbose_name='Width(mm)', default=0.0)
    thick = models.DecimalField(max_digits=10, decimal_places=3, verbose_name='Thick(mm)', default=0.0)
    # derived fields, recomputed in save() when width/thick are positive
    CBM_Feet = models.DecimalField(max_digits=18, decimal_places=6, verbose_name='CBM/Feet', default=0)
    M2_Feet = models.DecimalField(max_digits=18, decimal_places=6, verbose_name='M2/Feet', default=0)
    bundle = models.IntegerField(verbose_name='Pcs/Bundle', default=0)
    created_by = models.ForeignKey(User, related_name='profile_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='profile_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    def __str__(self):
        return self.profile_sym
    def save(self, *args, **kwargs):
        # normalize identifiers to upper case before storing
        self.profile_group = self.profile_group.upper()
        self.profile_id = self.profile_id.upper()
        self.profile_sym = self.profile_sym.upper()
        if self.width > 0 and self.thick > 0:
            getcontext().prec = 6
            # volume per running foot: width(mm) * thick(mm) * 304.8(mm/ft) -> m3
            self.CBM_Feet = Decimal(self.width) * Decimal(self.thick) * Decimal(mm_ft)*Decimal(10**-9)
            # surface per running foot: (width + 2*thick) mm * 304.8 -> m2
            # (presumably one face plus both edges - confirm with the business).
            # Bug fix: the original lacked the parentheses and computed
            # width + (2*thick*304.8e-6), i.e. essentially just the raw width.
            self.M2_Feet = (Decimal(self.width) + Decimal(self.thick*2)) * Decimal(mm_ft)*Decimal(10**-6)
        return super(Profile, self).save(*args, **kwargs)
class WoodType(models.Model):
    """Wood species/type master; optionally grouped under a parent via parent_id."""
    wood_type_id = models.CharField(max_length=3, primary_key=True, verbose_name='Wood type Id')
    sym = models.CharField(max_length=5, blank=True, null=True, verbose_name='Sym')
    description = models.CharField(max_length=200, verbose_name='Name')
    # plain char reference to another WoodType's pk (not a ForeignKey)
    parent_id = models.CharField(max_length=3, blank=True, null=True, default='', verbose_name='Main group')
    freqUsed = models.BooleanField(default=True, blank=True, null=True)
    wood_group = models.CharField(max_length=50, choices=dictionary.WOOD_GROUP_CHOICE, blank=True, null=True, verbose_name='WG Type')
    prod_type = models.CharField(max_length=20, choices=dictionary.PROD_TYPE, blank=True, null=True, verbose_name='Prod Type')
    bod_view = models.BooleanField(default=False, blank=True, null=True, verbose_name='Shown')
    created_by = models.ForeignKey(User, related_name='wood_type_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='wood_type_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    def __str__(self):
        return self.description
    def save(self, *args, **kwargs):
        # normalize to upper case before storing.
        # NOTE(review): sym is nullable - .upper() raises if it is None; confirm
        # forms always supply it.
        self.sym = self.sym.upper()
        self.description = self.description.upper()
        return super(WoodType, self).save(*args, **kwargs)
class SortGroup(models.Model):
    """Sorting/grade group master used to classify colors and products."""
    id = models.CharField(max_length=3, primary_key=True, verbose_name='ID')
    description = models.CharField(max_length=200, verbose_name='Description')
    sym = models.CharField(max_length=20, blank=True, null=True, verbose_name='Sym')
    parent = models.CharField(max_length=50, blank=True, null=True, verbose_name='Parent')
    freqUse = models.BooleanField(blank=True, null=True,verbose_name='Freq Used')
    created_by = models.ForeignKey(User, related_name='sort_group_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='sort_group_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    class Meta:
        ordering = ('id',)
    def __str__(self):
        return self.description
    def save(self, *args, **kwargs):
        # normalize the symbol to upper case.
        # NOTE(review): sym is nullable - .upper() raises if it is None.
        self.sym = self.sym.upper()
        return super(SortGroup, self).save(*args, **kwargs)
class Color(models.Model):
    """Finish/color master: group, grade, wood type and finish-effect flags."""
    color_group = models.CharField(max_length=50, choices=dictionary.COLOR_GROUP, verbose_name='Color Group')
    color_id = models.CharField(max_length=4, primary_key=True, verbose_name='Code')
    description = models.CharField(max_length=100, verbose_name='Description')
    sort_group = models.ForeignKey(SortGroup, related_name='colors', on_delete=models.PROTECT, verbose_name='Sort Group', blank=True, null=True)
    sort_group_note = models.CharField(max_length=100, verbose_name='Sort Group Note', blank=True, null=True)
    wood_type = models.ForeignKey(WoodType, related_name='woodtype_color', on_delete=models.PROTECT, verbose_name='Wood Type', blank=True, null=True)
    gloss = models.CharField(max_length=20, verbose_name='Gloss', blank=True, null=True)
    # finish-effect flags
    printed = models.BooleanField(verbose_name='Printed', default=False)
    distressed = models.BooleanField(verbose_name='Distressed', default=False)
    distressed_remark = models.CharField(max_length=100, verbose_name='Distressed remark', blank=True, null=True)
    phun_hot = models.BooleanField(verbose_name='Phun hột', default=False)
    emboss = models.BooleanField(verbose_name='Emboss', default=False)
    scratch = models.BooleanField(verbose_name='Scratch', default=False)
    glazed = models.BooleanField(verbose_name='Glazed', default=False)
    danh_bui = models.BooleanField(verbose_name='Đánh bụi', default=False)
    remark = models.TextField(verbose_name='Remark', blank=True, null=True)
    # audit fields (no timestamps here, unlike the other masters)
    created_by = models.ForeignKey(User, related_name='color_creator', on_delete=models.DO_NOTHING)
    updated_by = models.ForeignKey(User, related_name='color_updater', on_delete=models.DO_NOTHING)
    class Meta:
        ordering = ('color_id',)
    def __str__(self):
        return self.color_id + '-' + self.description
class Length(models.Model):
    """Length lookup keyed by millimeters; inches are derived from feet on save."""
    mm = models.IntegerField(primary_key=True, default=0, verbose_name='MM')
    feet = models.DecimalField(max_digits=8, decimal_places=4, verbose_name='Feet')
    inch = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True, verbose_name='Inch')
    freqUsed = models.BooleanField(blank=True, null=True, verbose_name='Freq. used')
    created_by = models.ForeignKey(User, related_name='length_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='length_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    class Meta:
        ordering = ('mm',)
    def save(self, *args, **kwargs):
        # derived: 12 inches per foot
        self.inch = Decimal(self.feet)*12
        super(Length, self).save(*args, **kwargs)
    def __str__(self):
        return str(self.mm)
class Thick(models.Model):
    """Thickness lookup keyed by millimeters; inch is free text (e.g. a fraction)."""
    mm = models.IntegerField(primary_key=True, verbose_name='MM')
    cm = models.DecimalField(max_digits=10, decimal_places=3, verbose_name='CM')
    inch = models.CharField(max_length=20, verbose_name='Inch', blank=True, null=True)
    created_by = models.ForeignKey(User, related_name='thick_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='thick_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    class Meta:
        ordering = ('mm',)
    def __str__(self):
        return str(self.mm)
class LogScale(models.Model):
    """Log scaling table: board feet derived from diameter and length on save."""
    dia = models.DecimalField(max_digits=18,decimal_places=2, verbose_name='Diameter')
    length = models.DecimalField(max_digits=18,decimal_places=2, verbose_name='Length')
    BF = models.DecimalField(max_digits=18,decimal_places=2, verbose_name='Board FT')
    class Meta:
        ordering = ('dia', 'length',)
    def save(self, *args, **kwargs):
        # L * (D - 4)^2 / 16: this is the Doyle log rule formula
        self.BF = Decimal(self.length) * Decimal(self.dia - 4) * Decimal(self.dia - 4) / 16
        super(LogScale, self).save(*args, **kwargs)
    def __str__(self):
        return str(self.dia)
class Country(models.Model):
    """Country master keyed by a short (max 3 chars) symbol."""
    sym = models.CharField(max_length=3, primary_key=True, verbose_name='Country')
    name = models.CharField(max_length=50, verbose_name='Name')
    created_by = models.ForeignKey(User, related_name='country_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='country_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    class Meta:
        ordering = ('sym',)
        verbose_name_plural = 'Countries'
    def __str__(self):
        return self.sym
    def save(self, *args, **kwargs):
        # normalize the symbol to upper case before storing
        self.sym = self.sym.upper()
        return super(Country, self).save(*args, **kwargs)
# Product Master Data
class Warehouse(models.Model):
    """Warehouse master with the GL accounts used when stock moves in/out."""
    type = models.CharField(max_length=20, choices=dictionary.MATERIAL_TYPE, verbose_name='Type')
    id = models.SlugField(max_length=50, primary_key=True, verbose_name='Code')
    description = models.CharField(max_length=200, verbose_name='Description')
    credit_acc = models.IntegerField(verbose_name='credit code', blank=True, null=True)
    debit_acc = models.IntegerField(verbose_name='debit code', blank=True, null=True)
    class Meta:
        ordering = ('type', 'id')
    # id-normalizing save() was deliberately disabled; left for reference
    # def save(self,*args, **kwargs):
    #     self.id = slugify(str(self.id).upper())
    #     super(Warehouse, self).save(*args, **kwargs)
    def __str__(self):
        return self.description
class Unit(models.Model):
    """Unit-of-measure master; the unit string itself is the primary key."""
    unit = models.CharField(max_length=20, primary_key=True, verbose_name='Unit')
    def __str__(self):
        return self.unit
class PaymentTerm(models.Model):
    """Payment-term master; the term text itself is the primary key."""
    term = models.CharField(max_length=200, primary_key=True, verbose_name='Term')
    def __str__(self):
        return self.term
class ShipMethod(models.Model):
    """Shipping-method master (auto primary key)."""
    ship_method = models.CharField(max_length=100, verbose_name='Ship Method')
    def __str__(self):
        return self.ship_method
class UnitConversion(models.Model):
    """Conversion ratio between two units.
    NOTE(review): direction is presumably from f_unit to t_unit via *ratio* -
    confirm against the code that consumes it."""
    f_unit = models.ForeignKey(Unit, related_name='f_unit', on_delete=models.CASCADE, verbose_name='From unit')
    t_unit = models.ForeignKey(Unit, related_name='t_unit', on_delete=models.CASCADE, verbose_name='To unit')
    ratio = models.DecimalField(max_digits=10, decimal_places=5, verbose_name='Ratio')
class Category(models.Model):
    """Hierarchical product category; each node may point at a parent node."""
    id = models.SlugField(max_length=100, primary_key=True, verbose_name='Category')
    code = models.CharField(max_length=3, verbose_name='Code')
    # label typo fixed: 'Catergory' -> 'Category'
    is_root = models.BooleanField(default=False, verbose_name='Is root Category')
    parent = models.ForeignKey('self', related_name='child', blank=True, null=True, on_delete=models.CASCADE, verbose_name='Parent Category')
    description = models.CharField(max_length=250, verbose_name='Description', blank=True, null=True)
    useSequence = models.BooleanField(default=True)
    class Meta:
        ordering = ('parent_id', 'id',)
        verbose_name_plural = 'Category'
    def __str__(self):
        return self.id
    def get_main_parent_id(self):
        """Return the id of this category's top-most ancestor (itself if root).

        Bug fix: the original walked `.parent` on the string `self.id`, which
        raised AttributeError for every non-root category.
        """
        if self.parent_id is None or self.is_root:
            return self.id
        current = self
        while current.parent is not None:
            current = current.parent
        return current.id
    def get_parent_description(self):
        """Return {'description': ...} for this category's parent."""
        return Category.objects.values('description').get(id=self.parent_id)
    def get_full_parent_code(self):
        """Return the 'id-parentid-...-rootid' chain for this category, or ''
        when it has no parent.

        Bug fix: the original assigned `self.id` (a string) to the cursor and
        then accessed `.parent` on it, raising AttributeError. It intended to
        collect the ancestor chain starting from this node.
        """
        if not self.parent:
            return ''
        chain = []
        current = self
        while current is not None:
            chain.append(current.id)
            current = current.parent
        return '-'.join(map(str, chain))
class Product(models.Model):
    """Sellable/stockable item tying together warehouse, category, profile,
    wood type, color, sort group and unit, plus stock min/max levels."""
    warehouse = models.ForeignKey(Warehouse, related_name='products', verbose_name='Warehouse', on_delete=models.PROTECT)
    category = models.ForeignKey(Category, related_name='products', on_delete=models.PROTECT, verbose_name='Category')
    code = models.CharField(max_length=50, primary_key=True, verbose_name='Code')
    description = models.CharField(max_length=250, verbose_name='Description')
    profile = models.ForeignKey(Profile, related_name='product_profile', blank=True, null=True, on_delete=models.PROTECT)
    wood_type = models.ForeignKey(WoodType, related_name='product_woodtype', blank=True, null=True, on_delete=models.PROTECT)
    color = models.ForeignKey(Color, related_name='product_color', on_delete=models.PROTECT, verbose_name='Color', blank=True, null=True)
    sort_group = models.ForeignKey(SortGroup, related_name='product_sortgroup', verbose_name='Sort Group', on_delete=models.PROTECT, blank=True, null=True)
    unit = models.ForeignKey(Unit, related_name='product_unit', verbose_name='Unit', on_delete=models.PROTECT)
    # reorder thresholds
    min_qty = models.IntegerField(default=0)
    max_qty = models.IntegerField(default=0)
    created_by = models.ForeignKey(User, related_name='product_creator', on_delete=models.DO_NOTHING)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_by = models.ForeignKey(User, related_name='product_updater', on_delete=models.DO_NOTHING)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    def __str__(self):
        return str(self.code)
class Bank(models.Model):
    """Bank master data referenced by partner payment details."""
    code = models.CharField(max_length=50, primary_key=True, verbose_name='Bank Code')
    name = models.CharField(max_length=200, verbose_name='Bank Name')
    address = models.CharField(max_length=200, verbose_name='Bank address', blank=True, null=True)
    # Label fix: this field's verbose_name was copy-pasted as 'Bank Code',
    # duplicating the `code` field's label in the admin/forms.
    swift_code = models.CharField(max_length=20, verbose_name='Swift Code', blank=True, null=True)
    phone = models.CharField(max_length=20, verbose_name='Phone', blank=True, null=True)
    fax = models.CharField(max_length=20, verbose_name='Fax', blank=True, null=True)
class Partner(models.Model):
id = models.CharField(max_length=5, primary_key=True, verbose_name='ID')
name = models.CharField(max_length=200, verbose_name='Name')
description = models.CharField(max_length=255, verbose_name='Description', blank=True, null=True)
address = models.CharField(max_length=255, verbose_name='Address', blank=True, null=True)
country = models.ForeignKey(Country, related_name='partners', on_delete=models.SET_NULL, verbose_name='Country', blank=True, null=True)
tax_code = models.CharField(max_length=20, verbose_name='Tax code', blank=True, null=True)
fax_no = models.CharField(max_length=20, verbose_name='Fax No', blank=True, null=True)
tel_no = models.CharField(max_length=20, verbose_name='Tel. No', blank=True, null=True)
ext_no = models.CharField(max_length=20, verbose_name='Ext', blank=True, null=True)
website = models.CharField(max_length=100, verbose_name='Website', blank=True, null=True)
contact_person = models.CharField(max_length=100, verbose_name='Contact person', blank=True, null=True)
email = models.EmailField(max_length=50, verbose_name='Email', blank=True, null=True)
payment_term = models.ForeignKey(PaymentTerm, related_name='partners', on_delete=models.DO_NOTHING, verbose_name='Payment term', blank=True, null=True)
ship_method = models.ForeignKey(ShipMethod, related_name='partners', on_delete=models.DO_NOTHING, verbose_name='Ship method', blank=True, null=True)
deli_term = models.CharField(max_length=50, verbose_name='Deli term', blank=True, null=True)
bank = models.ForeignKey(Bank, related_name='partners', on_delete=models.SET_NULL, verbose_name='Bank Name', blank=True, null=True)
bank_acc = models.CharField(max_length=50, verbose_name='Bank Account', blank=True, null=True)
active = models.BooleanField(default=True, verbose_name='Status')
parent_id = models.ForeignKey('self', verbose_name='Parent', on_delete=models.DO_NOTHING, blank=True, null=True)
parent_prefix = models.CharField(max_length=3, verbose_name='Invoice Prefix')
created_by = models.ForeignKey(User, related_name='partner_creator', on_delete=models.DO_NOTHING)
# created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_by = models.ForeignKey(User, related_name='partner_updater', on_delete=models.DO_NOTHING) |
# -*- coding: utf-8 -*-
"""Pipeline driver: split the raw corpus, segment and clean the words, then
train a word2vec model on the result."""
from data_helpers import words
from word2vec import w2vm
from data_split import pre_train, pre_evl
# 1. split the raw data into train/eval sets
pre_train()
pre_evl()
# 2. segment both splits into words and clean the word files
words.cut_train_file()
words.cut_test_file()
words.clean_words_file()
# 3. train the word2vec model on the prepared corpus
w2v = w2vm.train_model()
# Write a function that calculates the average over all elements in a matrix.
import numpy as np

def avemat(a):
    """Return the mean of all elements of the array-like *a*.

    Uses numpy's vectorized mean instead of the original per-element Python
    loop (nditer + list append), which was O(n) Python-level work for no
    benefit. Note: an empty input yields nan (with a runtime warning) rather
    than the original ZeroDivisionError.
    """
    return np.asarray(a).mean()
from datetime import datetime
from collections import defaultdict
def solution(input):
    """Advent of Code 2018 day 4, strategy 2.

    Given (datetime, message) log events, find the (guard, minute) pair that
    is asleep on the most days and return guard_id * minute (0 if nobody
    ever sleeps). Fix: the inner minute loop reused `i`, shadowing the
    enumerate index — harmless today only because nothing read `i` afterward,
    but fragile; the loops now use distinct names.
    """
    events = sorted(input, key=lambda x: x[0])
    guard = None
    # guard id -> minute -> number of days asleep during that minute
    sleep_counts = defaultdict(lambda: defaultdict(int))
    for idx, (dt, msg) in enumerate(events):
        if '#' in msg:
            guard = int(msg.split()[1][1:])
        elif 'falls asleep' in msg:
            # Asleep until the next event (the wake-up). If the log ends
            # mid-nap, assume sleep runs to the top of the hour.
            try:
                wake = events[idx + 1][0].minute
            except IndexError:
                wake = 60
            for minute in range(dt.minute, wake):
                sleep_counts[guard][minute] += 1
    best_guard = 0
    best_minute = 0
    best_count = 0
    for gid, minutes in sleep_counts.items():
        for minute, count in minutes.items():
            if count > best_count:
                best_guard, best_minute, best_count = gid, minute, count
    return best_guard * best_minute
def parse_input(input):
    """Yield (datetime, message) pairs from '[1518-11-01 00:00] text' lines."""
    lines = input.split('\n')
    for entry in lines:
        stamp, message = entry.split(']')
        parsed = datetime.strptime(stamp[1:], '%Y-%m-%d %H:%M')
        yield parsed, message
if __name__ == '__main__':
    # Read the puzzle input and print the strategy-2 answer
    with open('input.txt') as fh:
        print(solution(parse_input(fh.read().strip())))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.