code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
import sys
import os
import json
import hashlib
import codecs
import base64
import requests
import ConfigParser
from urllib import quote
from datetime import datetime
DIR = os.path.dirname(__file__)
CONFIG_FILE = os.path.join(DIR, 'settings.cfg')
class Uploader():
def upload_data(self, path, data):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/%s/?username=%s&password=%s' % (SW_URL, path, SW_USER, SW_PWD )
r = requests.post(url, data=data)
if DEBUG:
print '\tHTTP Status Code: %s\nResponse: %s' % (r.status_code, r.text)
elif r.status_code == 400:
print '\tHTTP Status Code: %s\n\t%s' % (r.status_code, r.text)
def update_data(self, path, data):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/%s/?username=%s&password=%s' % (SW_URL, path, SW_USER, SW_PWD )
r = requests.put(url, data=data)
if DEBUG:
print '\tHTTP Status Code: %s\nResponse: %s' % (r.status_code, r.text)
else:
print '\tHTTP Status Code: %s' % r.status_code
def delete_data(self, id):
url = SW_URL+':8081/helpdesk/WebObjects/Helpdesk.woa/ra/Locations/%s?username='+SW_USER+'&password='+SW_PWD % id
r = requests.delete(url)
if DEBUG:
print '\tHTTP Status Code: %s\nResponse: %s' % (r.status_code, r.text)
else:
print '\tHTTP Status Code: %s' % r.status_code
class Reader():
def get_d42_data(self, url):
headers = {
'Authorization': 'Basic ' + base64.b64encode(D42_USER + ':' + D42_PWD),
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.get(url, headers=headers, verify=False)
msg = 'Status code: %s' % str(r.status_code)
if DEBUG:
print msg
return r.text
def search_sw_data(self, path, filter, what):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/%s/?qualifier=(%s %s "%s")&username=%s&password=%s' \
% (SW_URL, path, filter, quote("="), what, SW_USER, SW_PWD)
r = requests.get(url)
result = json.loads(r.text)
if DEBUG:
print '\n----------------------------------'
print result
return result
def get_all_manufacturers(self):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/Manufacturers/?username=%s&password=%s' \
% (SW_URL, SW_USER, SW_PWD)
r = requests.get(url)
result = json.loads(r.text)
if DEBUG:
print '\n----------------------------------'
print result
return result
def get_all_types(self):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/AssetTypes/?username=%s&password=%s' \
% (SW_URL, SW_USER, SW_PWD)
r = requests.get(url)
result = r.json()
if DEBUG:
print '\n----------------------------------'
print result
return result
def get_all_locations(self):
url = '%s:8081/helpdesk/WebObjects/Helpdesk.woa/ra/Locations/?username=%s&password=%s' \
% (SW_URL, SW_USER, SW_PWD)
r = requests.get(url)
result = r.json()
if DEBUG:
print '\n----------------------------------'
print result
return result
class Utility():
def read_config(self):
if not os.path.exists(CONFIG_FILE):
msg = '\n[!] Cannot find config file.Exiting...'
print msg
sys.exit()
else:
cc = ConfigParser.RawConfigParser()
cc.readfp(open(CONFIG_FILE,"r"))
# ------------------------------------------------------------------------
# Device42
D42_USER = cc.get('device42', 'username')
D42_PWD = cc.get('device42', 'password')
D42_URL = cc.get('device42', 'url')
# SolarWinds
SW_USER = cc.get('solarwinds', 'username')
SW_PWD = cc.get('solarwinds', 'password')
SW_URL = cc.get('solarwinds', 'url')
#Other
DRY_RUN = cc.getboolean('other', 'dry_run')
DEBUG = cc.getboolean('other', 'debug')
LOGGING = cc.getboolean('other', 'logging')
LOGFILE = cc.get('other', 'logfile')
# ------------------------------------------------------------------------
return D42_USER, D42_PWD, D42_URL, SW_USER, SW_PWD, SW_URL,\
DRY_RUN, DEBUG, LOGGING, os.path.join(DIR, LOGFILE)
class Asset():
def __init__(self):
pass
def get_devices_from_d42(self):
device_names = []
url = D42_URL+'/api/1.0/hardwares/'
result = json.loads(reader.get_d42_data(url))
url = D42_URL + '/api/1.0/devices/'
r = reader.get_d42_data(url)
response = json.loads(r)['Devices']
locations = self.get_locations_from_sw()
for r in response:
device_names.append(r['name'])
for dev in device_names:
url = D42_URL + '/api/1.0/devices/name/%s' % dev
device = json.loads(reader.get_d42_data(url))
if DEBUG:
print '\n-------------------------------------------'
print json.dumps(device, indent=4, sort_keys=True)
print '-------------------------------------------\n'
data = {}
model_name = device['hw_model']
device_name = device['name']
if not device_name:
print '[!] Cannot import device(asset) without name!'
break
if model_name:
hwid = self.get_hwid(model_name.lower())
data.update({'model': {'id':hwid}})
try:
mac = device['ip_addresses'][0]['macaddress']
data.update({'macAddress':mac})
except KeyError:
pass
try:
ip = device['ip_addresses'][0]['ip']
data.update({'networkAddress':ip})
except KeyError:
pass
try:
d42loc = data['location']
loc_id = locations[d42loc]
data.update({'location': {'id':loc_id}})
except KeyError:
pass
data.update({'notes': device['notes']})
data.update({'serialNumber': device['serial_no']})
data.update({'assetNumber' : device_name})
self.create_asset(data, device_name)
else:
'[!] Cannot create asset without model name!'
def get_locations_from_sw(self):
raw = reader.get_all_locations()
locations = {}
for x in raw:
location = x['locationName']
id = x['id']
locations.update({location : id})
return locations
def get_hwid(self, model_name):
"""
Get hardware model ID from WHD by name.
"""
raw = reader.search_sw_data('Models', 'modelName', model_name)
return raw[0]['id']
def create_asset(self, data, asset_no):
print '[!] Creating asset with asset number: "%s"' % asset_no
uploader.upload_data('Assets', json.dumps(data))
def sync_buildings():
"""
Name mappings:
Device42 : "Buildings"
Solarwinds : "Locations"
"""
buildings = reader.get_d42_data(D42_URL+'/api/1.0/buildings/')
for building in json.loads(buildings)['buildings']:
data = {}
name = building['name'].lower()
data.update({'address' :building['address']})
data.update({'locationName':name})
data.update({'note' :building['notes']})
data.update({'phone' :building['contact_phone']})
result = reader.search_sw_data('Locations','locationName', name)
if result:
print '[!] Location "%s" already exists. Updating...' % name
uploader.update_data('Locations', json.dumps(data))
else:
print '[!] Location "%s" does not exist. Creating...' % name
uploader.upload_data('Locations', json.dumps(data))
def sync_manufacturers():
"""
Name mappings:
Device42 : "Vendors"
Solarwinds : "Manufacturers"
"""
url = D42_URL+'/api/1.0/vendors/'
vendors = reader.get_d42_data(url)
for vendor in json.loads(vendors)['vendors']:
data = {}
name = vendor['name'].lower()
data.update({"fullName" : name})
data.update({"url" : vendor['home_page']})
try:
result = reader.search_sw_data('Manufacturers','fullName', name)
exists = True
id = json.loads(json.dumps(result))[0]['id']
except:
exists = None
id = None
if bool(exists) == False:
print '[!] Manufacturer "%s" does not exist. Creating...' % name
uploader.upload_data('Manufacturers', json.dumps(data))
else:
print '[!] Manufacturer "%s" already exists. Updating...' % name
uploader.update_data('Manufacturers', json.dumps(data))
def sync_asset_types():
    """
    Name mappings:
    Device42 : "Type"
    Solarwinds : "Asset Type"
    """
    # Device42's fixed set of device categories.
    wanted = set([
        u'unknown',
        'physical',
        'virtual',
        'blade',
        'cluster',
        'other'
    ])
    # Categories WHD already knows about.
    existing = set(entry['assetType'] for entry in reader.get_all_types())
    # Create only the ones WHD is missing.
    for missing in wanted - existing:
        uploader.upload_data('AssetTypes', json.dumps({'assetType': missing}))
def sync_models():
"""
Name mappings:
Device42 : "Hardware"
Solarwinds : "Models"
"""
# get list of known manufacturers
result = reader.get_all_manufacturers()
all_manufacturers = {}
for manufacturer in result:
name = manufacturer['fullName']
if name:
name = name.lower()
manid = manufacturer['manufacturerId']
all_manufacturers.update({name:manid})
#get all asset types from solarwind
types = reader.get_all_types()
all_types = {}
for type in types:
t = type['assetType']
i = type['id']
all_types.update({t:i})
url = D42_URL+'/api/1.0/hardwares/'
result = json.loads(reader.get_d42_data(url))
models = result['models']
for model in models:
name = model['name'].lower()
manufacturer = model['manufacturer'].lower()
type = model['type'].lower()
if type != '':
typeid = all_types[type]
else:
type = 'unknown'
typeid = all_types[type]
manid = all_manufacturers.get(manufacturer)
data = {"modelName" : name,
"assettype" : {"id":typeid, "type": type},
"manufacturer": { "id": manid, "type": manufacturer }
}
#print data
print '[!] Syncing model "%s"' % name
uploader.upload_data('Models', json.dumps(data))
def main():
    """Run a full one-way sync from Device42 into SolarWinds WHD."""
    print '\n'
    # Order matters: assets created last reference the locations,
    # manufacturers, types and models synced first.
    sync_buildings()
    sync_manufacturers()
    sync_asset_types()
    sync_models()
    asset = Asset()
    asset.get_devices_from_d42()
if __name__ == '__main__':
    utility = Utility()
    reader = Reader()
    uploader = Uploader()
    # Config values are unpacked into module-level globals that the
    # Uploader/Reader/Asset classes and sync_* functions read directly.
    D42_USER, D42_PWD, D42_URL, \
        SW_USER, SW_PWD, SW_URL,\
        DRY_RUN, DEBUG, LOGGING, LOGFILE = utility.read_config()
    main()
    sys.exit()
|
device42/SolarwindsWHD
|
d42_sync_tool.py
|
Python
|
mit
| 12,130
|
import os
import sys
import numpy as np
from theano import config
from pandas import DataFrame
from seizure.cnn.conv_net import ConvNet
from seizure.cnn_trainer.loader import load_train, load_test
config.floatX = 'float32'
def train_and_test(patient_name, prediction_target, root_path, csv_path):
path = root_path + '/' + patient_name
d = load_train(path)
X = d['x']
Y = d['y']
scaler = d['scaler']
x_train, y_train = d['x_train'], d['y_train']
x_valid, y_valid = d['x_valid'], d['y_valid']
if prediction_target == 'seizure':
Y[np.where(Y == 2)[0]] = 1
y_train[np.where(y_train == 2)[0]] = 1
y_valid[np.where(y_valid == 2)[0]] = 1
print '============ dataset'
print 'train:', x_train.shape
print 'n_pos:', np.sum(y_train), 'n_neg:', len(y_train) - np.sum(y_train)
print 'valid:', x_valid.shape
print 'n_pos:', np.sum(y_valid), 'n_neg:', len(y_valid) - np.sum(y_valid)
print '===================='
# ----------- PARAMETERS 1+2
n_timesteps = d['n_timesteps']
dim = d['n_channels']
dropout_prob = [0.2, 0.5] # on 2 last layers
batch_size = 10
max_iter = 40000
valid_freq = 50
activation = 'tanh'
weights_variance = 0.1
l2_reg = 0.001
objective_function = 'cross_entropy'
# ----------- PARAMETERS 2
# recept_width = [25, 26]
# pool_width = [1, 1]
# nkerns = [20, 40, 128]
# stride = [1, 1]
# ----------- PARAMETERS 3
recept_width = [5, 1]
pool_width = [2, 1]
nkerns = [32, 16, 128]
stride = [5, 1]
# ----------- PARAMETERS 4
# recept_width = [15, 4]
# pool_width = [2, 1]
# nkerns = [32, 64, 128]
# stride = [5, 1]
print '======== parameters'
print 'n_timesteps:', n_timesteps
print 'n_channels:', dim
print 'max_epoch: ', max_iter
print 'valid_freq', valid_freq
print 'nkerns: ', nkerns
print 'receptive width: ', recept_width
print 'pool_width:', pool_width
print 'strides:', stride
print 'dropout_prob: ', dropout_prob
print 'batch_size:', batch_size
print 'activation:', activation
print 'L2_reg:', l2_reg
print 'weights_variance:', weights_variance
print 'objective function:', objective_function
print '===================='
cnn = ConvNet(nkerns=nkerns,
recept_width=recept_width,
pool_width=pool_width,
stride=stride,
dropout_prob=dropout_prob,
l2_reg=l2_reg,
training_batch_size=batch_size,
activation=activation,
weights_variance=weights_variance,
n_timesteps=n_timesteps,
dim=dim,
objective_function=objective_function)
best_iter_cost = cnn.validate(train_set=(x_train, y_train),
valid_set=(x_valid, y_valid),
valid_freq=valid_freq,
max_iter=max_iter)
cnn = ConvNet(nkerns=nkerns,
recept_width=recept_width,
pool_width=pool_width,
stride=stride,
dropout_prob=dropout_prob,
l2_reg=l2_reg,
training_batch_size=batch_size,
activation=activation,
weights_variance=weights_variance,
n_timesteps=n_timesteps,
dim=dim,
objective_function=objective_function)
cnn.train(train_set=(X, Y), max_iter=max(500, best_iter_cost))
# test data
d = load_test(path, scaler)
x_test = d['x']
id = d['id']
test_proba = cnn.get_test_proba(x_test)
ans = zip(id, test_proba)
df = DataFrame(data=ans, columns=['clip', prediction_target])
df.to_csv(csv_path + '/' + patient_name +
prediction_target + '.csv', index=False, header=True)
if __name__ == '__main__':
    # Usage: main.py <data_root> <csv_output_dir>
    root_path = sys.argv[1]
    csv_path = sys.argv[2]
    if not os.path.exists(csv_path):
        os.makedirs(csv_path)
    # Kaggle seizure-detection subjects, processed one at a time.
    names = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Patient_1', 'Patient_2', 'Patient_3',
             'Patient_4', 'Patient_5', 'Patient_6', 'Patient_7', 'Patient_8']
    for patient_name in names:
        print '***********************', patient_name, '***************************'
        train_and_test(patient_name, 'seizure', root_path, csv_path)
|
IraKorshunova/kaggle-seizure-detection
|
seizure/cnn_trainer/main.py
|
Python
|
mit
| 4,444
|
# Copyright (c) 2016-2021 John Mihalic <https://github.com/mezz64>
# Licensed under the MIT license.
# Used this guide to create module
# http://peterdowns.com/posts/first-time-with-pypi.html
# git tag 0.1 -m "0.1 release"
# git push --tags origin master
#
# Upload to PyPI Live
# python setup.py register -r pypi
# python setup.py sdist upload -r pypi
from distutils.core import setup
# Package metadata for the pyHik PyPI distribution.
setup(
    name='pyHik',
    packages=['pyhik'],  # note: package dir is lower-case, dist name is not
    version='0.2.9',
    description='Provides a python api to interact with a Hikvision camera event stream and toggle motion detection.',
    author='John Mihalic',
    author_email='mezz64@users.noreply.github.com',
    license='MIT',
    url='https://github.com/mezz64/pyhik',
    # download_url must be kept in sync with `version` above
    download_url='https://github.com/mezz64/pyhik/tarball/0.2.9',
    keywords=['hik', 'hikvision', 'event stream', 'events', 'api wrapper', 'homeassistant'],
    classifiers=[],
)
|
mezz64/pyHik
|
setup.py
|
Python
|
mit
| 905
|
class Solution(object):
    def findKthLargest(self, nums, k):
        """
        Return the k-th largest element of nums (1-indexed, duplicates count).
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        import heapq  # local import keeps the class self-contained
        # heapq.nlargest runs in O(n log k) instead of sorting the whole
        # list (O(n log n)); the k-th largest is the last of the top k.
        return heapq.nlargest(k, nums)[-1]
|
Junnplus/leetcode
|
algorithms/medium/kth-largest-element-in-an-array.py
|
Python
|
mit
| 242
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import datetime
import email.encoders
import email.mime.base
import email.mime.image
import email.mime.multipart
import email.mime.text
import getpass
import optparse
import os
import smtplib
import sys
# ipa output parameters
ipaRootDir = "/Users/" + getpass.getuser() + "/Desktop/"  # builds land on the current user's Desktop
ipaFileDir = datetime.datetime.today().strftime(
    "%Y-%m-%d-%H-%M-%S")  # per-run folder name; getConfig() appends "-<target>/"
class Package():
    """Automate building an iOS .ipa, publishing it to fir.im and mailing
    the download link.

    Workflow (see start()): parse CLI options, discover the Xcode target,
    read/create Setting.ini, git pull, clean, build, archive via `fir`,
    upload, then send a notification email.
    """

    def __init__(self):
        # email parameters
        self.emailFromUser = None
        self.emailToUser = None
        self.emailPassword = None
        self.emailHost = None
        self.emailBodyText = None
        # project parameters
        self.projectTargetName = None
        self.projectChangeLog = None
        self.isWorkSpace = False
        self.keychainPassword = None
        self.isExcuteStepByStep = False

    def gitPull(self):
        """Pull the latest code from origin/dev."""
        print('*========================*')
        print('Git Pull Start')
        os.system('git pull origin dev')
        if self.isExcuteStepByStep:
            input('Press Any Key To Continue')

    def mkdir(self):
        """Create (and open up permissions on) the .ipa output folder."""
        if not os.path.exists(ipaRootDir + ipaFileDir):
            os.system('cd %s;mkdir %s' % (ipaRootDir, ipaFileDir))
            # Parenthesized: the original relied on `%` binding tighter
            # than `+`, which only worked by accident.
            os.system("chmod -R 777 %s" % (ipaRootDir + ipaFileDir))

    def keychainUnlock(self):
        """Unlock the login keychain so codesigning can run unattended."""
        os.system("security unlock-keychain -p '%s' %s" %
                  (self.keychainPassword, "~/Library/Keychains/login.keychain"))

    def getConfig(self):
        """Load settings from Setting.ini, prompting to create it on first run."""
        if not os.path.exists('Setting.ini'):
            print('*========================*')
            print('Please Input Your Setting')
            self.setConfig('Setting.ini')
        else:
            try:
                config = configparser.ConfigParser()
                config.read('Setting.ini')
                self.emailFromUser = config.get('Settings', 'emailFromUser')
                self.emailToUser = config.get('Settings', 'emailToUser')
                self.emailPassword = config.get('Settings', 'emailPassword')
                self.emailHost = config.get('Settings', 'emailHost')
                self.keychainPassword = config.get(
                    'Settings', 'keychainPassword')
            except Exception as e:
                raise e
            finally:
                print('*========================*')
                print('Your Setting:')
                print('emailFromUser:' + self.emailFromUser)
                print('emailToUser:' + self.emailToUser)
                print('emailHost:' + self.emailHost)
        # Suffix the dated output folder with the target name.
        global ipaFileDir
        ipaFileDir += ('-' + self.projectTargetName + '/')

    def setConfig(self, path):
        """Prompt for settings interactively and persist them to `path`."""
        self.emailFromUser = input('Input EmailFromUser:')
        self.emailToUser = input('Input EmailToUser:')
        self.emailPassword = input('Input EmailPassword:')
        self.emailHost = input('Input EmailHost:')
        self.keychainPassword = input('Input KeychainPassword:')
        if self.emailFromUser == '' or self.emailToUser == '' or self.emailPassword == '' or self.emailHost == '':
            raise ValueError('Please Enter Valid Setting')
        config = configparser.ConfigParser()
        config.add_section('Settings')
        config.set('Settings', 'emailFromUser', self.emailFromUser)
        config.set('Settings', 'emailToUser', self.emailToUser)
        config.set('Settings', 'emailPassword', self.emailPassword)
        config.set('Settings', 'emailHost', self.emailHost)
        config.set('Settings', 'keychainPassword', self.keychainPassword)
        try:
            os.system('touch Setting.ini')
            with open(path, 'w') as fileHandler:
                config.write(fileHandler)
        except Exception as e:
            raise e

    def removeConfig(self):
        """Delete Setting.ini from the current directory if present."""
        if 'Setting.ini' in os.listdir():
            os.system('rm Setting.ini')

    def setOptParse(self):
        """Parse command-line options into instance state.

        Raises ValueError if no changelog (-c) was supplied.
        """
        p = optparse.OptionParser()
        p.add_option("-m", "--message", action="store",
                     default=None, help="enter email body text")
        p.add_option("-r", "--remove", action="store_true",
                     default=None, help="remove config file")
        p.add_option("-c", "--changelog", action="store",
                     default=None, help="enter changelog")
        p.add_option("-s", "--step", action="store_true",
                     default=None, help="excute step by step")
        options, arguments = p.parse_args()
        if options.message:
            self.emailBodyText = options.message
        if options.remove:
            # Bug fix: was a bare `removeConfig()`, which raised
            # NameError whenever -r was used.
            self.removeConfig()
        if options.changelog:
            self.projectChangeLog = options.changelog
        else:
            raise ValueError('Please Enter The ChangeLog')
        if options.step:
            self.isExcuteStepByStep = True

    def getTargetName(self):
        """Discover the Xcode target from the .xcodeproj in the cwd.

        Also records whether a .xcworkspace exists (CocoaPods projects).
        """
        dirs = os.listdir(os.getcwd())
        for file in dirs:
            if '.xcodeproj' in file:
                # rsplit: project names may themselves contain dots
                name, extend = file.rsplit('.', 1)
                self.projectTargetName = name
            if '.xcworkspace' in file:
                self.isWorkSpace = True
        if not self.projectTargetName:
            raise Exception('Can Not Find .xcodeproj file')
        print('*========================*')
        print('TargetName:%s' % (self.projectTargetName))

    def getTargetVersion(self):
        """Return (displayName, shortVersion, buildVersion) from Info.plist.

        Searches the target folder and its immediate subfolders; returns
        None if no Info.plist is found.
        """
        def plistBuddy(plistFilePath):
            # Read the three identity keys via PlistBuddy.
            plistFilePath = plistFilePath.replace(' ', '\\ ')
            ret = os.popen('/usr/libexec/PlistBuddy -c "Print CFBundleShortVersionString" %s' %
                           plistFilePath)
            projectVersion = ret.readline().replace('\n', '')
            ret = os.popen('/usr/libexec/PlistBuddy -c "Print CFBundleDisplayName" %s' %
                           plistFilePath)
            projectDisplayName = ret.readline().replace('\n', '')
            ret = os.popen('/usr/libexec/PlistBuddy -c "Print CFBundleVersion" %s' %
                           plistFilePath)
            projectBuildVersion = ret.readline().replace('\n', '')
            return (projectDisplayName, projectVersion, projectBuildVersion)
        rootDirs = os.listdir('./%s' % self.projectTargetName)
        plistFilePath = None
        for subDir in rootDirs:
            if "Info.plist" in subDir:
                plistFilePath = ('./%s/Info.plist' % self.projectTargetName)
                return plistBuddy(plistFilePath)
            elif os.path.isdir('./%s/%s' % (self.projectTargetName, subDir)):
                childDirs = os.listdir('./%s/%s' %
                                       (self.projectTargetName, subDir))
                for subChildDirs in childDirs:
                    if "Info.plist" in subChildDirs:
                        plistFilePath = ('./%s/%s/Info.plist' %
                                         (self.projectTargetName, subDir))
                        return plistBuddy(plistFilePath)

    def cleanProject(self):
        """xcodebuild clean (workspace-aware)."""
        print('*========================*')
        print('Clean Project Start')
        if self.isWorkSpace:
            os.system('xcodebuild -workspace %(x)s.xcworkspace -scheme %(x)s clean' %
                      {'x': self.projectTargetName})
        else:
            os.system('xcodebuild clean')
        if self.isExcuteStepByStep:
            input('Press Any Key To Continue')

    def buildProject(self):
        """xcodebuild build (workspace-aware)."""
        print('*========================*')
        print('Build Project Start')
        if self.isWorkSpace:
            os.system('xcodebuild -workspace %(x)s.xcworkspace -scheme %(x)s build' %
                      {'x': self.projectTargetName})
        else:
            os.system('xcodebuild build')
        if self.isExcuteStepByStep:
            input('Press Any Key To Continue')

    def archiveProject(self):
        """Archive the .ipa into the output folder using `fir build_ipa`."""
        print('*========================*')
        print('Archive Project Start')
        if self.isWorkSpace:
            os.system('fir build_ipa %(x)s.xcworkspace -o %(y)s -w -S %(x)s' %
                      {'x': self.projectTargetName, 'y': ipaRootDir + ipaFileDir})
        else:
            os.system('fir build_ipa %(x)s.xcodeproj -o %(y)s' %
                      {'x': self.projectTargetName, 'y': ipaRootDir + ipaFileDir})
        if self.isExcuteStepByStep:
            input('Press Any Key To Continue')

    def uploadToFir(self):
        """Publish the built .ipa to fir.im; return the download URL (or None)."""
        print('*========================*')
        print('UploadToFir Project Start')
        dirs = os.listdir(ipaRootDir + ipaFileDir)
        downloadUrl = None
        for file in dirs:
            if '.ipa' in file:
                ipaPath = ipaRootDir + ipaFileDir + file
                ret = os.popen('fir publish %(x)s -c "%(y)s" -Q' %
                               {'x': ipaPath, 'y': self.projectChangeLog})
                for info in ret.readlines():
                    if "Published succeed" in info:
                        # The URL is the tail of the success line.
                        downloadUrl = info[info.find('http'):]
        return downloadUrl

    def sendMail(self, to_addr, from_addr, subject, body_text, downloadUrl):
        """Email the changelog + download link, attaching the QR code PNG."""
        print('*========================*')
        print('Send Mail Start')
        msg = email.mime.multipart.MIMEMultipart()
        msg['from'] = from_addr
        msg['to'] = to_addr
        msg['subject'] = '_'.join(subject)
        print('To:', msg['to'])
        if body_text:
            emailContent = (subject[0] + ':' + '\n' + '\t' +
                            self.projectChangeLog + '\n' + '\t' + body_text + '\n' + '\t' + downloadUrl + '\n')
        else:
            emailContent = (subject[0] + ':' + '\n' + '\t' +
                            self.projectChangeLog + '\n' + '\t' + downloadUrl + '\n')
        txt = email.mime.text.MIMEText(emailContent)
        msg.attach(txt)
        dirs = os.listdir(ipaRootDir + ipaFileDir)
        for file in dirs:
            if '.png' in file:
                # fir drops a QR-code image next to the .ipa; attach it.
                with open(ipaRootDir + ipaFileDir + file, 'rb') as fileHandler:
                    image = email.mime.base.MIMEBase(
                        'image', 'png', filename=file)
                    image.add_header('Content-Disposition',
                                     'attachment', filename=file)
                    image.add_header('Content-ID', '<0>')
                    image.add_header('X-Attachment-Id', '0')
                    image.set_payload(fileHandler.read())
                    email.encoders.encode_base64(image)
                    msg.attach(image)
                break
        server = smtplib.SMTP(self.emailHost)
        server.login(from_addr, self.emailPassword)
        server.sendmail(from_addr, to_addr.split(','), str(msg))
        server.quit()
        print('Create Ipa Finish')
        print('*========================*')

    def start(self):
        """Run the whole build/publish/notify pipeline."""
        print('*========================*')
        print('Create Ipa Start')
        # parse command-line options
        self.setOptParse()
        # discover the project target name
        self.getTargetName()
        # read version info from Info.plist
        projectInfo = self.getTargetVersion()
        # load (or interactively create) the configuration file
        self.getConfig()
        # create the output folder for the build
        self.mkdir()
        # unlock the keychain for codesigning
        self.keychainUnlock()
        # pull the latest code
        self.gitPull()
        # clean the project
        self.cleanProject()
        # build
        self.buildProject()
        # archive the .ipa
        self.archiveProject()
        # upload to fir.im
        downloadUrl = self.uploadToFir()
        # send the notification email
        self.sendMail(self.emailToUser, self.emailFromUser,
                      projectInfo, self.emailBodyText, downloadUrl)
if __name__ == '__main__':
    # Entry point: run the full build/publish pipeline.
    package = Package()
    package.start()
|
atbj505/pythonIpa
|
pythonIpa/package.py
|
Python
|
mit
| 11,636
|
#
# Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# http://www.geeksforgeeks.org/inorder-tree-traversal-without-recursion-and-without-stack/
#
class BSTIterator(object):
    """In-order BST iterator implemented with Morris traversal.

    Uses O(1) extra space (no stack): the traversal temporarily threads
    each subtree's in-order predecessor's right pointer back to the
    current node, and removes the thread on the second visit, so the
    tree is restored once iteration completes.
    """
    def __init__(self, root):
        """
        :type root: TreeNode
        """
        # current position of the Morris traversal
        self.p = root
    def hasNext(self):
        """
        :rtype: bool
        """
        return self.p is not None
    def next(self):
        """
        :rtype: int
        """
        out = None
        while self.p:
            if not self.p.left:
                # No left subtree: visit this node, then continue right
                # (possibly following a thread back to an ancestor).
                out = self.p.val
                self.p = self.p.right
                break
            else:
                # Find the in-order predecessor: rightmost node of the
                # left subtree (stopping if we hit our own thread).
                tmp = self.p.left
                while tmp.right and tmp.right != self.p:
                    tmp = tmp.right
                if not tmp.right:
                    # First visit: install the thread and descend left.
                    tmp.right = self.p
                    self.p = self.p.left
                else:
                    # Second visit: left subtree exhausted. Remove the
                    # thread, visit this node, then move right.
                    out = self.p.val
                    tmp.right = None
                    self.p = self.p.right
                    break
        return out
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
|
hs634/algorithms
|
python/trees/morris_traversal.py
|
Python
|
mit
| 1,400
|
# Copyright 2001 Brad Chapman.
# Revisions copyright 2009-2010 by Peter Cock.
# Revisions copyright 2010 by Phillip Garland.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Definitions for interacting with BLAST related applications.
Wrappers for the new NCBI BLAST+ tools (written in C++):
- NcbiblastpCommandline - Protein-Protein BLAST
- NcbiblastnCommandline - Nucleotide-Nucleotide BLAST
- NcbiblastxCommandline - Translated Query-Protein Subject BLAST
- NcbitblastnCommandline - Protein Query-Translated Subject BLAST
- NcbitblastxCommandline - Translated Query-Protein Subject BLAST
- NcbipsiblastCommandline - Position-Specific Initiated BLAST
- NcbirpsblastCommandline - Reverse Position Specific BLAST
- NcbirpstblastnCommandline - Translated Reverse Position Specific BLAST
- NcbideltablastCommandline - Protein-Protein domain enhanced lookup time accelerated blast
- NcbiblastformatterCommandline - Convert ASN.1 to other BLAST output formats
For further details, see:
Camacho et al. BLAST+: architecture and applications
BMC Bioinformatics 2009, 10:421
doi:10.1186/1471-2105-10-421
"""
from __future__ import print_function
from Bio.Application import _Option, AbstractCommandline, _Switch
class _NcbibaseblastCommandline(AbstractCommandline):
    """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE).

    This is provided for subclassing, it deals with shared options
    common to all the BLAST tools (blastn, rpsblast, rpsblast, etc
    AND blast_formatter).
    """
    def __init__(self, cmd=None, **kwargs):
        """Prepend the shared BLAST+ options to the subclass's parameters."""
        assert cmd is not None
        extra_parameters = [
            # Core:
            _Switch(["-h", "h"],
                    "Print USAGE and DESCRIPTION; ignore other arguments."),
            _Switch(["-help", "help"],
                    "Print USAGE, DESCRIPTION and ARGUMENTS description; "
                    "ignore other arguments."),
            _Switch(["-version", "version"],
                    "Print version number; ignore other arguments."),
            # Output configuration options
            _Option(["-out", "out"],
                    "Output file for alignment.",
                    filename=True,
                    equate=False),
            # Formatting options:
            _Option(["-outfmt", "outfmt"],
                    "Alignment view. Integer 0-11. Use 5 for XML output "
                    "(differs from classic BLAST which used 7 for XML).",
                    equate=False),
            # TODO - Document and test the column options
            _Switch(["-show_gis", "show_gis"],
                    "Show NCBI GIs in deflines?"),
            _Option(["-num_descriptions", "num_descriptions"],
                    """Number of database sequences to show one-line descriptions for.
                    Integer argument (at least zero). Default is 500.
                    See also num_alignments.""",
                    equate=False),
            # Help text fixed: it was a copy-and-paste of num_descriptions
            # ("show num_alignments for" / "See also num_alignments").
            _Option(["-num_alignments", "num_alignments"],
                    """Number of database sequences to show alignments for.
                    Integer argument (at least zero). Default is 200.
                    See also num_descriptions.""",
                    equate=False),
            _Option(["-line_length", "line_length"],
                    """Line length for formatting alignments (integer, at least 1, default 60).
                    Not applicable for outfmt > 4.
                    Added in BLAST+ 2.2.30.
                    """,
                    equate=False),
            _Switch(["-html", "html"],
                    "Produce HTML output? See also the outfmt option."),
            # Miscellaneous options
            _Switch(["-parse_deflines", "parse_deflines"],
                    "Should the query and subject defline(s) be parsed?"),
        ]
        try:
            # Insert extra parameters - at the start just in case there
            # are any arguments which must come last:
            self.parameters = extra_parameters + self.parameters
        except AttributeError:
            # Should we raise an error? The subclass should have set this up!
            self.parameters = extra_parameters
        AbstractCommandline.__init__(self, cmd, **kwargs)

    def _validate_incompatibilities(self, incompatibles):
        """Used by the BLAST+ _validate method (PRIVATE).

        `incompatibles` maps an option name to the list of option names
        that may not be set at the same time.
        """
        for a in incompatibles:
            if self._get_parameter(a):
                for b in incompatibles[a]:
                    if self._get_parameter(b):
                        raise ValueError("Options %s and %s are incompatible."
                                         % (a, b))
class _NcbiblastCommandline(_NcbibaseblastCommandline):
    """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE).

    This is provided for subclassing, it deals with shared options
    common to all the BLAST tools (blastn, rpsblast, rpsblast, etc).
    """
    def __init__(self, cmd=None, **kwargs):
        # Prepends search-tool options shared by blastn/blastp/etc. (but
        # not blast_formatter) to whatever the subclass has already set.
        assert cmd is not None
        extra_parameters = [
            # Input query options:
            _Option(["-query", "query"],
                    "The sequence to search with.",
                    filename=True,
                    equate=False),  # Should this be required?
            _Option(["-query_loc", "query_loc"],
                    "Location on the query sequence (Format: start-stop)",
                    equate=False),
            # General search options:
            _Option(["-db", "db"],
                    "The database to BLAST against.",
                    equate=False),
            _Option(["-evalue", "evalue"],
                    "Expectation value cutoff.",
                    equate=False),
            _Option(["-word_size", "word_size"],
                    """Word size for wordfinder algorithm.
                    Integer. Minimum 2.""",
                    equate=False),
            # BLAST-2-Sequences options:
            # - see subclass
            # Formatting options:
            # - see baseclass
            # Query filtering options
            _Option(["-soft_masking", "soft_masking"],
                    "Apply filtering locations as soft masks (Boolean, Default = true)",
                    equate=False),
            _Switch(["-lcase_masking", "lcase_masking"],
                    "Use lower case filtering in query and subject sequence(s)?"),
            # Restrict search or results
            _Option(["-gilist", "gilist"],
                    """Restrict search of database to list of GI's.
                    Incompatible with: negative_gilist, seqidlist, remote, subject, subject_loc""",
                    filename=True,
                    equate=False),
            _Option(["-negative_gilist", "negative_gilist"],
                    """Restrict search of database to everything except the listed GIs.
                    Incompatible with: gilist, seqidlist, remote, subject, subject_loc""",
                    filename=True,
                    equate=False),
            _Option(["-seqidlist", "seqidlist"],
                    """Restrict search of database to list of SeqID's.
                    Incompatible with: gilist, negative_gilist, remote, subject, subject_loc""",
                    filename=True,
                    equate=False),
            _Option(["-entrez_query", "entrez_query"],
                    "Restrict search with the given Entrez query (requires remote).",
                    equate=False),
            _Option(["-qcov_hsp_perc", "qcov_hsp_perc"],
                    """Percent query coverage per hsp (float, 0 to 100).
                    Added in BLAST+ 2.2.30.
                    """,
                    equate=False),
            _Option(["-max_target_seqs", "max_target_seqs"],
                    "Maximum number of aligned sequences to keep (integer, at least one).",
                    equate=False),
            # Statistical options
            _Option(["-dbsize", "dbsize"],
                    "Effective length of the database (integer).",
                    equate=False),
            _Option(["-searchsp", "searchsp"],
                    "Effective length of the search space (integer).",
                    equate=False),
            _Option(["-max_hsps_per_subject", "max_hsps_per_subject"],
                    "Override max number of HSPs per subject saved for ungapped searches (integer).",
                    equate=False),
            _Option(["-max_hsps", "max_hsps"],
                    "Set max number of HSPs saved per subject sequence (default 0 means no limit).",
                    equate=False),
            _Switch(["-sum_statistics", "sum_statistics"],
                    "Use sum statistics."),
            # Is -sum_stats a BLAST+ bug, why not use -sum_statistics switch?
            _Option(["-sum_stats", "sum_stats"],
                    """Use sum statistics (boolean).
                    Added in BLAST+ 2.2.30.
                    """,
                    equate=False),
            # Extension options
            _Option(["-xdrop_ungap", "xdrop_ungap"],
                    "X-dropoff value (in bits) for ungapped extensions (float).",
                    equate=False),
            _Option(["-xdrop_gap", "xdrop_gap"],
                    "X-dropoff value (in bits) for preliminary gapped extensions (float).",
                    equate=False),
            _Option(["-xdrop_gap_final", "xdrop_gap_final"],
                    "X-dropoff value (in bits) for final gapped alignment (float).",
                    equate=False),
            _Option(["-window_size", "window_size"],
                    "Multiple hits window size, use 0 to specify 1-hit algorithm (integer).",
                    equate=False),
            # Search strategy options
            _Option(["-import_search_strategy", "import_search_strategy"],
                    """Search strategy to use.
                    Incompatible with: export_search_strategy""",
                    filename=True,
                    equate=False),
            _Option(["-export_search_strategy", "export_search_strategy"],
                    """File name to record the search strategy used.
                    Incompatible with: import_search_strategy""",
                    filename=True,
                    equate=False),
            # Miscellaneous options
            _Option(["-num_threads", "num_threads"],
                    """Number of threads to use in the BLAST search (integer, at least one).
                    Default is one.
                    Incompatible with: remote""",
                    equate=False),
            _Switch(["-remote", "remote"],
                    """Execute search remotely?
                    Incompatible with: gilist, negative_gilist, subject_loc, num_threads, ..."""),
        ]
        try:
            # Insert extra parameters - at the start just in case there
            # are any arguments which must come last:
            self.parameters = extra_parameters + self.parameters
        except AttributeError:
            # Should we raise an error? The subclass should have set this up!
            self.parameters = extra_parameters
        _NcbibaseblastCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # Cross-option constraints mirroring the BLAST+ tools' own checks;
        # raises ValueError on conflicting combinations.
        incompatibles = {"remote": ["gilist", "negative_gilist", "num_threads"],
                         "import_search_strategy": ["export_search_strategy"],
                         "gilist": ["negative_gilist"],
                         "seqidlist": ["gilist", "negative_gilist", "remote"]}
        self._validate_incompatibilities(incompatibles)
        if self.entrez_query and not self.remote:
            raise ValueError("Option entrez_query requires remote option.")
        AbstractCommandline._validate(self)
class _Ncbiblast2SeqCommandline(_NcbiblastCommandline):
    """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE).

    This is provided for subclassing, it deals with shared options
    common to all the BLAST tools supporting two-sequence BLAST
    (blastn, psiblast, etc) but not rpsblast or rpstblastn.
    """

    def __init__(self, cmd=None, **kwargs):
        # cmd is the executable name, supplied by the concrete subclass.
        assert cmd is not None
        extra_parameters = [
            # General search options:
            _Option(["-gapopen", "gapopen"],
                    "Cost to open a gap (integer).",
                    equate=False),
            _Option(["-gapextend", "gapextend"],
                    "Cost to extend a gap (integer).",
                    equate=False),
            # BLAST-2-Sequences options:
            _Option(["-subject", "subject"],
                    """Subject sequence(s) to search.
                    Incompatible with: db, gilist, negative_gilist.
                    See also subject_loc.""",
                    filename=True,
                    equate=False),
            _Option(["-subject_loc", "subject_loc"],
                    """Location on the subject sequence (Format: start-stop).
                    Incompatible with: db, gilist, seqidlist, negative_gilist,
                    db_soft_mask, db_hard_mask, remote.
                    See also subject.""",
                    equate=False),
            # Restrict search or results:
            _Option(["-culling_limit", "culling_limit"],
                    """Hit culling limit (integer).
                    If the query range of a hit is enveloped by that of at
                    least this many higher-scoring hits, delete the hit.
                    Incompatible with: best_hit_overhang, best_hit_score_edge.
                    """,
                    equate=False),
            _Option(["-best_hit_overhang", "best_hit_overhang"],
                    """Best Hit algorithm overhang value (float, recommended value: 0.1)
                    Float between 0.0 and 0.5 inclusive.
                    Incompatible with: culling_limit.""",
                    equate=False),
            _Option(["-best_hit_score_edge", "best_hit_score_edge"],
                    """Best Hit algorithm score edge value (float, recommended value: 0.1)
                    Float between 0.0 and 0.5 inclusive.
                    Incompatible with: culling_limit.""",
                    equate=False),
        ]
        try:
            # Insert extra parameters - at the start just in case there
            # are any arguments which must come last:
            self.parameters = extra_parameters + self.parameters
        except AttributeError:
            # Should we raise an error? The subclass should have set this up!
            self.parameters = extra_parameters
        _NcbiblastCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # Option pairs which BLAST+ itself rejects when used together.
        incompatibles = {"subject_loc": ["db", "gilist", "negative_gilist", "seqidlist", "remote"],
                         "culling_limit": ["best_hit_overhang", "best_hit_score_edge"],
                         "subject": ["db", "gilist", "negative_gilist", "seqidlist"]}
        self._validate_incompatibilities(incompatibles)
        _NcbiblastCommandline._validate(self)
class _NcbiblastMain2SeqCommandline(_Ncbiblast2SeqCommandline):
    """Base Commandline object for (new) NCBI BLAST+ wrappers (PRIVATE).

    This is provided for subclassing, it deals with shared options
    common to the main BLAST tools blastp, blastn, blastx, tblastx, tblastn
    but not psiblast, rpsblast or rpstblastn.
    """

    def __init__(self, cmd=None, **kwargs):
        # cmd is the executable name, supplied by the concrete subclass.
        assert cmd is not None
        extra_parameters = [
            # Restrict search or results:
            _Option(["-db_soft_mask", "db_soft_mask"],
                    """Filtering algorithm for soft masking (integer).
                    Filtering algorithm ID to apply to the BLAST database as soft masking.
                    Incompatible with: db_hard_mask, subject, subject_loc""",
                    equate=False),
            _Option(["-db_hard_mask", "db_hard_mask"],
                    """Filtering algorithm for hard masking (integer).
                    Filtering algorithm ID to apply to the BLAST database as hard masking.
                    Incompatible with: db_soft_mask, subject, subject_loc""",
                    equate=False),
        ]
        try:
            # Insert extra parameters - at the start just in case there
            # are any arguments which must come last:
            self.parameters = extra_parameters + self.parameters
        except AttributeError:
            # Should we raise an error? The subclass should have set this up!
            self.parameters = extra_parameters
        _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # The two database masking modes are mutually exclusive, and neither
        # makes sense in BLAST-2-Sequences mode (subject/subject_loc).
        incompatibles = {"db_soft_mask": ["db_hard_mask", "subject", "subject_loc"],
                         "db_hard_mask": ["db_soft_mask", "subject", "subject_loc"]}
        self._validate_incompatibilities(incompatibles)
        _Ncbiblast2SeqCommandline._validate(self)
class NcbiblastpCommandline(_NcbiblastMain2SeqCommandline):
    """Create a commandline for the NCBI BLAST+ program blastp (for proteins).

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastall tool with separate tools for each of the searches.
    This wrapper therefore replaces BlastallCommandline with option -p blastp.

    >>> from Bio.Blast.Applications import NcbiblastpCommandline
    >>> cline = NcbiblastpCommandline(query="rosemary.pro", db="nr",
    ...                               evalue=0.001, remote=True, ungapped=True)
    >>> cline
    NcbiblastpCommandline(cmd='blastp', query='rosemary.pro', db='nr', evalue=0.001, remote=True, ungapped=True)
    >>> print(cline)
    blastp -query rosemary.pro -db nr -evalue 0.001 -remote -ungapped

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="blastp", **kwargs):
        self.parameters = [
            # General search options:
            _Option(["-task", "task"],
                    "Task to execute (string, blastp (default), blastp-fast or blastp-short).",
                    checker_function=lambda value: value in ["blastp",
                                                             "blastp-fast",
                                                             "blastp-short"],
                    equate=False),
            # NOTE(review): unlike the other options here, -matrix uses the
            # default equate=True (emitted as -matrix=NAME) - confirm intended.
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62)."),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics (string, default 2, i.e. True).
                    0, F or f: no composition-based statistics
                    2, T or t, D or d : Composition-based score adjustment as in
                    Bioinformatics 21:902-911, 2005, conditioned on sequence properties
                    Note that tblastn also supports values of 1 and 3.""",
                    # Bug fix: the old substring test `value in "0Ft2TtDd"`
                    # rejected the documented lowercase "f" and accepted
                    # multi-character substrings such as "Tt".
                    checker_function=lambda value: value in ("0", "F", "f", "2",
                                                             "T", "t", "D", "d"),
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Switch(["-ungapped", "ungapped"],
                    "Perform ungapped alignment only?"),
            # Miscellaneous options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
        ]
        _NcbiblastMain2SeqCommandline.__init__(self, cmd, **kwargs)
class NcbiblastnCommandline(_NcbiblastMain2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program blastn (for nucleotides).

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastall tool with separate tools for each of the searches.
    This wrapper therefore replaces BlastallCommandline with option -p blastn.

    For example, to run a search against the "nt" nucleotide database using the
    FASTA nucleotide file "m_cold.fasta" as the query, with an expectation value
    cut off of 0.001, saving the output to a file in XML format:

    >>> from Bio.Blast.Applications import NcbiblastnCommandline
    >>> cline = NcbiblastnCommandline(query="m_cold.fasta", db="nt", strand="plus",
    ...                               evalue=0.001, out="m_cold.xml", outfmt=5)
    >>> cline
    NcbiblastnCommandline(cmd='blastn', out='m_cold.xml', outfmt=5, query='m_cold.fasta', db='nt', evalue=0.001, strand='plus')
    >>> print(cline)
    blastn -out m_cold.xml -outfmt 5 -query m_cold.fasta -db nt -evalue 0.001 -strand plus

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="blastn", **kwargs):
        self.parameters = [
            # Input query options:
            _Option(["-strand", "strand"],
                    """Query strand(s) to search against database/subject.
                    Values allowed are "both" (default), "minus", "plus".""",
                    checker_function=lambda value: value in ["both",
                                                             "minus",
                                                             "plus"],
                    equate=False),
            # General search options:
            _Option(["-task", "task"],
                    """Task to execute (string, default 'megablast')
                    Allowed values 'blastn', 'blastn-short', 'dc-megablast', 'megablast'
                    (the default), or 'vecscreen'.""",
                    checker_function=lambda value: value in ['blastn',
                                                             'blastn-short',
                                                             'dc-megablast',
                                                             'megablast',
                                                             'vecscreen'],
                    equate=False),
            _Option(["-penalty", "penalty"],
                    "Penalty for a nucleotide mismatch (integer, at most zero).",
                    equate=False),
            _Option(["-reward", "reward"],
                    "Reward for a nucleotide match (integer, at least zero).",
                    equate=False),
            _Option(["-use_index", "use_index"],
                    "Use MegaBLAST database index (Boolean, Default = False)",
                    equate=False),
            _Option(["-index_name", "index_name"],
                    "MegaBLAST database index name.",
                    equate=False),
            # Query filtering options:
            _Option(["-dust", "dust"],
                    """Filter query sequence with DUST (string).
                    Format: 'yes', 'level window linker', or 'no' to disable.
                    Default = '20 64 1'.
                    """,
                    equate=False),
            _Option(["-filtering_db", "filtering_db"],
                    "BLAST database containing filtering elements (i.e. repeats).",
                    equate=False),
            _Option(["-window_masker_taxid", "window_masker_taxid"],
                    "Enable WindowMasker filtering using a Taxonomic ID (integer).",
                    equate=False),
            _Option(["-window_masker_db", "window_masker_db"],
                    "Enable WindowMasker filtering using this repeats database (string).",
                    equate=False),
            # Restrict search or results:
            _Option(["-perc_identity", "perc_identity"],
                    "Percent identity (real, 0 to 100 inclusive).",
                    equate=False),
            # Discontiguous MegaBLAST options
            _Option(["-template_type", "template_type"],
                    """Discontiguous MegaBLAST template type (string).
                    Allowed values: 'coding', 'coding_and_optimal' or 'optimal'
                    Requires: template_length.""",
                    checker_function=lambda value: value in ['coding', 'coding_and_optimal', 'optimal'],
                    equate=False),
            _Option(["-template_length", "template_length"],
                    """Discontiguous MegaBLAST template length (integer).
                    Allowed values: 16, 18, 21
                    Requires: template_type.""",
                    checker_function=lambda value: value in [16, 18, 21, '16', '18', '21'],
                    equate=False),
            # Extension options:
            _Switch(["-no_greedy", "no_greedy"],
                    "Use non-greedy dynamic programming extension"),
            _Option(["-min_raw_gapped_score", "min_raw_gapped_score"],
                    "Minimum raw gapped score to keep an alignment in the "
                    "preliminary gapped and traceback stages (integer).",
                    equate=False),
            _Switch(["-ungapped", "ungapped"],
                    "Perform ungapped alignment only?"),
            _Option(["-off_diagonal_range", "off_diagonal_range"],
                    """Number of off-diagonals to search for the 2nd hit (integer).
                    Expects a positive integer, or 0 (default) to turn off.
                    Added in BLAST 2.2.23+
                    """,
                    equate=False),
        ]
        _NcbiblastMain2SeqCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # The two discontiguous MegaBLAST options must be given as a pair.
        if (self.template_type and not self.template_length) \
                or (self.template_length and not self.template_type):
            # Bug fix: the message used to name template_type twice instead
            # of naming both co-dependent options.
            raise ValueError("Options template_type and template_length require each other.")
        _NcbiblastMain2SeqCommandline._validate(self)
class NcbiblastxCommandline(_NcbiblastMain2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program blastx (nucleotide query, protein database).

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastall tool with separate tools for each of the searches.
    This wrapper therefore replaces BlastallCommandline with option -p blastx.

    >>> from Bio.Blast.Applications import NcbiblastxCommandline
    >>> cline = NcbiblastxCommandline(query="m_cold.fasta", db="nr", evalue=0.001)
    >>> cline
    NcbiblastxCommandline(cmd='blastx', query='m_cold.fasta', db='nr', evalue=0.001)
    >>> print(cline)
    blastx -query m_cold.fasta -db nr -evalue 0.001

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="blastx", **kwargs):
        self.parameters = [
            # General search options:
            _Option(["-task", "task"],
                    "Task to execute (string, blastx (default) or blastx-fast).",
                    checker_function=lambda value: value in ["blastx",
                                                             "blastx-fast"],
                    equate=False),
            # Input query options:
            _Option(["-strand", "strand"],
                    """Query strand(s) to search against database/subject.
                    Values allowed are "both" (default), "minus", "plus".""",
                    checker_function=lambda value: value in ["both", "minus", "plus"],
                    equate=False),
            _Option(["-query_gencode", "query_gencode"],
                    "Genetic code to use to translate query (integer, default 1).",
                    equate=False),
            # General search options:
            _Option(["-frame_shift_penalty", "frame_shift_penalty"],
                    """Frame shift penalty (integer, at least 1, default ignored) (OBSOLETE).
                    This was removed in BLAST 2.2.27+""",
                    equate=False),
            _Option(["-max_intron_length", "max_intron_length"],
                    """Maximum intron length (integer).
                    Length of the largest intron allowed in a translated nucleotide
                    sequence when linking multiple distinct alignments (a negative
                    value disables linking). Default zero.""",
                    equate=False),
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62).",
                    equate=False),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics for blastp, blastx, or tblastn:
                    D or d: default (equivalent to 2 )
                    0 or F or f: no composition-based statistics
                    1: Composition-based statistics as in NAR 29:2994-3005, 2001
                    2 or T or t : Composition-based score adjustment as in
                    Bioinformatics 21:902-911, 2005, conditioned on sequence properties
                    3: Composition-based score adjustment as in
                    Bioinformatics 21:902-911, 2005, unconditionally
                    For programs other than tblastn, must either be absent or be D, F or 0
                    Default = `2'
                    """,
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Switch(["-ungapped", "ungapped"],
                    "Perform ungapped alignment only?"),
            # Miscellaneous options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
        ]
        _NcbiblastMain2SeqCommandline.__init__(self, cmd, **kwargs)
class NcbitblastnCommandline(_NcbiblastMain2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program tblastn.

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastall tool with separate tools for each of the searches.
    This wrapper therefore replaces BlastallCommandline with option -p tblastn.

    >>> from Bio.Blast.Applications import NcbitblastnCommandline
    >>> cline = NcbitblastnCommandline(help=True)
    >>> cline
    NcbitblastnCommandline(cmd='tblastn', help=True)
    >>> print(cline)
    tblastn -help

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="tblastn", **kwargs):
        self.parameters = [
            # General search options:
            _Option(["-task", "task"],
                    "Task to execute (string, tblastn (default) or tblastn-fast).",
                    checker_function=lambda value: value in ["tblastn",
                                                             "tblastn-fast"],
                    equate=False),
            _Option(["-db_gencode", "db_gencode"],
                    "Genetic code to use to translate query (integer, default 1).",
                    equate=False),
            _Option(["-frame_shift_penalty", "frame_shift_penalty"],
                    """Frame shift penalty (integer, at least 1, default ignored) (OBSOLETE).
                    This was removed in BLAST 2.2.27+""",
                    equate=False),
            _Option(["-max_intron_length", "max_intron_length"],
                    """Maximum intron length (integer).
                    Length of the largest intron allowed in a translated nucleotide
                    sequence when linking multiple distinct alignments (a negative
                    value disables linking). Default zero.""",
                    equate=False),
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62).",
                    equate=False),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics (string, default 2, i.e. True).
                    0, F or f: no composition-based statistics
                    1: Composition-based statistics as in NAR 29:2994-3005, 2001
                    2, T or t, D or d : Composition-based score adjustment as in
                    Bioinformatics 21:902-911, 2005, conditioned on sequence properties
                    3: Composition-based score adjustment as in Bioinformatics 21:902-911,
                    2005, unconditionally
                    Note that only tblastn supports values of 1 and 3.""",
                    # Bug fix: the old substring test `value in "0Ft12TtDd3"`
                    # rejected the documented lowercase "f" and accepted
                    # multi-character substrings such as "12".
                    checker_function=lambda value: value in ("0", "F", "f", "1", "2",
                                                             "T", "t", "D", "d", "3"),
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Switch(["-ungapped", "ungapped"],
                    "Perform ungapped alignment only?"),
            # Miscellaneous options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
            # PSI-TBLASTN options:
            _Option(["-in_pssm", "in_pssm"],
                    """PSI-BLAST checkpoint file
                    Incompatible with: remote, query""",
                    filename=True,
                    equate=False),
        ]
        _NcbiblastMain2SeqCommandline.__init__(self, cmd, **kwargs)
class NcbitblastxCommandline(_NcbiblastMain2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program tblastx.

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastall tool with separate tools for each of the searches.
    This wrapper therefore replaces BlastallCommandline with option -p tblastx.

    >>> from Bio.Blast.Applications import NcbitblastxCommandline
    >>> cline = NcbitblastxCommandline(help=True)
    >>> cline
    NcbitblastxCommandline(cmd='tblastx', help=True)
    >>> print(cline)
    tblastx -help

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="tblastx", **kwargs):
        self.parameters = [
            # Input query options:
            _Option(["-strand", "strand"],
                    """Query strand(s) to search against database/subject.
                    Values allowed are "both" (default), "minus", "plus".""",
                    checker_function=lambda value: value in ["both", "minus", "plus"],
                    equate=False),
            _Option(["-query_gencode", "query_gencode"],
                    "Genetic code to use to translate query (integer, default 1).",
                    equate=False),
            # General search options:
            # NOTE(review): the description below says "query" but -db_gencode
            # applies to the database/subject translation - confirm and reword.
            _Option(["-db_gencode", "db_gencode"],
                    "Genetic code to use to translate query (integer, default 1).",
                    equate=False),
            _Option(["-max_intron_length", "max_intron_length"],
                    """Maximum intron length (integer).
                    Length of the largest intron allowed in a translated nucleotide
                    sequence when linking multiple distinct alignments (a negative
                    value disables linking). Default zero.""",
                    equate=False),
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62).",
                    equate=False),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
        ]
        _NcbiblastMain2SeqCommandline.__init__(self, cmd, **kwargs)
class NcbipsiblastCommandline(_Ncbiblast2SeqCommandline):
    """Wrapper for the NCBI BLAST+ program psiblast.

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old blastpgp tool with a similar tool psiblast. This wrapper
    therefore replaces BlastpgpCommandline, the wrapper for blastpgp.

    >>> from Bio.Blast.Applications import NcbipsiblastCommandline
    >>> cline = NcbipsiblastCommandline(help=True)
    >>> cline
    NcbipsiblastCommandline(cmd='psiblast', help=True)
    >>> print(cline)
    psiblast -help

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="psiblast", **kwargs):
        self.parameters = [
            # General search options:
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62).",
                    equate=False),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics (string, default 2, i.e. True).
                    0, F or f: no composition-based statistics
                    2, T or t, D or d : Composition-based score adjustment
                    as in Bioinformatics 21:902-911, 2005, conditioned on
                    sequence properties
                    Note that tblastn also supports values of 1 and 3.""",
                    # Bug fix: the old substring test `value in "0Ft2TtDd"`
                    # rejected the documented lowercase "f" and accepted
                    # multi-character substrings such as "Tt".
                    checker_function=lambda value: value in ("0", "F", "f", "2",
                                                             "T", "t", "D", "d"),
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Option(["-gap_trigger", "gap_trigger"],
                    "Number of bits to trigger gapping (float, default 22).",
                    equate=False),
            # Miscellaneous options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
            # PSI-BLAST options:
            _Option(["-num_iterations", "num_iterations"],
                    """Number of iterations to perform (integer, at least one).
                    Default is one.
                    Incompatible with: remote""",
                    equate=False),
            _Option(["-out_pssm", "out_pssm"],
                    "File name to store checkpoint file.",
                    filename=True,
                    equate=False),
            _Option(["-out_ascii_pssm", "out_ascii_pssm"],
                    "File name to store ASCII version of PSSM.",
                    filename=True,
                    equate=False),
            _Switch(["-save_pssm_after_last_round", "save_pssm_after_last_round"],
                    "Save PSSM after the last database search."),
            _Switch(["-save_each_pssm", "save_each_pssm"],
                    # Bug fix: this help text used to cite nonexistent
                    # -save_pssm / -save_ascii_pssm options; the file names
                    # are actually given by -out_pssm / -out_ascii_pssm above.
                    """Save PSSM after each iteration
                    File name is given in -out_pssm or -out_ascii_pssm options.
                    """),
            _Option(["-in_msa", "in_msa"],
                    """File name of multiple sequence alignment to restart PSI-BLAST.
                    Incompatible with: in_pssm, query""",
                    filename=True,
                    equate=False),
            _Option(["-msa_master_idx", "msa_master_idx"],
                    """Index of sequence to use as master in MSA.
                    Index (1-based) of sequence to use as the master in the
                    multiple sequence alignment. If not specified, the first
                    sequence is used.""",
                    equate=False),
            _Option(["-in_pssm", "in_pssm"],
                    """PSI-BLAST checkpoint file.
                    Incompatible with: in_msa, query, phi_pattern""",
                    filename=True,
                    equate=False),
            # PSSM engine options:
            _Option(["-pseudocount", "pseudocount"],
                    """Pseudo-count value used when constructing PSSM.
                    Integer. Default is zero.""",
                    equate=False),
            _Option(["-inclusion_ethresh", "inclusion_ethresh"],
                    "E-value inclusion threshold for pairwise alignments (float, default 0.002).",
                    equate=False),
            _Switch(["-ignore_msa_master", "ignore_msa_master"],
                    """Ignore the master sequence when creating PSSM
                    Requires: in_msa
                    Incompatible with: msa_master_idx, in_pssm, query, query_loc, phi_pattern
                    """),
            # PHI-BLAST options:
            _Option(["-phi_pattern", "phi_pattern"],
                    """File name containing pattern to search.
                    Incompatible with: in_pssm""",
                    filename=True,
                    equate=False),
        ]
        _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # Option pairs which psiblast itself rejects when used together.
        incompatibles = {"num_iterations": ["remote"],
                         "in_msa": ["in_pssm", "query"],
                         "in_pssm": ["in_msa", "query", "phi_pattern"],
                         "ignore_msa_master": ["msa_master_idx", "in_pssm",
                                               "query", "query_loc", "phi_pattern"],
                         }
        self._validate_incompatibilities(incompatibles)
        _Ncbiblast2SeqCommandline._validate(self)
class NcbirpsblastCommandline(_NcbiblastCommandline):
    """Wrapper for the NCBI BLAST+ program rpsblast.

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old rpsblast tool with a similar tool of the same name. This
    wrapper replaces RpsBlastCommandline, the wrapper for the old rpsblast.

    >>> from Bio.Blast.Applications import NcbirpsblastCommandline
    >>> cline = NcbirpsblastCommandline(help=True)
    >>> cline
    NcbirpsblastCommandline(cmd='rpsblast', help=True)
    >>> print(cline)
    rpsblast -help

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="rpsblast", **kwargs):
        # TODO - remove the -word_size argument as per BLAST+ 2.2.30
        # (BLAST team say it should never have been included, since
        # the word size is set when building the domain database.)
        # This likely means reviewing the class hierarchy again.
        self.parameters = [
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Restrict search or results:
            _Option(["-culling_limit", "culling_limit"],
                    """Hit culling limit (integer).
                    If the query range of a hit is enveloped by that of at
                    least this many higher-scoring hits, delete the hit.
                    Incompatible with: best_hit_overhang, best_hit_score_edge.
                    """,
                    equate=False),
            _Option(["-best_hit_overhang", "best_hit_overhang"],
                    """Best Hit algorithm overhang value (recommended value: 0.1)
                    Float between 0.0 and 0.5 inclusive.
                    Incompatible with: culling_limit.""",
                    equate=False),
            _Option(["-best_hit_score_edge", "best_hit_score_edge"],
                    """Best Hit algorithm score edge value (recommended value: 0.1)
                    Float between 0.0 and 0.5 inclusive.
                    Incompatible with: culling_limit.""",
                    equate=False),
            # General search options:
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics.
                    D or d: default (equivalent to 0 )
                    0 or F or f: Simplified Composition-based statistics as in
                    Bioinformatics 15:1000-1011, 1999
                    1 or T or t: Composition-based statistics as in NAR 29:2994-3005, 2001
                    Default = `0'
                    """,
                    # NOTE(review): substring membership, so e.g. "Ff" would
                    # also pass this check - confirm if that matters here.
                    checker_function=lambda value: value in "Dd0Ff1Tt",
                    equate=False),
            # Misc options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
        ]
        _NcbiblastCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # Hit culling and the Best Hit algorithm are mutually exclusive.
        incompatibles = {"culling_limit": ["best_hit_overhang", "best_hit_score_edge"]}
        self._validate_incompatibilities(incompatibles)
        _NcbiblastCommandline._validate(self)
class NcbirpstblastnCommandline(_NcbiblastCommandline):
    """Wrapper for the NCBI BLAST+ program rpstblastn.

    With the release of BLAST+ (BLAST rewritten in C++ instead of C), the NCBI
    replaced the old rpsblast tool with a similar tool of the same name, and a
    separate tool rpstblastn for Translated Reverse Position Specific BLAST.

    >>> from Bio.Blast.Applications import NcbirpstblastnCommandline
    >>> cline = NcbirpstblastnCommandline(help=True)
    >>> cline
    NcbirpstblastnCommandline(cmd='rpstblastn', help=True)
    >>> print(cline)
    rpstblastn -help

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="rpstblastn", **kwargs):
        # TODO - remove the -word_size argument as per BLAST+ 2.2.30
        # (BLAST team say it should never have been included, since
        # the word size is set when building the domain database.)
        # This likely means reviewing the class hierarchy again.
        self.parameters = [
            # Input query options:
            _Option(["-strand", "strand"],
                    """Query strand(s) to search against database/subject.
                    Values allowed are "both" (default), "minus", "plus".""",
                    checker_function=lambda value: value in ["both",
                                                             "minus",
                                                             "plus"],
                    equate=False),
            _Option(["-query_gencode", "query_gencode"],
                    "Genetic code to use to translate query (integer, default 1).",
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Switch(["-ungapped", "ungapped"],
                    "Perform ungapped alignment only?"),
        ]
        _NcbiblastCommandline.__init__(self, cmd, **kwargs)
class NcbiblastformatterCommandline(_NcbibaseblastCommandline):
    """Wrapper for the NCBI BLAST+ program blast_formatter.

    With the release of BLAST 2.2.24+ (i.e. the BLAST suite rewritten in C++
    instead of C), the NCBI added the ASN.1 output format option to all the
    search tools, and extended the blast_formatter to support this as input.
    The blast_formatter command allows you to convert the ASN.1 output into
    the other output formats (XML, tabular, plain text, HTML).

    >>> from Bio.Blast.Applications import NcbiblastformatterCommandline
    >>> cline = NcbiblastformatterCommandline(archive="example.asn", outfmt=5, out="example.xml")
    >>> cline
    NcbiblastformatterCommandline(cmd='blast_formatter', out='example.xml', outfmt=5, archive='example.asn')
    >>> print(cline)
    blast_formatter -out example.xml -outfmt 5 -archive example.asn

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.

    Note that this wrapper is for the version of blast_formatter from BLAST
    2.2.24+ (or later) which is when the NCBI first announced the inclusion
    this tool. There was actually an early version in BLAST 2.2.23+ (and
    possibly in older releases) but this did not have the -archive option
    (instead -rid is a mandatory argument), and is not supported by this
    wrapper.
    """

    def __init__(self, cmd="blast_formatter", **kwargs):
        self.parameters = [
            # Input options
            _Option(["-rid", "rid"],
                    "BLAST Request ID (RID), not compatible with archive arg",
                    equate=False),
            _Option(["-archive", "archive"],
                    "Archive file of results, not compatible with rid arg.",
                    filename=True,
                    equate=False),
            # Restrict search or results
            _Option(["-max_target_seqs", "max_target_seqs"],
                    "Maximum number of aligned sequences to keep",
                    # NOTE(review): this comparison raises TypeError if the
                    # value is supplied as a string rather than int - confirm
                    # callers always pass an integer.
                    checker_function=lambda value: value >= 1,
                    equate=False),
        ]
        _NcbibaseblastCommandline.__init__(self, cmd, **kwargs)

    def _validate(self):
        # The RID (server-side result) and a local archive file are
        # alternative inputs - only one may be given.
        incompatibles = {"rid": ["archive"]}
        self._validate_incompatibilities(incompatibles)
        _NcbibaseblastCommandline._validate(self)
class NcbideltablastCommandline(_Ncbiblast2SeqCommandline):
    """Create a commandline for the NCBI BLAST+ program deltablast (for proteins).

    This is a wrapper for the deltablast command line command included in
    the NCBI BLAST+ software (not present in the original BLAST).

    >>> from Bio.Blast.Applications import NcbideltablastCommandline
    >>> cline = NcbideltablastCommandline(query="rosemary.pro", db="nr",
    ...                                   evalue=0.001, remote=True)
    >>> cline
    NcbideltablastCommandline(cmd='deltablast', query='rosemary.pro', db='nr', evalue=0.001, remote=True)
    >>> print(cline)
    deltablast -query rosemary.pro -db nr -evalue 0.001 -remote

    You would typically run the command line with cline() or via the Python
    subprocess module, as described in the Biopython tutorial.
    """

    def __init__(self, cmd="deltablast", **kwargs):
        self.parameters = [
            # General search options:
            # NOTE(review): unlike the other options here, -matrix uses the
            # default equate=True (emitted as -matrix=NAME) - confirm intended.
            _Option(["-matrix", "matrix"],
                    "Scoring matrix name (default BLOSUM62)."),
            _Option(["-threshold", "threshold"],
                    "Minimum score for words to be added to the BLAST lookup table (float).",
                    equate=False),
            _Option(["-comp_based_stats", "comp_based_stats"],
                    """Use composition-based statistics (string, default 2, i.e. True).
                    0, F or f: no composition-based statistics.
                    2, T or t, D or d : Composition-based score adjustment as in
                    Bioinformatics 21:902-911, 2005, conditioned on sequence properties
                    Note that tblastn also supports values of 1 and 3.""",
                    # Bug fix: the old substring test `value in "0Ft2TtDd"`
                    # rejected the documented lowercase "f" and accepted
                    # multi-character substrings such as "Tt".
                    checker_function=lambda value: value in ("0", "F", "f", "2",
                                                             "T", "t", "D", "d"),
                    equate=False),
            # Query filtering options:
            _Option(["-seg", "seg"],
                    """Filter query sequence with SEG (string).
                    Format: "yes", "window locut hicut", or "no" to disable.
                    Default is "12 2.2 2.5""",
                    equate=False),
            # Extension options:
            _Option(["-gap_trigger", "gap_trigger"],
                    "Number of bits to trigger gapping Default = 22",
                    equate=False),
            # Miscellaneous options:
            _Switch(["-use_sw_tback", "use_sw_tback"],
                    "Compute locally optimal Smith-Waterman alignments?"),
            # PSI-BLAST options
            _Option(["-num_iterations", "num_iterations"],
                    """Number of iterations to perform. (integer >=1, Default is 1)
                    Incompatible with: remote""",
                    equate=False),
            _Option(["-out_pssm", "out_pssm"],
                    "File name to store checkpoint file.",
                    filename=True,
                    equate=False),
            _Option(["-out_ascii_pssm", "out_ascii_pssm"],
                    "File name to store ASCII version of PSSM.",
                    filename=True,
                    equate=False),
            _Switch(["-save_pssm_after_last_round", "save_pssm_after_last_round"],
                    "Save PSSM after the last database search."),
            _Switch(["-save_each_pssm", "save_each_pssm"],
                    """Save PSSM after each iteration
                    File name is given in -save_pssm or -save_ascii_pssm options.
                    """),
            # PSSM engine options
            _Option(["-pseudocount", "pseudocount"],
                    "Pseudo-count value used when constructing PSSM (integer, default 0).",
                    equate=False),
            _Option(["-domain_inclusion_ethresh", "domain_inclusion_ethresh"],
                    """E-value inclusion threshold for alignments with conserved domains.
                    (float, Default is 0.05)""",
                    equate=False),
            _Option(["-inclusion_ethresh", "inclusion_ethresh"],
                    "Pairwise alignment e-value inclusion threshold (float, default 0.002).",
                    equate=False),
            # DELTA-BLAST options
            _Option(["-rpsdb", "rpsdb"],
                    # Bug fix: help text previously read "dtring".
                    "BLAST domain database name (string, Default = 'cdd_delta').",
                    equate=False),
            _Switch(["-show_domain_hits", "show_domain_hits"],
                    """Show domain hits?
                    Incompatible with: remote, subject""")
        ]
        _Ncbiblast2SeqCommandline.__init__(self, cmd, **kwargs)
def _test():
"""Run the Bio.Blast.Applications module's doctests."""
import doctest
doctest.testmod(verbose=1)
# Allow running this module directly to execute its doctest suite.
if __name__ == "__main__":
    # Run the doctests
    _test()
|
zjuchenyuan/BioWeb
|
Lib/Bio/Blast/Applications.py
|
Python
|
mit
| 56,120
|
## A module to do operations on SQllite db , Execute DDL , bhav csv to bhav table etc ##
## Licensed Freeware ##
## Author Paarth Batra ##
## Creation Date : 21st Oct 2013 ##
## Last Update Date : 21st Oct 2013 ##
##Example Call Bhavcsv_To_Sqlite3db('D:\Paarth\Codes\Python\Work\Database\csv_to_sqlite\sqllite_dbfiles','SMA_ANALYZER.DB','D:\Paarth\Codes\Python\Work\Database\csv_to_sqlite\Downloaded_Bhav_csv_files','2013OCT18.csv') ##
## Version 1.0 ##
import csv, sqlite3
import Predict_StockMarket as p
def executeDDL(dbpath='',dbfilename='',ddl_sql=''):
print "Parameters passed to me are : "
print "Database file path : "+dbpath
print "Database file name : "+dbfilename
print "DDL SQL : "+ddl_sql
dbstr=dbpath+dbfilename
try:
con = sqlite3.connect(dbstr)
cur = con.cursor()
print "connection made to ",dbstr
except Exception as E:
print "Exception occurred while creating connection :",E
return "Exception occurred while creating table ",E
try:
cur.execute(ddl_sql)
print "SQL Processed Successfully"
except Exception as E:
print "Exception occurred while creating table ",E
return "Exception occurred while creating table ",E
con.close()
return 0
def executeOneResultDML(dbpath='',dbfilename='',dml_sql=''):
#This function will result only one row in 1 column i.e. only 1 value
print "Parameters passed to me are : "
print "Database file path : "+dbpath
print "Database file name : "+dbfilename
print "DDL SQL : "+dml_sql
dbstr=dbpath+dbfilename
try:
con = sqlite3.connect(dbstr)
cur = con.cursor()
print "connection made to ",dbstr
except Exception as E:
exceptionMsg="Exception occurred in connection Block :"+str(E)
#print exceptionMsg
return exceptionMsg
data=[]
try:
print dml_sql
cur.execute(dml_sql)
resultValue= cur.fetchall()
except Exception as E:
exceptionMsg="Exception occurred in execution Block :"+str(E)
#print exceptionMsg
return exceptionMsg
con.close()
return resultValue
def Bhavcsv_To_Sqlite3db(dbpath='',dbfilename='',csvpath='',csvfilename=''):
dbStr=dbpath+dbfilename
try:
con = sqlite3.connect(dbStr)
cur = con.cursor()
print "connection made to ",dbStr
except Exception as E:
print "Exception occurred while creating connection :",E
return "Exception occurred while creating table ",E
csvFile=csvpath+csvfilename
print "File name to read is ",csvFile
with open(csvFile,'rb') as fileData: # `with` statement available in 2.5+
# csv.DictReader uses first line in file for column headings by default
dr = csv.DictReader(fileData) # comma is default delimiter
to_db = [(i['SYMBOL'], i['SERIES'],i['OPEN'],i['HIGH'],i['LOW'],i['CLOSE'],i['LAST'],i['PREVCLOSE'],i['TOTTRDQTY'],i['TOTTRDVAL'],i['TIMESTAMP'],i['TOTALTRADES'],i['ISIN']) for i in dr]
try:
cur.executemany("INSERT INTO bhav (SYMBOL,SERIES,OPEN,HIGH,LOW,CLOSE,LAST,PREVCLOSE,TOTTRDQTY,TOTTRDVAL,TIMESTAMP,TOTALTRADES,ISIN) VALUES (?, ?,?,?,?,?,?,?,?,?,?,?,?);", to_db)
con.commit()
except Exception as E:
exceptionMsg="Exception occurred while Inserting into DB :"+str(E)
#print exceptionMsg
return exceptionMsg
return 0
def processBhavD_BV(dbpath='',dbfilename='',Date='20140116',tempTableName='',mainTableName=''):
dbStr=dbpath+dbfilename
print "processBhavD_VV : Values passed to me db_path = %s \n DB Name = %s \n Date = %s \n temptablename = %s \n maintablename = %s"%(dbpath,dbfilename,Date,tempTableName,mainTableName)
try:
con = sqlite3.connect(dbStr)
cur = con.cursor()
print "connection made to ",dbStr
except Exception as E:
print "Exception occurred while creating connection :",E
Date = p.dateToBhavTimestamp(Date)
print "Date now is ",Date
SQL="insert into D_BV select SYMBOL,SERIES,OPEN,HIGH,LOW,CLOSE,LAST,PREVCLOSE,TOTTRDQTY,TOTTRDVAL," \
"TIMESTAMP,TOTALTRADES,ISIN from BHAV " \
"where series = 'EQ' and timestamp = '"+Date+"';"
print SQL
try:
cur.execute(SQL)
con.commit()
return "Data Processed Successfully"
except Exception as E:
exceptionMsg="Exception occurred while Inserting into DB :"+str(E)
#print exceptionMsg
return exceptionMsg
def executeSelect(dbpath='',dbfilename='',select_sql=''):
#This function will result only one row in 1 column i.e. only 1 value
#print "Parameters passed to me are : "
#print "Database file path : "+dbpath
#print "Database file name : "+dbfilename
#print "DDL SQL : "+select_sql
dbstr=dbpath+dbfilename
try:
con = sqlite3.connect(dbstr)
cur = con.cursor()
#print "connection made to ",dbstr
except Exception as E:
exceptionMsg="Exception occurred in connection Block :"+str(E)
#print exceptionMsg
return exceptionMsg
data=[]
try:
cur.execute(select_sql)
resultValue= cur.fetchall()
col_name_list = [tuple[0] for tuple in cur.description]
print col_name_list
except Exception as E:
exceptionMsg="Exception occurred in select_sql Block :"+str(E)
#print exceptionMsg
return exceptionMsg
con.close()
return resultValue
#createBhavTableSQL="""create table BHAV (SYMBOL TEXT,SERIES TEXT,OPEN integer,HIGH integer,LOW
# integer,CLOSE integer,LAST integer,PREVCLOSE integer,TOTTRDQTY integer,TOTTRDVAL integer,
# TIMESTAMP text,TOTALTRADES integer,ISIN text,Primary key(SYMBOL,SERIES,TIMESTAMP));"""
#create_MC_TableSQL="""create table MCdata (STOCK_NAME text,ANALYST_NAME text,DATE text,TIME text,SOURCE text,
#TIP text,ONELINER text,URL text,Primary key(STOCK_NAME,ANALYST_NAME ,DATE,TIME));"""
#dropBhavTableSQL="""drop table BHAV"""
#csvFilePath='D:\Paarth\Codes\Python\Work\QT\The SMA Analyzer\Downloaded_Bhav_csv_files'
#dbFilePath='D:\Paarth\Google_Drive\Google Drive\Codes\Python\Work\Web_URL\Analyst_Analyzer\Data\\bhav\\'
#dbFileName='SMA_ANALYZER.DB'
#csvFileName='2013OCT21.csv'
#Dropping existing Bhav table
#result=executeDDL(dbFilePath,dbFileName,dropBhavTableSQL)
#print"\n\nResult of running dropBhavtable is ",result
#creating new bhav table
#import Predict_StockMarket.config as cnf
#resultCreateTable=executeDDL(cnf.dbPath,cnf.dbName,createBhavTableSQL)
#print"\n\nResult of running createBhavTableSQL is ",resultCreateTable
#inserting data from csv
#resultCSVtosqlite=Bhavcsv_To_Sqlite3db(dbFilePath,dbFileName,csvFilePath,csvFileName)
#print"\n\nResult of running Bhavcsv_To_Sqlite3db is ",resultCSVtosqlite
#selectOneSQL="select distinct timestamp from BHAV"
#print executeOneResultDML(dbFilePath,dbFileName,selectOneSQL)
|
PaarthBatra/Predict_StockMarket
|
Database/SQLiteDBOperations.py
|
Python
|
mit
| 7,467
|
#!/usr/bin/python
# Chat Program
# author: https://medium.com/@dataq
# execute server: ./chat.py server <IP>:<PORT>
# execute server: ./chat.py client <Server IP>:<Server PORT> <username>
# type "/<username> <message>" to send private message
import sys
import socket
import select
import signal
import json
class Server:
    """Chat server: accepts client connections and relays JSON messages.

    NOTE(review): the way this class combines/mutates ``self.users``
    (list + dict.values() in select, ``del`` inside an ``items()``
    iteration) only works on Python 2, where those methods return
    lists -- confirm the intended interpreter version.
    """
    def __init__(self, sockaddr):
        """Bind and listen on ``sockaddr`` (host, port)."""
        # create connection socket
        self.connsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.connsock.bind(sockaddr)
        self.connsock.listen(1)
        print("Listen at {}...".format(self.connsock.getsockname()))
        # this dictionary contain client's username and socket
        self.users = {}
    def __broadcast__(self, datasock, payload):
        """Send ``payload`` to every connected user except the sender."""
        # send to all users except the sender
        for s in self.users.values():
            if s is not datasock:
                s.sendall(payload)
    def __disconnected__(self, datasock):
        """Drop a disconnected client from the user registry."""
        print("Client {} disconnected.".format(datasock.getpeername()))
        # remove from user dictionary
        for key, val in self.users.items():
            if val is datasock:
                del self.users[key]
    def __forward__(self, datasock):
        """Read one payload from ``datasock`` and route it by its "To" field."""
        payload = datasock.recv(2048)
        if payload:
            # get "To" username
            dst = json.loads(payload.decode("utf-8"))["To"]
            if dst == "all":
                self.__broadcast__(datasock, payload)
            else:
                # send private message
                self.users[dst].sendall(payload)
        else:
            # an empty read means the peer closed the connection
            self.__disconnected__(datasock)
    def __serve__(self, ready):
        """Handle each readable socket: new connection or incoming data."""
        for s in ready:
            if s is self.connsock:
                # receive connection
                datasock, peername = self.connsock.accept()
                # get username: the first payload a client sends is its name
                username = datasock.recv(2048)
                self.users[username] = datasock
                print("Client {} connected from {}.".format(username, peername))
            else:
                self.__forward__(s)
    def run(self):
        """Select-loop until Ctrl+C, then close all sockets."""
        print("Press Crtl+c to stop...")
        while True:
            try:
                signal.signal(signal.SIGINT, signal.default_int_handler)
                ready, _, _ = select.select([self.connsock] + self.users.values(), [], [])
                self.__serve__(ready)
            except KeyboardInterrupt:
                break
        # close client's sockets
        for s in self.users.values():
            s.close()
        # close server's socket
        self.connsock.close()
class Client:
    """Chat client: connects to the server and relays console input."""
    def __init__(self, sockaddr, username):
        """Connect to the server at ``sockaddr`` and announce ``username``."""
        self.online = True
        # create socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.connect(sockaddr)
        # send username
        self.username = username
        self.sock.sendall(self.username.encode("utf-8"))
        print(
            "User {} connected to {} from {}.".format(self.username, self.sock.getpeername(), self.sock.getsockname()))
    def __receive__(self):
        """Print one incoming message; mark ourselves offline if the server is gone."""
        payload = self.sock.recv(2048)
        if payload:
            # decode using utf-8 then read json data
            data = json.loads(payload.decode("utf-8"))
            print("({}) > {}".format(data["From"], data["Msg"]))
        else:
            print("Server disconnected.")
            self.online = False
    def __send__(self):
        """Read a line from stdin and send it as a JSON message.

        Lines of the form "/<user> <message>" are sent privately.
        NOTE(review): an empty input line makes ``msg[0]`` raise IndexError.
        """
        dst = "all"
        msg = sys.stdin.readline().strip()
        if msg[0] == "/":
            msgs = msg.split(" ", 1)
            dst = msgs[0][1:]
            msg = msgs[1]
        # send json data encoded using utf-8
        self.sock.sendall(json.dumps({"From": self.username, "To": dst, "Msg": msg}).encode("utf-8"))
    def run(self):
        """Select over the socket and stdin until Ctrl+C or server loss."""
        rlist = [self.sock, sys.stdin]
        print("Press Crtl+c to stop...")
        while self.online:
            try:
                signal.signal(signal.SIGINT, signal.default_int_handler)
                # waiting for input from the socket or the user
                ready, _, _ = select.select(rlist, [], [])
                for i in ready:
                    if i is self.sock:
                        self.__receive__()
                    else:
                        self.__send__()
            except KeyboardInterrupt:
                self.online = False
        # close socket
        self.sock.close()
# CLI entry point:
#   chat.py server <IP>:<PORT>
#   chat.py client <Server IP>:<Server PORT> <username>
if __name__ == "__main__":
    ip, port = sys.argv[2].split(":")
    sockaddr = (ip, int(port))
    app = Server(sockaddr) if sys.argv[1] == "server" else Client(sockaddr, sys.argv[3])
    app.run()
|
datanduth/python-tutorial
|
Network Tutorial/Chat/chat.py
|
Python
|
mit
| 4,718
|
#!/usr/bin/env python
'''
FILE NAME : [:VIM_EVAL:]expand('%:t')[:END_EVAL:]
AUTHOR : msarver
CREATE DATE : [:VIM_EVAL:]strftime('%d %B %Y')[:END_EVAL:]
'''
|
mrsarver/dotfiles
|
.vim/skeleton/skeleton.py
|
Python
|
mit
| 157
|
from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import InputRequired, Length
class CommentForm(FlaskForm):
    """Form for comments.

    A single required textarea; comments shorter than 6 characters are
    rejected with a validation message.
    """
    # BUG FIX: the user-facing validation message was garbled
    # ("should be as min 6 characters lenght").
    comment = TextAreaField('Comment:', validators=[
        InputRequired(),
        Length(min=6,
               message="Your comment should be at least 6 characters long.")])
|
oldani/nanodegree-blog
|
app/forms/comment_form.py
|
Python
|
mit
| 486
|
__author__ = 'lac'
import datetime
from django.http import HttpResponse,Http404
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from myblog.models import BlogPost
from django.http import Http404, HttpResponseRedirect
from django.views.decorators.cache import cache_page
from django.views.decorators.cache import cache_control
from django.views.decorators.cache import never_cache
from django.contrib.syndication.views import Feed
from django.views.decorators.csrf import csrf_exempt
from myblog.forms import BlogForm,TagForm
import time
@cache_page(60 * 15)
@cache_control(public=True, must_revalidate=True, max_age=1200)
def index_page(request):
    '''Render the blog index page with pagination.

    The cache_page decorator caches the rendered page for 15 minutes.
    The cache_control decorator tells upstream caches that the content may
    be shared, and tells the client browser to revalidate the cache on
    every access, with a cache lifetime of 1200 seconds.
    '''
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
    posts = BlogPost.objects.all()
    paginator = Paginator(posts, 5)  # 5 posts per page
    page = request.GET.get('page')
    try:
        posts_of_page = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        posts_of_page = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end: deliver the last page instead.
        posts_of_page = paginator.page(paginator.num_pages)
    return render(request, 'index.html', {'posts_of_page': posts_of_page}, )
@never_cache
def blog_show(request, id=''):
    '''Render a blog post's full content.

    Paragraph, photo and code objects belonging to the post are merged and
    ordered by their ``sequence`` attribute into a single display list that
    is handed to the template.

    The never_cache decorator keeps this page uncached so that a freshly
    submitted comment is visible immediately after a refresh.
    '''
    def create_post_objects(objects, output_dict):
        # Key each related object by its integer sequence number.
        for i in range(len(objects)):
            output_dict[int('%d'%objects[i].sequence)] = objects[i]
    try:
        post = BlogPost.objects.get(id=id)
    except BlogPost.DoesNotExist:
        raise Http404
    blog_post_list = {}
    photos = post.photo_set.all()
    codes = post.code_set.all()
    paragraphs = post.paragraph_set.all()
    create_post_objects(photos, blog_post_list)
    create_post_objects(codes, blog_post_list)
    create_post_objects(paragraphs, blog_post_list)
    context_list = []
    for x in sorted(blog_post_list):
        context_list.append(blog_post_list[x])
    # rs holds the neighbouring posts (0 when there is none); assumes post
    # ids are consecutive integers -- TODO confirm.
    rs = {}
    try:
        next_post = BlogPost.objects.get(id=int(id)+1)
        rs['next'] = next_post
    except BlogPost.DoesNotExist:
        rs['next'] = 0
    try:
        pre_post = BlogPost.objects.get(id=int(id)-1)
        rs['pre'] = pre_post
    except BlogPost.DoesNotExist:
        rs['pre'] = 0
    return render(
        request, 'blog_show.html', {
            'post': post, 'context_list': context_list, 'next_post': rs['next'],
            'pre_post': rs['pre'],
        },
    )
def blog_show_comment(request, id=''):
    """Render the comments page for the blog post with the given id."""
    blog = BlogPost.objects.get(id=id)
    # BUG FIX: django.shortcuts.render requires the request as its first
    # positional argument; the original call omitted it and would raise
    # at runtime.
    return render(request, 'blog_comments_show.html', {"blog": blog})
def RSS_url(request):
    """Placeholder RSS endpoint; always answers with the text 'haoba'."""
    response = HttpResponse('haoba')
    return response
def add_blog(request):
    """Render the blog-creation page.

    NOTE(review): ``form`` and ``tag`` are constructed but never passed to
    the template -- presumably the template was meant to receive them;
    confirm against blog_add.html.
    """
    form = BlogForm()
    tag = TagForm()
    return render_to_response('blog_add.html')
@csrf_exempt
def add_blog_action(request):
    """Create a BlogPost from the POSTed 'title' and 'para' fields.

    NOTE(review): csrf_exempt disables CSRF protection for this endpoint,
    and the 'tags' value is read but never stored -- confirm both are
    intentional.
    """
    if 'title' in request.POST and 'para' in request.POST:
        title = request.POST['title']
        summary = request.POST['para']
        if 'tags' in request.POST:
            tag = request.POST['tags']
        # timestamp is the current local date formatted as YYYY-MM-DD
        blognew = BlogPost(title =title,author='lac',summary=summary,timestamp=time.strftime('%Y-%m-%d',time.localtime(time.time())))
        blognew.save()
        return render_to_response("errors.html",{"message":"success"})
    else:
        return render_to_response('add_blog.html',{"message": "请输入内容"})
|
liaicheng/lacblog
|
code/myblog/views.py
|
Python
|
mit
| 3,993
|
# I want to Define what descriptors can be used with each weapon.
# I'll start with first, opening, and reading a file
# with a weapon and its damage type on it.
import sys
# Default weapon-definition file: one "name,damage" pair per line.
wepfile = "Weapons.txt"
# Define the 'Weapon' Class, which will be how the weapon information is stored.
class WeaponObj(object):
    """Holds a weapon's name and the kind of damage it deals."""

    def __init__(self):
        """Start every weapon with placeholder name/damage text."""
        self.name = "I'm a Weapon"
        self.dmg = "I do Damage"

    def swing(self):
        """Print the weapon's name and damage, joined by ' & '."""
        line = " " + self.name + " & " + self.dmg
        print(line)
# Define the fluff text that goes with everything.
class DescripObj(object):
    """Flavour-text holder: who acts (subject) and what they do (verb)."""

    def __init__(self):
        """Default to a bandit attacking."""
        self.subject = "Bandit"
        self.verb = "Attacks"
# Testing the creation of this object
FirstWep = WeaponObj()
FirstWep.swing()
# Now we want to open the file, and split it into a variable.
# Each line of wepfile becomes a list of comma-separated fields.
words = []
with open(wepfile) as opf:
    for line in opf:
        words.append([word for word in line.strip().split(',')])
# NOTE(review): each pair overwrites FirstWep, so after this loop FirstWep
# holds the values from the LAST valid line of the file.
for pair in words:
    try:
        FirstWep.name , FirstWep.dmg = pair[0],pair[1]
        #Do some other things
    except IndexError:
        print("A line in the file doesn't have enough entries.")
# Test again
FirstWep.swing()
# Now we check if we can make a list of Weapons
# First we grab the Martial weapon file
mfile = "Martial.txt"
# Count how many lines in the file (blank lines are not counted).
with open(mfile) as mf:
    count = sum(1 for line in mf if line.rstrip('\n'))
# Create a List of Instances
WeaponList = [ WeaponObj() for i in range(count)]
# Now we'll duplicate some of the work above and put that information in the instances.
# NOTE(review): count skips blank lines but myWeapons keeps them, so the two
# can disagree if the file contains blank lines -- confirm the file format.
myWeapons = []
n = 0
with open(mfile) as mf:
    for line in mf:
        myWeapons.append([word for word in line.strip().split(',')])
for pair in myWeapons:
    try:
        WeaponList[n].name , WeaponList[n].dmg = pair[0],pair[1]
        # n only advances on valid lines, so short lines are skipped.
        n += 1
    except IndexError:
        print("A line in the file doesn't have enough entries.")
for i in range(count):
    WeaponList[i].swing()
|
Laventhros/DescGenerator
|
Test.py
|
Python
|
mit
| 2,088
|
import json
import csv
def get_a(channel, device):
    """Read channels/<channel>/<device>.csv and build fully-qualified tags.

    The first CSV row is treated as a header and skipped.  Each remaining
    row's first cell is turned into "<channel>.<device>.<cell>".

    Returns the list of tag names.
    """
    filename = "channels/%s/%s.csv" % (channel, device)
    y = []
    with open(filename, 'rb') as f:
        reader = csv.reader(f)
        for i, row in enumerate(reader):
            # Skip the header row.  (The original tracked a manual counter
            # starting at 1 and also built an unused parallel list `z`.)
            if i == 0:
                continue
            y.append("%s.%s.%s" % (channel, device, row[0]))
    return y
def ch_tags(channel, devices):
    """Collect the tag lists of every device on one channel into one list."""
    tags = []
    for dev in devices:
        tags.extend(get_a(channel, dev))
    return tags
#print json.dumps(tags)
def main():
    """Gather the tags of the configured channels/devices and dump them."""
    # The first assignment is immediately overridden; kept to preserve the
    # original configuration history.
    channels = ["CP_OP10C_D","CP_OP20C_D"]
    channels = ["Digi"]
    devices = ["cool_system"]#,"hyd_system", "power_system","screw_system","sp_system","TEST" ]
    collected = []
    for ch in channels:
        collected.extend(ch_tags(ch, devices))
    print(len(collected))
    print(json.dumps(collected))
# Script entry point.
if __name__ == "__main__":
    main()
|
mabotech/mabo.io
|
node/opcua/monitor/parse_tags02.py
|
Python
|
mit
| 1,157
|
from HTMLParser import HTMLParser
class ItemGridParser(HTMLParser):
    """Collects (product_id, product_path) pairs from an item-grid page.

    Parsing happens once, in the constructor; the public ``feed`` method
    is deliberately disabled afterwards.
    """
    def __init__(self, html_string, *args, **kwargs):
        HTMLParser.__init__(self, *args, **kwargs)
        self.products = []
        # Parse immediately via the base class (our own feed() raises).
        HTMLParser.feed(self, html_string)
    @property
    def product_id_to_product_path(self):
        """Mapping of product id -> product path."""
        return dict(self.products)
    @property
    def product_ids(self):
        """Product ids in document order."""
        return [pid for pid, _path in self.products]
    def handle_starttag(self, tag, attrs):
        """Record anchors whose only CSS class is ``js-product-link``."""
        if tag != 'a':
            return
        attr_map = dict(attrs)
        if attr_map.get('class', '').split() == ['js-product-link']:
            path = attr_map['href']
            # The trailing path segment is the product id.
            self.products.append((path.split('/')[-1], path))
    def feed(*args, **kwargs):
        raise NotImplementedError('ItemGridParser.feed')
class ProductPageParser(HTMLParser):
    """State-machine parser extracting attributes from a product page.

    Collected into ``product_attributes``: producer_name, name,
    description, price, currency (fixed 'USD') and categories.
    """
    # Parser states: READY scans for interesting tags; RECORD_DATA captures
    # the next text node; RECORD_CATEGORIES collects breadcrumb links.
    READY = 0
    RECORD_DATA = 1
    RECORD_CATEGORIES = 2
    def __init__(self, html_string, *args, **kwargs):
        """Parse ``html_string`` immediately (public feed() is disabled)."""
        HTMLParser.__init__(self, *args, **kwargs)
        self.state = self.READY
        self.product_attributes = {
            'currency': 'USD',
            'categories': []
        }
        # Which product_attributes key the next data node should fill.
        self.record_data_value = None
        HTMLParser.feed(self, html_string)
    def handle_starttag(self, tag, attrs):
        """Dispatch on the current state and the tag/class combination."""
        attrs = dict(attrs)
        class_values = attrs.get('class', '').split()
        if self.state == self.RECORD_CATEGORIES:
            # Inside the breadcrumbs div: plain anchors are category links.
            if tag == 'a' and not class_values:
                # Examples: /sfbay/meat or /sfbay/meat/pork
                category = attrs['href'].strip('/').split('/')[1:]
                self.product_attributes['categories'].append(category)
            return
        if self.state == self.RECORD_DATA:
            return
        # self.state == self.READY
        if tag == 'h2' and 'producer-name' in class_values:
            self.state = self.RECORD_DATA
            self.record_data_value = 'producer_name'
        elif tag == 'h1' and 'product-name' in class_values:
            self.state = self.RECORD_DATA
            self.record_data_value = 'name'
        elif tag == 'div' and 'description-body' in class_values:
            self.state = self.RECORD_DATA
            self.record_data_value = 'description'
        elif tag == 'meta' and 'itemprop' in attrs:
            # Microdata meta tag; only the price is used.
            if attrs['itemprop'] == 'price':
                self.product_attributes['price'] = float(attrs['content'])
        elif tag =='div' and 'breadcrumbs' in class_values:
            self.state = self.RECORD_CATEGORIES
    def handle_endtag(self, tag):
        """Leaving the breadcrumbs div ends category collection."""
        if self.state == self.RECORD_CATEGORIES and tag == 'div':
            self.state = self.READY
    def handle_data(self, data):
        """Capture the pending text node into product_attributes."""
        if self.state == self.RECORD_DATA:
            self.product_attributes[self.record_data_value] = data
            self.state = self.READY
            self.record_data_value = None
    def feed(*args, **kwargs):
        raise NotImplementedError('ProductPageParser.feed')
def get_parser(url_pathname, html_string):
    """Pick the parser class matching the URL shape.

    Two slashes (e.g. /sfbay/meat) selects ItemGridParser; four slashes
    (a product page) selects ProductPageParser; anything else is an error.
    """
    slash_count = url_pathname.count('/')
    if slash_count == 2:
        return ItemGridParser
    if slash_count == 4:
        return ProductPageParser
    raise RuntimeError('get_parser: unexpected {}'.format(url_pathname))
|
silasbw/hungryskunk
|
goodeggs_parser.py
|
Python
|
mit
| 2,996
|
import json
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.generics import ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from apis.betterself.v1.events.filters import SleepLogFilter
from apis.betterself.v1.events.serializers import SleepLogReadSerializer, SleepLogCreateSerializer
from analytics.events.utils.dataframe_builders import SleepActivityDataframeBuilder
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin
from config.pagination import ModifiedPageNumberPagination
from constants import LOOKBACK_PARAM_NAME
from events.models import SleepLog
class SleepActivityView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin):
    """List/create (and delete-by-UUID) endpoint for SleepLog records."""
    model = SleepLog
    pagination_class = ModifiedPageNumberPagination
    read_serializer_class = SleepLogReadSerializer
    write_serializer_class = SleepLogCreateSerializer
    filter_class = SleepLogFilter
    def get_serializer_class(self):
        # Reads use the read serializer, writes the write serializer
        # (chosen by the ReadOrWriteSerializerChooser mixin).
        return self._get_read_or_write_serializer_class()
    def get_queryset(self):
        # Users only ever see their own sleep logs.
        return self.model.objects.filter(user=self.request.user)
class SleepAggregatesView(APIView):
    """Return the requesting user's full sleep-history series as JSON."""
    def get(self, request):
        user = request.user
        sleep_activities = SleepLog.objects.filter(user=user)
        serializer = SleepActivityDataframeBuilder(sleep_activities)
        sleep_aggregate = serializer.get_sleep_history_series()
        # because pandas uses a timeindex, when we go to json - it doesn't
        # play nicely with a typical json dump, so we do an additional load so drf can transmit nicely
        result = sleep_aggregate.to_json(date_format='iso')
        result = json.loads(result)
        return Response(data=result)
class SleepAveragesView(APIView):
    """Return a rolling mean of the user's sleep-history series.

    The window size comes from the lookback query parameter; it defaults
    to 1 when absent, and a non-integer value yields HTTP 400.
    """
    def get(self, request):
        try:
            window = int(request.query_params[LOOKBACK_PARAM_NAME])
        except MultiValueDictKeyError:
            # MultiValueDictKeyError happens when a key doesn't exist
            window = 1
        except ValueError:
            # ValueError if something entered for a window that couldn't be interpreted
            return Response(status=400)
        user = request.user
        sleep_activities = SleepLog.objects.filter(user=user)
        builder = SleepActivityDataframeBuilder(sleep_activities)
        sleep_aggregate = builder.get_sleep_history_series()
        # min_periods=1 keeps the leading entries instead of producing NaN
        sleep_average = sleep_aggregate.rolling(window=window, min_periods=1).mean()
        result = sleep_average.to_json(date_format='iso')
        result = json.loads(result)
        return Response(data=result)
|
jeffshek/betterself
|
apis/betterself/v1/sleep/views.py
|
Python
|
mit
| 2,651
|
from __future__ import unicode_literals, print_function, absolute_import
from nikola.plugin_categories import SignalHandler
from nikola import utils
from nikola import metadata_extractors
import blinker
import hashlib
import io
import os
__all__ = []
_LOGGER = utils.get_logger('static_comments')
class Comment(object):
    """A single static comment attached to a post, story or gallery."""
    # Filled in by the constructor.
    id = None
    parent_id = None
    # Filled in by whoever creates the comment.
    content = ''  # should be a properly escaped HTML fragment
    author = None
    author_email = None  # should not be published by default
    author_url = None
    author_ip = None  # should not be published by default
    date_utc = None  # should be set via set_utc_date()
    date_localized = None  # should be set via set_utc_date()
    # Filled in by _process_comments(); semantics match utils.TreeNode.
    indent_levels = None  # use for formatting comments as tree
    indent_change_before = 0  # use for formatting comments as tree
    indent_change_after = 0  # use for formatting comments as tree
    def __init__(self, site, owner, id, parent_id=None):
        """Initialize comment.

        site: Nikola site object;
        owner: post which 'owns' this comment;
        id: ID of comment;
        parent_id: ID of comment's parent, or None if it has none.
        """
        self.id = id
        self.parent_id = parent_id
        self._owner = owner
        self._config = site.config
    def set_utc_date(self, date_utc):
        """Set the date (in UTC). Automatically updates the localized date."""
        self.date_utc = utils.to_datetime(date_utc)
        self.date_localized = utils.to_datetime(date_utc, self._config['__tzinfo__'])
    def formatted_date(self, date_format):
        """Return the formatted localized date."""
        return utils.LocaleBorg().formatted_date(date_format, self.date_localized)
    def hash_values(self):
        """Return tuple of values whose hash to consider for computing the hash of this comment."""
        return (self.id, self.parent_id, self.content, self.author, self.author_url, self.date_utc)
    def __repr__(self):
        """Returns string representation for comment."""
        return '<Comment: {0} for {1}; indent: {2}>'.format(self.id, self._owner, self.indent_levels)
class StaticComments(SignalHandler):
    """Add static comments to posts.

    Comments live next to the post source as ``<postfile>.<id>.wpcomment``
    files; they are scanned, parsed, compiled and attached to each post
    when Nikola fires the "scanned" signal.
    """
    def _compile_content(self, compiler_name, content, filename):
        """Compile comment content with specified page compiler."""
        if compiler_name == 'html':
            # Special case: just pass-through content.
            return content
        if compiler_name not in self.site.compilers:
            _LOGGER.error("Cannot find page compiler '{0}' for comment {1}!".format(compiler_name, filename))
            exit(1)
        compiler = self.site.compilers[compiler_name]
        try:
            result = compiler.compile_string(content, source_path=filename, is_two_file=True, lang=self.site.default_lang)
            if compiler_name == 'rest' and result[1] >= 3:
                # The reStructured Text page compiler returns error_level as second return value
                _LOGGER.error("reStructuredText page compiler ({0}) failed to compile comment {1}!".format(compiler_name, filename))
                exit(1)
            return result[0]
        except (AttributeError, NotImplementedError):
            # Older compiler plugins expose compile_to_string() instead.
            try:
                return compiler.compile_to_string(content)
            except AttributeError:
                _LOGGER.error("Page compiler plugin '{0}' provides no compile_string or compile_to_string function (comment {1})!".format(compiler_name, filename))
                exit(1)
    def _parse_comment(self, filename):
        """Read a comment from a file, and return metadata dict and content."""
        with io.open(filename, "r", encoding="utf-8-sig") as f:
            source_text = f.read()
        meta = None
        content = None
        # Try metadata extractors in descending priority order; the first
        # extractor that yields metadata wins.
        for priority in metadata_extractors.MetaPriority:
            found_in_priority = False
            for extractor in self.site.metadata_extractors_by['priority'].get(priority, []):
                # Call metadata_extractors.check_conditions to check whether the extractor
                # can be used, and if that succeeds, check whether all requirements for the
                # extractor are there. We pass None as the post since we don't have a post.
                # The (currently only) consequence is that compiler-specific plugins don't
                # work: after all, the compiler is determined from the metadata to be extracted.
                if not metadata_extractors.check_conditions(None, filename, extractor.conditions, self.site.config, source_text):
                    continue
                extractor.check_requirements()
                # Use the extractor to split the post into metadata and content, and to
                # decode the metadata.
                meta_str, content = extractor.split_metadata_from_text(source_text)
                new_meta = extractor._extract_metadata_from_text(meta_str)
                if new_meta:
                    found_in_priority = True
                    meta = new_meta
                    break
            if found_in_priority:
                break
        if meta is None:
            _LOGGER.error("Cannot identify metadata format for comment {0}!".format(filename))
            exit(1)
        return meta, content
    def _read_comment(self, filename, owner, id):
        """Read a comment from a file.

        Returns a Comment, or None when the comment is not approved.
        """
        meta, content = self._parse_comment(filename)
        # create comment object
        comment = Comment(self.site, owner, id)
        # parse headers
        compiler_name = None
        for header, value in meta.items():
            if header == 'id':
                comment.id = value
            elif header == 'status':
                pass
            elif header == 'approved':
                # Unapproved comments are dropped entirely.
                if value != 'True':
                    return None
            elif header == 'author':
                comment.author = value
            elif header == 'author_email':
                comment.author_email = value
            elif header == 'author_url':
                comment.author_url = value
            elif header == 'author_IP':
                comment.author_ip = value
            elif header == 'date_utc':
                comment.set_utc_date(value)
            elif header == 'parent_id':
                if value != 'None':
                    comment.parent_id = value
            elif header == 'wordpress_user_id':
                pass
            elif header == 'post_language':
                pass
            elif header == 'compiler':
                compiler_name = value
            else:
                _LOGGER.error("Unknown comment header: '{0}' (in file {1})".format(header, filename))
                exit(1)
        # check compiler name
        if compiler_name is None:
            _LOGGER.warn("Comment file '{0}' doesn't specify compiler! Using default 'wordpress'.".format(filename))
            compiler_name = 'wordpress'
        # compile content
        comment.content = self._compile_content(compiler_name, content, filename)
        return comment
    def _scan_comments(self, path, file, owner):
        """Scan comments for post.

        Only accepts files named ``<file>.<id>.wpcomment`` directly in
        ``path``; returns the comments sorted by UTC date.
        """
        comments = {}
        for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
            # Do not recurse: only the post's own directory is scanned.
            if dirpath != path:
                continue
            for filename in filenames:
                if not filename.startswith(file + '.'):
                    continue
                rest = filename[len(file):].split('.')
                if len(rest) != 3:
                    continue
                if rest[0] != '' or rest[2] != 'wpcomment':
                    continue
                try:
                    comment = self._read_comment(os.path.join(dirpath, filename), owner, rest[1])
                    if comment is not None:
                        # _LOGGER.info("Found comment '{0}' with ID {1}".format(os.path.join(dirpath, filename), comment.id))
                        comments[comment.id] = comment
                except ValueError as e:
                    _LOGGER.warn("Exception '{1}' while reading file '{0}'!".format(os.path.join(dirpath, filename), e))
                    pass
        return sorted(list(comments.values()), key=lambda c: c.date_utc)
    def _hash_post_comments(self, post):
        """Compute hash of all comments for this post."""
        # compute hash of comments
        hash = hashlib.md5()
        c = 0
        for comment in post.comments:
            c += 1
            for part in comment.hash_values():
                hash.update(str(part).encode('utf-8'))
        return hash.hexdigest()
    def _process_comments(self, comments):
        """Given a list of comments, rearranges them according to hierarchy and returns ordered list with indentation information."""
        # First, build tree structure out of TreeNode with comments attached
        root_list = []
        comment_nodes = dict()
        for comment in comments:
            node = utils.TreeNode(comment.id)
            node.comment = comment
            comment_nodes[comment.id] = node
        # Comments whose parent is unknown become roots.
        for comment in comments:
            node = comment_nodes[comment.id]
            parent_node = comment_nodes.get(node.comment.parent_id)
            if parent_node is not None:
                parent_node.children.append(node)
            else:
                root_list.append(node)
        # Then flatten structure and add indent information
        comment_nodes = utils.flatten_tree_structure(root_list)
        for node in comment_nodes:
            comment = node.comment
            comment.indent_levels = node.indent_levels
            comment.indent_change_before = node.indent_change_before
            comment.indent_change_after = node.indent_change_after
        return [node.comment for node in comment_nodes]
    def _process_post_object(self, post):
        """Add comments to a post object."""
        # Get all comments
        path, ext = os.path.splitext(post.source_path)
        path, file = os.path.split(path)
        comments = self._scan_comments(path, file, post)
        # Add ordered comment list to post
        post.comments = self._process_comments(comments)
        # Add dependency to post, so the post is rebuilt when its comments change
        digest = self._hash_post_comments(post)
        post.add_dependency_uptodate(utils.config_changed({1: digest}, 'nikola.plugins.comments.static_comments:' + post.base_path), is_callable=False, add='page')
    def _process_posts_and_pages(self, site):
        """Add comments to all posts."""
        if site is self.site:
            for post in site.timeline:
                self._process_post_object(post)
    def set_site(self, site):
        """Set Nikola site object."""
        super(StaticComments, self).set_site(site)
        site._GLOBAL_CONTEXT['site_has_static_comments'] = True
        # Attach comments once the site has finished scanning posts.
        blinker.signal("scanned").connect(self._process_posts_and_pages)
|
getnikola/plugins
|
v8/static_comments/static_comments.py
|
Python
|
mit
| 11,214
|
__all__ = ["make_graph", "add_edges", "Results",
"find_solution", "add_edges_numerical", "find_solution_numerical"]
import numpy as np
import copy
import networkx as nx
def make_graph(node_ids, node_labels):
    """
    Build an undirected, edgeless graph whose nodes carry attributes.

    Parameters
    ----------
    node_ids : iterable
        The list of node_ids. Can be a list of anything
        that networkx.Graph() can read and use.
    node_labels : dict
        A dictionary of the form {"label":[list of labels
        for all nodes], ...}.
        Defines all labels for all nodes.

    Returns
    -------
    G : networkx.Graph() instance
        The undirected graph containing the nodes and
        corresponding labels, but no edges yet.
    """
    # Every label list must line up one-to-one with node_ids.
    for key in node_labels.keys():
        assert len(node_ids) == len(node_labels[key]), \
            "node_ids and topics for label %s "%key + \
            "are not of equal length, but they should be!"
    graph = nx.Graph()
    for idx, node in enumerate(node_ids):
        graph.add_node(node)
        for label_name in node_labels.keys():
            graph.nodes()[node][label_name] = node_labels[label_name][idx]
    return graph
def add_edges(G, labels=None, hard_constraint=True, weights=None):
    """
    Add edges to the graph, with weights.

    If no order of labels is
    specified, then the order of keys in the dictionary
    for each node will be used.

    TODO: Make hard_constraint an *index* rather than a bool

    Parameters
    ----------
    G : networkx.Graph() instance
        The graph without edges
    labels : list of strings
        A list of labels specifying the order of attributes on each
        node to use when calculating the weights.
        This list should be in descending order (with the most important
        label *first*).
        NOTE(review): despite the default of None, the first statement
        calls len(labels), so labels is effectively required -- passing
        None raises TypeError. Confirm intended default with the author.
    hard_constraint : bool
        Boolean flag determining whether hard constraints should be used.
        In this case, this means that for the first label specified in
        `labels`, no edges will be drawn when this label is the same
        for two nodes.
    weights : iterable of float (0, 1]
        The relative weights of each category. By default, the weight of an
        edge will be `weight=1`, adjusted by `weight[i]` for each pair of nodes
        where the labels in category `i` are the same. If `hard_constraints==True`,
        then edges between nodes for which labels in the first category are
        the same do not exist, and `weights` should have length `len(labels)-1`.
        If `hard_constraints == False`, then `len(weights)` should be `len(labels)`,
        where the first entry is used to set the weight of an edge between two
        nodes where `label[0]` has the same value.

    Returns
    -------
    G : networkx.Graph() instance
        The same input graph, but with edges.
    """
    # find the total number of labels
    nlabels = len(labels)
    if weights is not None:
        if hard_constraint:
            assert nlabels-1 == len(weights), "Number of weights must correspond" \
                                              "to the number of topics"
        else:
            assert nlabels == len(weights), "Number of weights must correspond" \
                                            "to the number of topics"
    else:
        # default: no weakening -- every adjustment factor is 1
        weights = np.ones(nlabels)
    # the total number of nodes
    n_nodes = G.number_of_nodes()
    # list of nodes
    nodes = G.nodes()
    # get a list of lists of all node labels, one inner list per label key
    node_labels = []
    for l in labels:
        node_labels.append([G.nodes()[i][l] for i in G.nodes()])
    # TODO: Currently only works with two labels!
    # iterate over all the different possible labels
    for i, sl in enumerate(node_labels):
        # iterate over all unordered node pairs (k < l); k == l and k > l
        # are skipped so each pair is considered exactly once
        for k, n1 in enumerate(G.nodes()):
            for l, n2 in enumerate(G.nodes()):
                if k == l:
                    continue
                if k > l:
                    continue
                if hard_constraint:
                    if i == 0:
                        # hard constraint: two nodes sharing the first
                        # label get NO edge at all
                        if G.nodes()[n1][labels[i]] == G.nodes()[n2][labels[i]]:
                            continue
                        else:
                            G.add_edge(n1, n2, weight=1.0)
                    else:
                        # later labels only *weaken* edges that already
                        # exist (weights[i-1] because weights has one
                        # entry fewer than labels in this mode)
                        if G.nodes()[n1][labels[i]] == G.nodes()[n2][labels[i]]:
                            if (n1, n2) not in G.edges():
                                continue
                            else:
                                G[n1][n2]["weight"] *= weights[i-1]
                        else:
                            continue
                else:
                    if i == 0:
                        # soft constraint: same first label lowers the
                        # starting weight instead of removing the edge
                        if G.nodes()[n1][labels[i]] == G.nodes()[n2][labels[i]]:
                            G.add_edge(n1, n2, weight=weights[i])
                        else:
                            G.add_edge(n1, n2, weight=1.0)
                    else:
                        if G.nodes()[n1][labels[i]] == G.nodes()[n2][labels[i]]:
                            G[n1][n2]["weight"] *= weights[i]
                        else:
                            continue
    return G
def add_edges_numerical(G, edges, hard_constraint=True, threshold=0.5):
    """
    Add edges to the graph from a precomputed list of pairwise weights.

    Parameters
    ----------
    G : networkx.Graph() instance
        The graph without edges
    edges: iterable
        An iterable of edge weights, in order of (k, l) occurrence, i.e.
        ((0,0), (0,1), (0,2), ..., (1, 2), (1, 3), ..., (n-1, n)).
        Must contain exactly n*(n-1)/2 entries for n nodes.
    hard_constraint : bool
        Boolean flag determining whether hard constraints should be used.
        If True, node pairs whose weight falls below `threshold` get no
        edge at all; otherwise every pair gets an edge with its weight.
    threshold: float out of [0,1]
        The threshold of the dissimilarity measure describing how dissimilar
        two sessions are. If the threshold is small, the graph has a high tolerance
        to similar sessions. That is, it is more likely to schedule similar sessions
        at the same time. If it is large, then this tolerance is much smaller. There
        will be much fewer edges between nodes, meaning a much smaller chance that
        similar sessions will be scheduled at the same time -- but also a greater
        chance that no solution will be found at all.

    Returns
    -------
    G : networkx.Graph() instance
        The same input graph, but with edges.
    """
    n_nodes = G.number_of_nodes()
    # one weight per unordered pair of nodes
    assert np.size(edges) == (n_nodes*(n_nodes - 1))/2.0, "Incorrect number of edges!"
    # counter for edges: advances once per unordered pair (k < l), in the
    # same order the `edges` iterable was produced
    i = 0
    for k, n1 in enumerate(G.nodes()):
        for l, n2 in enumerate(G.nodes()):
            # visit each unordered pair exactly once
            if k >= l:
                continue
            # with a hard constraint: drop the edge entirely when its
            # weight is below `threshold`, otherwise keep it with its
            # weight
            if hard_constraint:
                if edges[i] < threshold:
                    i += 1
                    continue
                else:
                    G.add_edge(n1, n2, weight=edges[i])
            # otherwise just set all the weights the way they are
            else:
                G.add_edge(n1,n2,weight=edges[i])
            i += 1
    return G
def _sort_cliques_by_weights(G, cliques, n_elements):
    """
    Sort cliques by their summed edge weights, in descending order.

    Parameters
    ----------
    G : networkx.Graph instance
        Undirected graph with nodes and edges.
        Edges must have attribute 'weight'
    cliques : iterable
        A list of lists; inner lists must have n_elements members
    n_elements : integer
        The number of elements in each clique

    Returns
    -------
    cliques : iterable
        Cliques sorted by weights in descending order. Cliques whose
        summed weight is not strictly positive (including any clique not
        of length n_elements, which is assigned weight 0) are dropped.
    summed_weights : iterable
        The list of summed weights, sorted in the
        same descending order as cliques
    """
    # compute summed weights for all cliques:
    cliques = np.asarray(cliques)
    summed_weights = []
    for cl in cliques:
        if len(cl) != n_elements:
            # wrong-size clique: weight 0 so it is filtered out below
            ww = 0
        else:
            # sum the weight of every edge inside the clique, counting
            # each unordered pair i < j exactly once
            ww = 0
            for i in range(n_elements):
                for j in range(n_elements):
                    if i >= j:
                        continue
                    else:
                        ww += G[cl[i]][cl[j]]["weight"]
        summed_weights.append(ww)
    # sort cliques from highest weight to smallest
    sorted_cliques = cliques[np.argsort(summed_weights)[::-1]]
    # sort weights in the same way so both arrays stay aligned
    summed_weights = np.sort(summed_weights)[::-1]
    # keep only cliques with strictly positive total weight
    return sorted_cliques[(summed_weights > 0)], summed_weights[(summed_weights > 0)]
class Results(object):
    """Accumulator for the groups and weights found during the search.

    Attributes
    ----------
    n_elements : int
        Number of members per group.
    groups : list
        One entry per accepted clique (group).
    all_weights : list
        Summed edge weight of each accepted clique.
    success : bool
        Whether the search has (so far) succeeded.
    weights_sum_total : float
        Grand total of `all_weights`, refreshed on every update.
    """

    def __init__(self, n_elements):
        self.n_elements = n_elements
        self.groups = []
        self.all_weights = []
        self.success = True
        self.weights_sum_total = 0

    def update_groups(self, groups):
        """Record one newly accepted group."""
        self.groups.append(groups)

    def update_weights(self, weights_sum_total):
        """Record one group's weight and refresh the grand total."""
        self.all_weights.append(weights_sum_total)
        self.weights_sum_total = np.sum(self.all_weights)
def find_solution(G, n_elements, n_unused=None, results=None):
    """
    Sort nodes in G into groups of n_elements members such that
    the total sum of weights is maximized.

    If the graph includes hard constraints on the relationship between
    nodes (i.e. missing edges), it is possible that no solution is found.
    In the case of a fully connected graph, the solution will be that
    which maximizes the weights. The weights are inherent attributes of
    the Graph and must be calculated beforehand (see `add_edges` for details).

    Parameters
    ----------
    G : networkx.Graph() instance
        Undirected graph with nodes and edges. The edges must have weights
        between 0 and 1, but edges can be missing if no relationship exists
        between nodes.
    n_elements : integer
        The number of elements per group. Must be an integer divisor of the
        total number of nodes in the graph.
    n_unused : integer
        The number of unused nodes in the graph at every recursion step.
        If None, then it will be initialized as the total number of nodes
        in the graph. Leave as None when calling from user code.
    results : Results instance
        Accumulator carried through the recursion. Leave as None when
        calling from user code; it is initialized internally.
        Note: DO NOT SET THIS AT THE BEGINNING OF THE RUN!

    Returns
    -------
    results : Results instance
        `results.success` flags whether a full partition was found;
        `results.groups` holds the chosen groups and
        `results.weights_sum_total` the total sum of their weights.
    """
    # BUGFIX: use the builtin `float` -- `np.float` was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    assert G.number_of_nodes() % float(n_elements) == 0, "Number of sessions must be " + \
                                                         "an integer multiple of n_elements"
    ## initialize results object on the outermost call
    if results is None:
        results = Results(n_elements)
    if n_unused is None:
        n_unused = G.number_of_nodes()
    # base case: every node has been placed into a group
    if n_unused == 0:
        results.success = True
        return results
    # recursion
    else:
        ## find all cliques in the graph G
        cliques = list(nx.enumerate_all_cliques(G))
        ## find all cliques that have the required number of elements
        cliques = np.array([c for c in cliques if len(c)==n_elements])
        ## sort cliques by weights (best candidates first)
        cliques, summed_weights = _sort_cliques_by_weights(G, cliques, n_elements)
        ## find the total number of cliques with n_elements members
        ncliques = len(cliques)
        ## loop over all cliques:
        for g,(cl,ww) in enumerate(zip(cliques, summed_weights)):
            cl_topics = [G.nodes()[c] for c in cl]
            ## add the new clique to the list of output groups
            results.update_groups(list(zip(cl, cl_topics)))
            ## add total weight of the clique:
            results.update_weights(ww)
            ## make a new deep copy for the next recursion step
            G_new = copy.deepcopy(G)
            ## remove clique from graph
            for n in cl:
                G_new.remove_node(n)
            ## compute new unused number of nodes
            n_unused = G_new.number_of_nodes()
            ## if no unused nodes are left, return the selected groups,
            ## otherwise recurse
            results = find_solution(G_new, n_elements, n_unused, results)
            if results is not None:
                if results.success:
                    return results
                ## backtrack: undo this clique's contribution and try the
                ## next candidate
                else:
                    results.success = False
                    results.groups.pop(-1)
                    results.all_weights.pop(-1)
                    continue
        # no candidate clique led to a full partition at this level
        if len(results.groups) == 0:
            print("No solution found!")
            results.success = False
            return results
        else:
            results.groups.pop(-1)
            results.all_weights.pop(-1)
            results.success = False
            return results
def find_solution_numerical(G, n_elements, n_unused=None, results=None):
    """
    Sort nodes in G into groups of n_elements members such that
    the total sum of weights is maximized.

    Unlike `find_solution`, this variant only *warns* (rather than
    asserts) when the node count is not an integer multiple of
    n_elements, so some sessions may remain unscheduled.

    If the graph includes hard constraints on the relationship between
    nodes (i.e. missing edges), it is possible that no solution is found.
    In the case of a fully connected graph, the solution will be that
    which maximizes the weights. The weights are inherent attributes of
    the Graph and must be calculated beforehand (see `add_edges` for details).

    Parameters
    ----------
    G : networkx.Graph() instance
        Undirected graph with nodes and edges. The edges must have weights
        between 0 and 1, but edges can be missing if no relationship exists
        between nodes.
    n_elements : integer
        The number of elements per group.
    n_unused : integer
        The number of unused nodes in the graph at every recursion step.
        If None, then it will be initialized as the total number of nodes
        in the graph. Leave as None when calling from user code.
    results : Results instance
        Accumulator carried through the recursion. Leave as None when
        calling from user code; it is initialized internally.
        Note: DO NOT SET THIS AT THE BEGINNING OF THE RUN!

    Returns
    -------
    results : Results instance
        `results.success` flags whether a full partition was found;
        `results.groups` holds the chosen groups and
        `results.weights_sum_total` the total sum of their weights.
    """
    # BUGFIX (two in one line): the condition was inverted (`== 0` printed
    # the warning exactly when the count WAS a clean multiple), and
    # `np.float` was removed in NumPy 1.24.
    if G.number_of_nodes() % float(n_elements) != 0:
        print("Caution! Number of sessions is not an integer "
              "multiple of the number of parallel slots!")
    ## initialize results object on the outermost call
    if results is None:
        results = Results(n_elements)
    if n_unused is None:
        n_unused = G.number_of_nodes()
    ## base case: every node has been placed into a group
    if n_unused == 0:
        results.success = True
        return results
    ## recursion
    else:
        ## find all cliques in the graph G
        cliques = list(nx.enumerate_all_cliques(G))
        ## find all cliques that have the required number of elements
        cliques = np.array([c for c in cliques if len(c)==n_elements])
        ## sort cliques by weights (best candidates first)
        cliques, summed_weights = _sort_cliques_by_weights(G, cliques, n_elements)
        ## find the total number of cliques with n_elements members
        ncliques = len(cliques)
        ## loop over all cliques:
        for g,(cl,ww) in enumerate(zip(cliques, summed_weights)):
            cl_topics = [G.nodes()[c] for c in cl]
            ## add the new clique to the list of output groups
            results.update_groups(list(zip(cl, cl_topics)))
            ## add total weight of the clique:
            results.update_weights(ww)
            ## make a new deep copy for the next recursion step
            G_new = copy.deepcopy(G)
            ## remove clique from graph
            for n in cl:
                G_new.remove_node(n)
            ## compute new unused number of nodes
            n_unused = G_new.number_of_nodes()
            ## if no unused nodes are left, return the selected groups,
            ## otherwise recurse
            results = find_solution_numerical(G_new, n_elements, n_unused, results)
            if results is not None:
                if results.success:
                    return results
                ## backtrack: undo this clique's contribution and try the
                ## next candidate
                else:
                    results.success = False
                    results.groups.pop(-1)
                    results.all_weights.pop(-1)
                    continue
        # TODO: Need to add something here to figure out which sessions
        # have potentially been left out because the number of sessions wasn't
        # an integer multiple of the number of slots
        if len(results.groups) == 0:
            print("No solution found!")
            results.success = False
            return results
        else:
            results.groups.pop(-1)
            results.all_weights.pop(-1)
            results.success = False
            return results
|
dhuppenkothen/AstroChairs
|
astrochairs/graphscheduler.py
|
Python
|
mit
| 19,634
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @return a ListNode
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative numbers stored as linked lists.

        Each node holds one digit; digits are stored least-significant
        first. Returns the sum as a new list in the same format.
        """
        carry = 0
        head = ListNode(0)  # dummy node; head.next is the real result
        l3 = head
        while l1 is not None or l2 is not None:
            a = 0
            b = 0
            if l1 is not None:
                a = l1.val
                l1 = l1.next
            if l2 is not None:
                b = l2.val
                l2 = l2.next
            l3.next = ListNode(0)
            l3 = l3.next
            # BUGFIX: use divmod (floor division) for the carry/digit
            # split -- the original `c / 10` is float division on
            # Python 3 and would store floats in the carry and nodes.
            carry, l3.val = divmod(a + b + carry, 10)
        if carry:
            l3.next = ListNode(carry)
        return head.next
|
rahul-ramadas/leetcode
|
add-two-numbers/Solution.8447558.py
|
Python
|
mit
| 868
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pyquery import PyQuery as pq
import re, os, multiprocessing
from CnbetaApis.datas.Models import *
from urllib.parse import urlparse
from CnbetaApis.datas.get_article_by_id import get_article_by_id, fixSource
from requests import get
from datetime import datetime, timedelta, timezone
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
def avaiableUrl(title, url):
    """Return True when both title and url are non-empty strings and the
    url points at cnbeta.com; otherwise False.

    (Name kept as-is -- including the typo -- for the existing callers.)
    """
    if len(title) and len(url) and 'cnbeta.com' in url:
        return True
    return False
class HomePageContent:
    """Scraper for the cnBeta home page.

    `requestfirst` parses the rendered home page (hero carousel,
    headlines, update list) and grabs the CSRF token; `requestpage`
    then walks the JSON "more" endpoint from page 2 up to page 10.
    Every article found is merged into the database via DBSession.
    """

    def __init__(self):
        self.base_url = "http://www.cnbeta.com"
        self.document = None     # parsed home page (PyQuery), set by requestfirst
        self.page = 2            # next page for the "more" endpoint
        self.csrf_token = None   # CSRF token scraped from the home page

    def requestfirst(self):
        """Scrape the rendered home page and store all articles found."""
        session = DBSession()
        self.document = pq(self.base_url, encoding="utf-8")
        self.csrf_token = self.document('meta[name=csrf-token]').attr('content')
        # hero carousel (big slides)
        topContent = self.document('.topContent .cnbeta-home-hero')
        for element in topContent.find('.swiper-slide').items():
            title = element('.swiper-title').text()
            img = element('.img285x360 img').attr('src')
            url = element('a').attr('href')
            if not avaiableUrl(title, url):
                continue
            # the article id is the file name of the URL path, e.g. "/123.htm"
            id = os.path.split(urlparse(url).path)[1].split('.')[0]
            session.merge(Article(id=id, title=title, url=url, imgUrl=img))
        # hero side figures
        for element in topContent.find('.cnbeta-home-hero-figures .item').items():
            img = element('.figure-img img').attr('src')
            title = element('.item-title .figure-title').text()
            url = element('.link').attr('href')
            if not avaiableUrl(title, url):
                continue
            id = os.path.split(urlparse(url).path)[1].split('.')[0]
            session.merge(Article(id=id, title=title, url=url, imgUrl=img))
        # headline groups, each with a list of related articles
        for hl in self.document('.cnbeta-headlines').find('.swiper-slide-group').items():
            for element in hl('.swiper-slide-content').items():
                temp = element('.headline-thumb a')
                url = temp.attr('href')
                img = temp('img').attr('src')
                title = element('h2 a').text()
                if not avaiableUrl(title, url):
                    continue
                id = os.path.split(urlparse(url).path)[1].split('.')[0]
                rids = ''
                for relate in element('ul>li a').items():
                    rtitle = relate.text()
                    rurl = relate.attr('href')
                    if not avaiableUrl(rtitle, rurl):
                        continue
                    rid = os.path.split(urlparse(rurl).path)[1].split('.')[0]
                    relatearticle = Article(id=rid, title=rtitle, url=rurl)
                    # accumulate a comma-separated list of related ids
                    if len(rids):
                        rids = '%s,' % rids
                    rids = '%s%s' % (rids, rid)
                    session.merge(relatearticle)
                article = Article(id=id, title=title, url=url, imgUrl=img, related=rids)
                session.merge(article)
        # "update" list with metadata (publish time, read/opinion counts)
        for element in self.document('.cnbeta-update .cnbeta-update-list .items-area').find('.item').items():
            attrElement = element('dl')
            url = attrElement('dt>a').attr('href')
            title = attrElement('dt>a').text()
            if not avaiableUrl(title, url):
                continue
            id = os.path.split(urlparse(url).path)[1].split('.')[0]
            # the list is newest-first: stop at the first known article
            if session.query(Article).filter(Article.id == id).count():
                break
            introduction = attrElement('dd>p').text()
            img = attrElement('a>img').attr('src')
            dataElement = element('.meta-data')
            # article_type = dataElement('label>a').text()
            status = dataElement('li').eq(0).text()
            m_result = re.match(r'发布于([0-9\-]+\s[0-9:]+)[\s|]+([0-9]+)次阅读[\s|]+([0-9]+)个意见', status)
            # timestamps on the site are China Standard Time (UTC+8)
            createTime = datetime.strptime(m_result.group(1), '%Y-%m-%d %H:%M').replace(tzinfo=timezone(timedelta(hours=8)))
            readCount = int(m_result.group(2))
            opinionCount = int(m_result.group(3))
            rids = ''
            for relatedElement in element('.cnbeta-update-list-article ul>li').items():
                rtitle = relatedElement('a').text()
                rurl = relatedElement('a').attr('href')
                if not avaiableUrl(rtitle, rurl):
                    continue
                rid = os.path.split(urlparse(rurl).path)[1].split('.')[0]
                relatearticle = Article(id=rid, title=rtitle, url=rurl)
                if len(rids):
                    rids = '%s,' % rids
                rids = '%s%s' % (rids, rid)
                session.merge(relatearticle)
            article = Article(id=id, title=title, url=url, imgUrl=img, related=rids, introduction=introduction, createTime=createTime, readCount=readCount, opinionCount=opinionCount)
            session.merge(article)
        session.commit()
        session.close()
        fixContent()

    def requestpage(self):
        """Recursively fetch pages 2..10 of the JSON "more" endpoint."""
        if self.page > 10:
            return
        session = DBSession()
        # 'http://www.cnbeta.com/home/more?&type=all&page=2&_csrf=aW1QdmVNbGFRCR1OXSYPCDskJyAgDwckGwQcPQsVHBAMGGlHDTo4LQ%3D%3D&_=1503134298456'
        url = 'http://www.cnbeta.com/home/more?type=all&page=%d&_csrf=%s&_=%s' % (
            self.page, self.csrf_token, int(datetime.now().timestamp() * 1000))
        getpage = get(url, headers={
            'referer': 'http://www.cnbeta.com/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
        }).json()
        for sigle in getpage['result']['list']:
            readCount = sigle['counter']
            commentCount = sigle['comments']
            intro = pq(sigle['hometext']).text()
            createTime = datetime.strptime(sigle['inputtime'], '%Y-%m-%d %H:%M').replace(tzinfo=timezone(timedelta(hours=8)))
            sid = sigle['sid']
            source = fixSource(sigle['source'])
            imgurl = sigle['thumb']
            url = sigle['url_show']
            title = sigle['title']
            # results are newest-first: stop at the first known article
            if session.query(Article).filter(Article.id == sid).count():
                break
            rids = ''
            # BUGFIX: `sigle` is a dict, so hasattr(sigle, 'relation') was
            # always False and related articles were never collected; test
            # key membership instead.
            if 'relation' in sigle:
                for rela in sigle['relation']:
                    rela_url = pq(rela).attr('href')
                    rela_title = pq(rela).text()
                    rela_sid = os.path.split(urlparse(rela_url).path)[1].split('.')[0]
                    if session.query(Article).filter(Article.id == rela_sid).count():
                        continue
                    # BUGFIX: the separator step used `rela_sid` instead of
                    # the accumulated `rids`, clobbering earlier ids; mirror
                    # the pattern used in requestfirst.
                    if len(rids):
                        rids = '%s,' % rids
                    rids = '%s%s' % (rids, rela_sid)
                    relatearticle = Article(id=rela_sid, title=rela_title, url=rela_url)
                    session.merge(relatearticle)
            article = Article(id=sid, title=title, url=url, imgUrl=imgurl, related=rids, introduction=intro,
                              source=source, createTime=createTime, readCount=readCount, opinionCount=commentCount)
            session.merge(article)
        session.commit()
        session.close()
        fixContent()
        self.page += 1
        self.requestpage()
def fixContent():
    """Download the full body of every article still marked incomplete.

    Queries all Article rows with haveAll == 0 (newest first) and fetches
    each one's full content on a small process pool.
    """
    session = DBSession()
    incomplete = session.query(Article).order_by(desc(Article.id)).filter(Article.haveAll == 0).all()
    workers = multiprocessing.Pool(processes=4)
    for article in incomplete:
        workers.apply_async(get_article_by_id, (article.url,))
    workers.close()
    workers.join()
    session.close()
def job():
    """Run one full scrape: the rendered home page, then the paged feed."""
    crawler = HomePageContent()
    crawler.requestfirst()
    crawler.requestpage()
# Re-run the scrape every 15 minutes.
sched = BlockingScheduler()
sched.add_job(job, 'interval', minutes=15)
# Interval jobs only fire after the first interval elapses, so run each
# registered job once immediately before starting the scheduler.
for j in sched.get_jobs():
    j.func()
# Blocks forever, firing jobs on schedule.
sched.start()
|
kagenZhao/cnBeta
|
CnbetaApi/CnbetaApis/datas/get_home_models.py
|
Python
|
mit
| 8,089
|
from random import randint
'''
This is an easy copy/paste for creating dicts:
Table = {
'': ,
'': ,
'': ,
'': ,
'': ,
'': ,
'': ,
'':
}
'''
def random_choice(chances_dict):
    """Pick a key from `chances_dict` at random, weighted by its value."""
    keys = list(chances_dict.keys())
    weights = chances_dict.values()
    return keys[random_choice_index(weights)]
def random_choice_index(chances):
    """Return a random index into `chances`, weighted by its entries.

    Rolls a die from 1 to sum(chances) and returns the index of the
    first weight whose cumulative sum reaches the roll.
    """
    dice = randint(1, sum(chances))
    cumulative = 0
    for idx, weight in enumerate(chances):
        cumulative += weight
        if dice <= cumulative:
            return idx
# Table 101: character race, as weighted chances for random_choice.
raceTable101 = {
    'Human': 14,
    'Elf': 2,
    'Dwarf': 1,
    'Halfling': 1,
    'Half Elf': 1,
    'other races': 1
}
# Table 101a: sub-table presumably rolled when 'other races' comes up
# on table 101 -- TODO confirm against the caller.
raceTable101a = {
    'Beastman': 3,
    'Reptileman': 2,
    'Orc': 1,
    'Half-Orc': 4
}
# Table 102: the culture the character grew up in (see cultureTable102a
# for the matching culture modifier).
cultureTable102 = {
    'Primitive': 1,
    'Nomad': 2,
    'Barbarian': 3,
    'Civilized': 3,
    'Civilized-decadent': 1
}
def cultureTable102a(culture):
    """Return the culture modifier (CuMod) for a culture from table 102.

    Unknown culture names return None, preserving the original
    fall-through behaviour.
    """
    cu_mods = {
        'Primitive': -3,
        'Nomad': 0,
        'Barbarian': 2,
        'Civilized': 4,
        'Civilized-decadent': 7,
    }
    return cu_mods.get(culture)
def socialTable103(cuMod, tiMod, charCulture):
    """Roll on table 103 for the character's social status.

    Parameters
    ----------
    cuMod : int
        Culture modifier (from table 102a).
    tiMod : int
        Title modifier; 0 unless a noble title has been rolled.
    charCulture : str
        Culture name, used to pick the noble-title table on a roll of 99+.

    Returns
    -------
    tuple
        (status name, social modifier, noble title or '', tiMod).
    """
    rand = randint(1,100) + cuMod + tiMod
    if rand <= 12:
        return('Destitute', -3, '', 0)
    elif rand <= 40:
        return('Poor', -1, '', 0)
    elif rand <=84:
        return('Comfortable', 0, '', 0)
    elif rand == 85:
        # BUGFIX: the re-roll's result was previously discarded, so this
        # branch fell through and returned None. Propagate it instead.
        return socialTable103(0, tiMod, charCulture)
    elif rand <= 96:
        return('Well-to-do', 2, '', 0)
    elif rand <= 98:
        rand = randint(1,100)
        if rand <= tiMod + 1:
            return('Extremely Wealthy', 8, '', 0)
        else:
            return('Wealthy', 4, '', 0)
    elif rand >= 99:
        # Nobility: pick a title from the culture-appropriate table 758
        # (the nobleTable758* tables are defined elsewhere in this file).
        if charCulture == 'Primitive':
            nobleTitle = random_choice(nobleTable758prim)
        elif charCulture == 'Nomad':
            nobleTitle = random_choice(nobleTable758nomad)
        elif charCulture == 'Barbarian':
            nobleTitle = random_choice(nobleTable758barb)
        else:
            nobleTitle = random_choice(nobleTable758civil)
        tiMod = nobleTable758tiMod(nobleTitle)
        # 103a is exactly like 103 but iterates at 99+ rands. Not necessary, but slightly cleaner on debug printing. Otherwise I could just keep iterating through 103 until it didn't roll into circles.
        return socialTable103a(cuMod, tiMod, charCulture, nobleTitle)
    else:
        raise ValueError("You shouldn't ever be able to see this error on socialTable103.")
def socialTable103a(cuMod, tiMod, charCulture, nobleTitle):
    """Table 103 variant used once a noble title has been rolled.

    Identical to socialTable103 except the noble title and tiMod are
    carried through into every result, and a roll of 99+ simply
    re-rolls instead of rolling a new title.
    """
    roll = randint(1,100) + cuMod + tiMod
    if roll <= 12:
        return ('Destitute', -3, nobleTitle, tiMod)
    if roll <= 40:
        return ('Poor', -1, nobleTitle, tiMod)
    if roll <= 84:
        return ('Comfortable', 0, nobleTitle, tiMod)
    if roll == 85:
        # re-roll without the culture modifier
        return socialTable103a(0, tiMod, charCulture, nobleTitle)
    if roll <= 96:
        return ('Well-to-do', 2, nobleTitle, tiMod)
    if roll <= 98:
        wealth_roll = randint(1,100)
        if wealth_roll <= tiMod + 1:
            return ('Extremely Wealthy', 8, nobleTitle, tiMod)
        return ('Wealthy', 4, nobleTitle, tiMod)
    # roll >= 99: re-roll with the same modifiers
    return socialTable103a(cuMod, tiMod, charCulture, nobleTitle)
def birthTable104(cuMod):
    """Roll for legitimacy of birth (table 104).

    Returns False on a roll of 19+ (illegitimate birth); otherwise
    returns None, preserving the original implicit fall-through.
    """
    if randint(1,20) + cuMod >= 19:
        return False
def illegitBirthTable105(cuMod):
    """Roll for the circumstances of an illegitimate birth (table 105).

    Returns a description string, or None on a roll above 27
    (preserving the original fall-through behaviour).
    """
    roll = randint(1,20) + cuMod
    for limit, circumstance in (
            (12, 'mother was a common prostitute, unmarried'),
            (14, 'mother was raped and remained unmarried'),
            (23, 'mother was unmarried'),
            (27, 'mother was a courtesan')):
        if roll <= limit:
            return circumstance
def familyTable106(cuMod):
    """Roll for the family situation the character grows up in (table 106).

    Parameters
    ----------
    cuMod : int
        Culture modifier from table 102a.

    Returns
    -------
    (familyInfo, raised_by_others) : (str, bool)
        Description of the family, and True when the character was raised
        by guardians or a re-rolled family (roll of 20).

    Side effects
    ------------
    May rebind the module-level `charSocial` on rolls of 21+ (the
    character is forced into destitute/poor status).
    """
    rand = randint(1,20) + cuMod
    global charSocial
    if rand <= 8:
        familyInfo = 'Mother and father only'
    elif rand <= 12:
        familyInfo = 'Extended family. Mother and father, along with ' + str(randint(1,4)) + ' grandparents and ' + str(randint(1,4)) + ' aunts/uncles and cousins'
    elif rand <= 13:
        # coin flip for which side of the family
        rand = randint(1,2)
        if rand > 1:
            familyInfo = "Grandparents on father's side"
        else:
            familyInfo = "Grandparents on mother's side"
    elif rand <= 14:
        rand = randint(1,2)
        if rand > 1:
            familyInfo = "Single grandparent on mother's side"
        else:
            familyInfo = "Single grandparent on father's side"
    elif rand <= 15:
        rand = randint(1,2)
        if rand > 1:
            familyInfo = "Single aunt or uncle on father's side"
        else:
            familyInfo = "Single aunt or uncle on mother's side"
    elif rand <= 16:
        # first coin: aunt vs uncle; second coin: side of the family
        rand = randint(1,2)
        if rand > 1:
            rand = randint(1,2)
            if rand > 1:
                familyInfo = "Aunt on father's side"
            else:
                familyInfo = "Aunt on mother's side"
        else:
            rand = randint(1,2)
            if rand > 1:
                familyInfo = "Uncle on father's side"
            else:
                familyInfo = "Uncle on mother's side"
    elif rand <= 18:
        familyInfo = "Only a mother"
    elif rand <= 19:
        familyInfo = "Only a father"
    elif rand <= 20:
        # raised by others: 8-in-20 chance of guardians (table 754,
        # defined elsewhere in this file), otherwise re-roll a family
        rand = randint(1,20)
        if rand <= 8:
            return guardiansTable754(cuMod), True
        else:
            return familyTable106a(cuMod), True
    elif rand <= 24:
        charSocial = 'destitute'
        familyInfo = 'none, left to fend for yourself'
    elif rand <= 27:
        charSocial = 'poor'
        familyInfo = 'none, raised in an orphanage'
    else:
        # NOTE(review): rolls of 28+ are reachable when cuMod is large
        # enough (rand = d20 + cuMod) -- confirm intended cap with author.
        raise ValueError("familyTable106 is reporting a randint error for some weird fucking reason. This shouldn't be possible.")
    return familyInfo, False
def familyTable106a(cuMod):
    """Roll for the family the character is raised by (re-roll variant).

    Same as familyTable106, but returns the description directly, never
    recurses into the guardians table, and does not touch charSocial.
    Rolls above 19 return None (original fall-through preserved).
    """
    roll = randint(1,20) + cuMod
    if roll <= 8:
        return 'Mother and father only'
    if roll <= 12:
        return ('Extended family. Mother and father, along with %d grandparents and %d aunts/uncles and cousins'
                % (randint(1,4), randint(1,4)))
    if roll <= 13:
        side = "father's" if randint(1,2) > 1 else "mother's"
        return "Grandparents on %s side" % side
    if roll <= 14:
        side = "mother's" if randint(1,2) > 1 else "father's"
        return "Single grandparent on %s side" % side
    if roll <= 15:
        side = "father's" if randint(1,2) > 1 else "mother's"
        return "Single aunt or uncle on %s side" % side
    if roll <= 16:
        # first coin: aunt vs uncle; second coin: side of the family
        relative = 'Aunt' if randint(1,2) > 1 else 'Uncle'
        side = "father's" if randint(1,2) > 1 else "mother's"
        return "%s on %s side" % (relative, side)
    if roll <= 18:
        return "Only a mother"
    if roll <= 19:
        return "Only a father"
def siblingsTable107():
    """Roll the number of siblings (table 107), then delegate to 107a
    for genders and birth order.

    Returns ('none', '', '') on a roll of 1-2, otherwise the
    (male_count, female_count, birth_order) tuple from 107a.
    """
    roll = randint(1,19) # support for 20, just not implementing yet
    if roll <= 2:
        return 'none', '', ''
    if roll <= 9:
        count = randint(1,3)
    elif roll <= 15:
        count = randint(2,4)
    elif roll <= 17:
        count = randint(3,6)
    else:  # roll <= 19
        count = randint(2,8)
    return siblingsTable107a(count)
def siblingsTable107a(number):
    """Roll genders for `number` siblings plus the character's birth order.

    (Table 108 is folded into this function: the birth-order roll at the
    end is that table.)

    Parameters
    ----------
    number : int
        Number of siblings to generate.

    Returns
    -------
    (siblingMale, siblingFemale, birthOrder) : (int, int, str)
    """
    siblingMale = 0
    siblingFemale = 0
    for i in range(number):
        # 9-in-20 chance of a brother, 11-in-20 of a sister
        rand = randint(1,20)
        if rand <= 9:
            siblingMale += 1
        else:
            siblingFemale += 1
    totalSiblings = siblingMale + siblingFemale
    birthOrder = ""
    if totalSiblings == 2:
        rand = randint(1,2)
        if rand == 1:
            birthOrder = 'first born'
        else:
            birthOrder = 'last born'
    elif totalSiblings == 3:
        # BUGFIX: this branch previously re-tested `totalSiblings == 2`,
        # so the three-sibling first/middle/last case was unreachable and
        # fell through to the generic d20 table below.
        rand = randint(1,3)
        if rand == 1:
            birthOrder = 'first born'
        elif rand == 2:
            birthOrder = 'middle born'
        else:
            birthOrder = 'last born'
    else:
        rand = randint(1,20)
        if rand <= 2:
            birthOrder = 'first born'
        elif rand <= 10:
            birthOrder = 'second born'
        elif rand <= 16:
            birthOrder = 'middle born'
        elif rand <= 18:
            birthOrder = 'second-to-last born'
        elif rand <= 20:
            birthOrder = 'last born'
    return siblingMale, siblingFemale, birthOrder
#note to me: I rolled 108 into 107a, that's why it's not here. I know I'll forget this.
def birthTimeTable109():
    """Roll the season and time of day of the character's birth.

    Returns
    -------
    (birthSeason, birthTimeOfDay) : tuple of str
    """
    seasons = ('spring', 'summer', 'autumn', 'winter')
    birthSeason = seasons[randint(1,4) - 1]
    times_of_day = ('midnight', 'late night', 'early morning', 'sunrise',
                    'mid-day', 'afternoon', 'sunset', 'early evening')
    birthTimeOfDay = times_of_day[randint(1,8) - 1]
    return birthSeason, birthTimeOfDay
def placeOfBirthTable110():
    """Roll the place of birth (table 110).

    Returns
    -------
    (description, biMod) : (str, int)
        biMod feeds into the unusual-birth roll on table 112.
        Rolls of 17+ defer to the exotic-location table 111.
    """
    roll = randint(1,20)
    ordinary = (
        (6, ('in the family home', -5)),
        (9, ("in a hospital or healer's hall", -7)),
        (10, ('in a carriage while traveling', 1)),
        (11, ('in a common barn', 1)),
        (13, ('in a foreign land', 2)),
        (14, ('in a cave', 5)),
        (15, ('in the middle of a field', 1)),
        (16, ('in a forest', 2)),
    )
    for limit, result in ordinary:
        if roll <= limit:
            return result
    return exoticBirthLocationTable111()
def exoticBirthLocationTable111():
    """Roll an exotic birth location (table 111).

    Returns (description, biMod). Roll 14 (GM-only in the source
    material) was pruned from the die range, so this is a d19.
    """
    roll = randint(1,19) #yep, it's another one of these, I took out roll 14 due to GM-only, will consider adding back in later
    if roll <= 2:
        return 'double roll thing, will keep noted here for now', 5
    if roll == 3:
        # deitiesTable864 is defined elsewhere in this file
        return 'in a temple of ' + deitiesTable864(), 5
    if roll == 4:
        # 1-in-6 chance of the battlefield itself rather than the camp
        if randint(1,6) == 6:
            return 'in the middle of a battlefield', 8
        return 'at a battlefield camp', 8
    static_results = {
        5: ('in an alley', 5),
        6: ('in a brothel', 2),
        7: ('in home of a local ruler', 2),
        8: ('home of the ruler of the country', 5),
        9: ('palace of an evil person or creature', 15),
        10: ('in a tavern', 2),
        11: ('in the sewers', 10),
        12: ('in a thieves den', 5),
        13: ('in the home of friendly nonhumans', 2),
        # 14 was GM-only in the source material
        14: ('in the temple of an evil diety', 20),
        15: ('on another plane of reality', 15),
        16: ('in another time period', 10),
        17: ('on a ship at sea', 2),
        18: ('in a prison cell', 9),
        19: ("in a wizard's laboratory", 20),
    }
    return static_results[roll]
def unusualBirthTable112(biMod): #this has been cleaned up, removing the GM selecting portions
    """Roll how many unusual birth circumstances apply (table 112).

    Parameters
    ----------
    biMod : int
        Birthplace modifier from table 110/111.

    Returns
    -------
    tuple
        ('Nothing interesting', False) on a roll of 60 or less,
        otherwise (list of circumstance strings, True).
    """
    roll = randint(1,100) + biMod
    if roll <= 60:
        return 'Nothing interesting', False
    if roll <= 76:
        count = 1
    elif roll <= 92:
        count = 2
    elif roll <= 97:
        count = 3
    else:
        count = 4
    return [unusualBirthCircumstancesTable113() for _ in range(count)], True
def unusualBirthCircumstancesTable113():
    """Roll a single unusual birth circumstance (table 113).

    Returns a descriptive string. Several entries carry placeholder comments
    pointing at sub-tables that are not implemented yet.
    """
    rand = randint(1,100)
    if rand <= 5:
        return "a person of note near the character's home died when they were born"
    elif rand <= 10:
        return 'wolves and dogs set up a howling'
    elif rand <= 20:
        return 'mother died in childbirth'
    elif rand <= 23:
        return 'all glassware in the house shattered'
    elif rand <= 25:
        return 'all milk in the area soured'
    elif rand <= 27:
        return 'father believes the character is not his child'
    elif rand <= 31:
        rand = randint(1,5)
        # bool index: "" only on a 5, otherwise the separated-at-birth text
        return 'character has identical twin' + ((" that was separated at birth", "")[rand == 5])
    elif rand <= 34:
        return 'water froze or boiled by itself'
    elif rand <= 37:
        return 'unnatural weather occurred'
    elif rand <= 38:
        return 'unnaturally potent storms raged'
    elif rand <= 41:
        return 'character born at exactly noon'
    elif rand <= 44:
        return 'character born at exactly midnight'
    elif rand <= 48:
        return 'a seer declares that the character will be afflicted by an ancient family curse, ' #table 868 be here
    elif rand <= 50:
        # Bug fix: the sub-roll result was previously discarded, so the
        # stale outer roll (49-50) made the keepsake clause unreachable.
        rand = randint(1,10)
        return 'a goose laid a golden egg' + ((", which the character still has with them", "")[rand >6])
    elif rand <= 53:
        return 'the sky darkened like an eclipse'
    elif rand <= 55:
        return 'the house became infested with poisonous snakes the next day'
    elif rand <= 56:
        return 'all gold in the house turned to lead'
    elif rand <= 57:
        return 'all metal in the house was turned into precious metals'
    elif rand <= 62:
        return 'as an infant, character was left to die on hillside by natural parents'
    elif rand <= 64:
        return 'character is born immediately after a tragedy, ' #table 528-a-gogo
    elif rand <= 69:
        return 'character is born with a birthmark' # here be table 866
    elif rand <= 75:
        return 'character is born with a curse' #868 here
    elif rand <= 81:
        return 'born with a blessing' #869
    elif rand <= 85:
        rand = randint(1,2)
        return 'character has a fraternal twin, ' + (("male", "female")[rand == 1])
    elif rand <= 86:
        return 'character is one of a set of identical triplets'
    elif rand <= 88:
        return 'witch prophesies death of the character' #here be 545
    elif rand <= 93:
        return 'character born with physical affliction' #874 go hurr
    elif rand <= 94:
        return 'character born with psychic powers' #873
    elif rand <= 99:
        return 'a mysterious stranger bestows a gift on the character at birth: ' + giftsTable863()
    else:
        return 'mother was reputed to be a virgin'
def parentTable114a(charCulture, solMod):
    """Roll the household's working arrangement (table 114a).

    charCulture/solMod are forwarded to occupationsTable420 to pick
    culture/status-appropriate occupations.
    """
    rand = randint(1,20)
    if rand <= 12:
        return 'Head of household is a ' + occupationsTable420(charCulture, solMod)
    elif rand <= 14:
        return 'Head of household has two jobs: ' + occupationsTable420(charCulture, solMod)
    elif rand <= 16:
        return 'Head of household does not work, the other parent does. They work as a ' + occupationsTable420(charCulture, solMod)
    elif rand <= 18:
        # Bug fix: missing leading space glued the first occupation to "and".
        return 'Both parents work. Head of household is a ' + occupationsTable420(charCulture, solMod) + ' and other parent is a ' + occupationsTable420(charCulture, solMod)
    elif rand <= 19:
        return 'Head of household was an adventurer, ' #table 757
    else:
        return 'Head of household does not have an apparent occupation, but money is available when needed.'
def parentTable114b():
    """Roll 1-3 noteworthy items about a parent (table 114b).

    Returns a list of description strings from table 114bA.
    """
    noteworthyItems = []
    for _ in range(randint(1,3)):
        # Bug fix: the function object itself was appended instead of its
        # result, and the list was never returned.
        noteworthyItems.append(parentTable114bA())
    return noteworthyItems
def parentTable114bA():
    """Roll one noteworthy fact about a parent (table 114bA).

    Returns a description string. Many branches end with a placeholder
    comment naming the sub-table that should eventually extend the text.
    Note: `rand` is reassigned by the sub-rolls inside several branches; the
    outer elif chain still behaves correctly only because it short-circuits.
    """
    rand = randint(1,20)
    if rand == 1:
        rand = randint(1,6)
        if rand <= 3:
            return 'noted for a personality trait, ' #647
        elif rand <= 5:
            return 'noted for a personality trait, ' #648
        else:
            return 'noted for an exotic personality trait, ' #649
    elif rand == 2:
        return 'had an unusual birth circumstance, ' + unusualBirthCircumstancesTable113()
    elif rand == 3:
        return 'devotes time to a hobby, ' #427
    elif rand == 4:
        # NOTE(review): giftsTable863 is called as a function elsewhere
        # (giftsTable863()) but passed as a collection to random_choice here
        # — confirm which form it actually is.
        return 'possesses an unusual item, ' + random_choice(giftsTable863)
    elif rand == 5:
        return 'is inventive, creative, and artistic'
    elif rand == 6:
        return 'affected by an exotic event which they speak of often, ' #544
    elif rand == 7:
        return 'tells tales of a legendary lost treasure'
    elif rand == 8:
        rand = randint(1,6)
        if rand == 1:
            return 'obsessed with a relationship with someone ' # 750
        elif rand == 2:
            return 'obsessed with an event from their past, ' #215
        elif rand == 3:
            return 'obsessed with working out of a personality trait, ' #needs splitting into sub-tables later
        elif rand == 4:
            return 'obsessed with the accomplishment of a motivation, ' #page 8?
        elif rand == 5:
            return 'obsessed with accomplishing a future event, ' #217
        elif rand == 6:
            return 'obsessed with preventing a future event, ' #217
    elif rand == 9:
        return 'has a secret identity as a ' #occupation when I get to it
    elif rand == 10:
        return 'has a patron, ' #543
    elif rand == 11:
        return 'is a military veteran, ' + militaryTable535a()
    elif rand == 12:
        return 'is very religious, worships ' + deitiesTable864()
    elif rand == 13:
        rand = randint(1,4)
        if rand == 1:
            return 'does not like to talk about an important event in their past, ' #217
        elif rand == 2:
            return "does not like to talk about how they're persecuted for " #217
        elif rand == 3:
            return 'does not like to talk about their past and how important they are to their home town'
        elif rand == 4:
            return 'refuses to speak about a past event'
    elif rand == 14:
        rand = randint(1,4)
        if rand == 1:
            return 'particularly loving towards family'
        elif rand == 2:
            return 'does not love family or children'
        elif rand == 3:
            return 'is unfaithful to spouse'
        elif rand == 4:
            return 'has married more than once, current spouse is number ' + str(randint(1,4))
    elif rand == 15:
        return 'originally from a different culture'
    elif rand == 16:
        return 'originally from a different social status'
    elif rand == 17:
        return 'from a foreign land'
    elif rand == 18:
        rand = randint(1,5) # it's another re-roll
        if rand == 1:
            return 'has a rival ' #762
        elif rand == 2:
            return 'has many enemies ' #roll on 762 a lot
        elif rand == 3:
            return 'has ' + str(randint(3,13)) + ' close friends living nearby'
        elif rand == 4:
            return 'has ' + str(randint(2,7)) + ' jilted ex-lovers'
        elif rand == 5:
            return 'had a companion, ' + companionTable761()
    #elif rand == 6:
    elif rand == 19:
        return 'was horribly wounded, ' #870
    elif rand == 20:
        return 'noted for extremely unusual personality: ' #649
def childhoodEventsTable215a(solMod):
    """Roll significant childhood and adolescent events (table 215a).

    Returns (childhoodEvents, adolescentEvents): two lists of 1-3 event
    strings each, rolled on table 215b with the given solMod.
    """
    childhood = [childhoodEventsTable215b(solMod) for _ in range(randint(1, 3))]
    adolescent = [childhoodEventsTable215b(solMod) for _ in range(randint(1, 3))]
    return childhood, adolescent
def childhoodEventsTable215b(solMod):
    """Roll one significant childhood/adolescent event (table 215b).

    solMod (social-status modifier) shifts the d17 roll; negative values
    reach the hardship entries (-2..0), large positive values the wealth
    entries (18..30). A roll of 0 or 19 re-rolls with no modifier.
    Returns the event description string. Note: `rand` is reassigned by the
    sub-rolls inside several branches; the elif chain still works because it
    short-circuits.
    """
    rand = randint(1,17) + solMod
    if rand == -2:
        significantEvent = "all public assistance is terminated due to war. The character's family is involved in the riots in the poorer sectors of towns and villages"
    elif rand == -1:
        significantEvent = 'while foraging in a trash heap, the character finds ' + giftsTable863()
    elif rand == 0:
        # re-roll without the modifier
        significantEvent = childhoodEventsTable215b(0)
    elif rand == 1:
        significantEvent = 'friends involve the character in illegal activities, ' #534
    elif rand == 2:
        significantEvent = 'a tragedy occurs, ' #528
    elif rand == 3:
        significantEvent = 'The character learns an unusual skill, ' #876
    elif rand == 4:
        significantEvent = 'something wonderful occurs: ' #529
    elif rand == 5:
        significantEvent = "The character learns to be adept at the head of household's occupation. If there is no head of household, then select randomly."
    elif rand == 6:
        rand = randint(1,9)
        significantEvent = 'the character runs away, '
        if rand == 1:
            significantEvent += 'and never returns'
        elif rand == 2:
            significantEvent += 'and returns after ' + str(randint(1,8)) + ' days'
        elif rand == 3:
            significantEvent += 'and returns after ' + str(randint(1,12)) + ' months'
        elif rand == 4:
            significantEvent += 'and returns after ' + str(randint(1,6)) + ' years'
        elif rand == 5:
            significantEvent += 'to a distant land'
        elif rand == 6:
            significantEvent += 'and joins the circus'
        elif rand == 7:
            significantEvent += 'and falls into the hands of criminals, ' #534
        elif rand == 8:
            significantEvent += 'and wanders the land, avoiding authorities'
        elif rand == 9:
            significantEvent += 'and lives with ' + random_choice(nonhumansTable751)
    elif rand == 7:
        significantEvent = 'character has a religious experience, ' #541
    elif rand == 8:
        rand = randint(1,6)
        if rand == 1:
            significantEvent = 'character is loved by parents'
        elif rand == 2:
            significantEvent = 'character is unloved'
        elif rand == 3:
            significantEvent = "family has great plans for the character's future"
        elif rand == 4:
            significantEvent = "family does not approve of character's friends"
        elif rand == 5:
            significantEvent = "family encourages character's interests"
        elif rand == 6:
            rand = randint(1,2)
            significantEvent = (("mother", "father")[rand == 1]) + ' is distant towards the character'
    elif rand == 9:
        significantEvent = 'character serves a patron, ' #543
    elif rand <= 11:
        significantEvent = 'age-specific event'
    elif rand == 12:
        significantEvent = 'character gains a friend, ' #750
    elif rand == 13: #skipped the real 13 and 14 for the time being
        significantEvent = 'an exotic event occurs, ' #544
    elif rand == 14:
        rand = randint(1,2)
        significantEvent = "character's parents split up, character stays with " + (("mother", "father")[rand == 1])
    elif rand == 15:
        rand = randint(1,4)
        if rand == 1:
            significantEvent = 'character is molested by ' #750
        elif rand == 2:
            significantEvent = 'a tragedy occurs, ' #528
        elif rand == 3:
            significantEvent = 'character angers an old woman who puts a curse on them, ' #868
        elif rand == 4:
            significantEvent = 'character acquires a rival, ' #762
    elif rand == 16:
        rand = randint(1,4)
        if rand == 1:
            significantEvent = 'character inherits a large sum of money'
        elif rand == 2:
            significantEvent = 'a fairy blesses the character as a reward for a good deed, ' #869
        elif rand == 3:
            significantEvent = 'something wonderful occurs, ' #529
        elif rand == 4:
            significantEvent = 'character acquires a companion, ' #761
    elif rand == 17:
        significantEvent = 'age-specific event occurs, ' #216a/216b
    elif rand == 18:
        significantEvent = 'character develops jaded tastes for exotic and expensive pleasures'
    elif rand == 19:
        # re-roll without the modifier
        significantEvent = childhoodEventsTable215b(0)
    elif rand == 20:
        significantEvent = "rivals force the character's family to move somewhere new"
    elif rand == 21:
        significantEvent = 'something wonderful occurs, ' #529
    elif rand == 22:
        significantEvent = 'a tragedy occurs, ' #528
    elif rand == 23:
        significantEvent = 'character is betrothed in a political marriage'
    elif rand == 24:
        significantEvent = 'head of household is made a close advisor of a local ruler'
    elif rand == 25: #also skipped 25
        significantEvent = 'family travels widely, visiting several foreign lands'
    elif rand == 26:
        significantEvent = 'a tutor teaches the character an unusual skill, ' #876
    elif rand == 27:
        significantEvent = 'family throws extravagant birthday party for character. Character acquires special gift, ' + giftsTable863()
    elif rand == 28:
        significantEvent = 'character exhibits exotic personality, ' #649
    elif rand == 29:
        significantEvent = 'family gives character ' + str(randint(1,10)) + ' slaves to do with as they see fit'
    elif rand == 30:
        significantEvent = 'family gives character personal estate with ' + str(randint(1,10)) + ' square miles of property'
    return significantEvent
def childhoodEventsTable216a():
    """Roll a childhood-age-specific event (table 216a).

    Returns the event description string; some branches still end at a
    placeholder comment for an unimplemented sub-table.
    """
    rand = randint(1,20)
    if rand == 1:
        return 'a neighbor schools the character, improving their literacy'
    elif rand == 2:
        return 'character becomes emotionally attached to a toy for ' + str(randint(2,20)) + ' years'
    elif rand == 3:
        return 'character has a collection of related things, such as rocks, dolls, animal skulls, etc.'
    elif rand == 4:
        return 'character has a close friendship with a sibling or cousin'
    elif rand == 5:
        return 'character has an imaginary friend'
    elif rand == 6:
        return 'character is a child prodigy with an unusual skill, ' #876
    elif rand == 7:
        return 'character learns use of a weapon appropriate to their culture and social status'
    elif rand == 8:
        return 'character and a friend discover secret hiding place near home'
    elif rand == 9:
        return 'character becomes proficient at a sporting event'
    elif rand == 10:
        return 'friend of the family, an old warrior, tells the character tales of adventure'
    elif rand == 11:
        # Bug fix: previously called nonexistent childhoodEventTables215
        # (NameError); the actual event roller is childhoodEventsTable215b.
        return 'character becomes well-known for the occurance of an event in their life, ' + childhoodEventsTable215b(0)
    elif rand == 12:
        rand = randint(1,10)
        return "one of the character's grandparents dies of natural causes in the presence of the character " + (("that grandparent entrusts the character with a secret", "")[rand < 7])
    elif rand == 13:
        return 'character witnesses a crime being committed by ' + str(randint(1,4)) + ' people. They are unable to catch him. The crime is ' #875
    elif rand == 14:
        return 'race specific event, fuck'
    elif rand == 15:
        return 'an exotic event occurs, ' #544
    elif rand == 16:
        return 'character discovers they are a near exact twin of a young noble, ' + random_choice(nobleTable758civil)
    elif rand == 17:
        return 'a tragedy occurs, ' #528
    elif rand == 18:
        return 'something wonderful occurs, ' #529
    elif rand == 19:
        return childhoodEventsTable216b()
    elif rand == 20:
        return 'character acquires a hobby, ' #427
def childhoodEventsTable216b(charCulture='Civilized', solMod=0):
    """Roll an adolescent-age-specific event (table 216b).

    charCulture/solMod feed the military sub-table; they were previously
    read as (undefined) globals, raising NameError when the military branch
    was rolled. They are now optional parameters with neutral defaults so
    existing zero-argument callers keep working.
    """
    rand = randint(1,19) #again, not doing the re-roll shit
    if rand == 1:
        return 'character learns to use weapon appropriate to their culture and social status'
    elif rand == 2:
        return 'to be fashionable, character gets tatto on their face, ' #866
    elif rand <= 4:
        return 'apprenticed to learn an occupation, ' #419
    elif rand == 5:
        return 'a wizard teaches the character a simple spell'
    elif rand == 6:
        eventPresuppose = 'character is accused of a crime which they did not commit (' #875
        rand = randint(1,6)
        if rand == 1:
            return eventPresuppose + ', is imprisoned, ' #540 here
        elif rand == 2:
            return eventPresuppose + ', is stockaded and flogged publicly as an example to others'
        elif rand == 3:
            rand = randint(1,3)
            eventPresuppose = eventPresuppose + ', is tortured to reveal the names of accomplices'
            if rand == 3:
                eventPresuppose = eventPresuppose #870
            # Bug fix: this branch previously fell through and returned None.
            return eventPresuppose
        elif rand == 4:
            return eventPresuppose + ', is found innocent, but not before being humiliated'
        elif rand == 5:
            return eventPresuppose + ', is sentenced to death, but is rescued by outlaws.' #534 goes here
        elif rand == 6:
            return eventPresuppose + ', is sold into slavery: ' + enslavedTable539()
    elif rand == 7:
        return 'character learns an unusual skill, ' #876
    elif rand == 8:
        return 'character learns a hobby, ' #427
    elif rand == 9:
        return "character learns head of household's occupation"
    elif rand == 10:
        eventPresuppose = 'character joins the military ' + militaryTable535(charCulture, solMod)
        rand = randint(1,4)
        if rand == 1:
            eventPresuppose = eventPresuppose + 'because they were drafted, '
        elif rand == 2:
            # Bug fix: misspelled variable (eventPressuppose) raised NameError.
            eventPresuppose = eventPresuppose + 'because they patriotically volunteered, '
        elif rand == 3:
            eventPresuppose = eventPresuppose + 'because they forced to, '
        elif rand == 4:
            eventPresuppose = eventPresuppose + 'on mistake, '
        # Bug fix: the assembled string was previously never returned.
        return eventPresuppose + 'Info: '
    elif rand == 11:
        rand = randint(1,5)
        if rand == 5:
            return 'character participated in a successful rebellion'
        else:
            rand = randint(1,10)
            if rand != 10:
                return 'character participated in a failed rebellion, but only a few close friends knew of it'
            else:
                return 'character was known to participate in a failed rebellion and is now an outlaw'
    elif rand == 12:
        # Bug fix: called nonexistent childhoodEventTables215b() (NameError)
        # and omitted the required solMod argument.
        return 'character becomes famous for ' + childhoodEventsTable215b(0)
    elif rand <= 14:
        return 'character has a romantic encounter, ' #542
    elif rand == 15:
        return 'character learns to speak another language'
    elif rand == 16:
        return 'race specific event. fuck.'
    elif rand == 17:
        return 'an exotic event occurs, ' #544
    elif rand == 18:
        return 'a tragedy occurs, ' #528
    elif rand == 19:
        return 'something wonderful occurs, ' #529
    # elif rand == 20:
def adulthoodSignificantEventsTable217(charSocial, solMod, charCulture):
    """Roll 1-3 significant adulthood events (table 217).

    Returns a list of event strings from table 217a.
    """
    return [adulthoodSignificantEventsTable217a(charSocial, solMod, charCulture)
            for _ in range(randint(1, 3))]
def adulthoodSignificantEventsTable217a(charSocial, solMod, charCulture):
    """Roll one significant adulthood event (table 217a).

    solMod shifts the base 2d roll; very negative values reach the hardship
    entries, large positive values the nobility entries (40+). Returns the
    event description string. Note: `rand` is reassigned by sub-rolls inside
    several branches; the elif chain still works because it short-circuits.
    """
    rand = randint(2,39) + solMod
    if rand == -1:
        return 'while foraging or hunting for food, the character saves a trapped predatory beast. Later, the same beast saves the character.'
    elif rand == 0:
        # Bug fix: 'charsocial' was a NameError (parameter is charSocial).
        return 'to earn a living, the character learns a new occupation: ' + occupationsTable420(charSocial, solMod)
    elif rand <= 2:
        return 'something wonderful occurs, ' #529
    elif rand <= 4:
        return 'a tragedy occurs, ' #528
    elif rand == 5:
        return 'character learns an unusual skill, ' #876
    elif rand == 6:
        rand = randint(1,5)
        if rand == 5:
            return 'character participated in a successful rebellion'
        else:
            rand = randint(1,10)
            if rand != 10:
                return 'character participated in a failed rebellion, but only a few close friends knew of it'
            else:
                return 'character was known to participate in a failed rebellion and is now an outlaw'
    elif rand == 7:
        return 'Character serves a patron, ' #543
    elif rand == 8:
        eventPresuppose = 'Character has wanderlust and decides to travel for ' + str(randint(1,6)) + ' years. During this time, the character '
        rand = randint(1,6)
        if rand == 1:
            eventPresuppose = eventPresuppose + 'visits major cities and towns in the land.'
        elif rand == 2:
            eventPresuppose = eventPresuppose + 'signs on as a seaman on a ship.'
        elif rand == 3:
            eventPresuppose = eventPresuppose + 'journeys through mountains.'
        elif rand == 4:
            eventPresuppose = eventPresuppose + 'investigates nearby dark woods.'
        elif rand == 5:
            eventPresuppose = eventPresuppose + 'travels to a distant land, learning a foreign language.'
        elif rand == 6:
            eventPresuppose = eventPresuppose + 'lives with nonhumans, ' + random_choice(nonhumansTable751)
        return eventPresuppose
    elif rand <= 10:
        return 'character has a religious experience, ' #541
    elif rand == 11:
        return "character saves someone's life, and that person becomes the character's companion: " #761
    elif rand <= 13:
        return 'race-specific event. fuck.'
    elif rand == 14: #skipping 14
        return 'an exotic event occurs, ' #544
    elif rand == 15:
        return 'character learns to use a weapon appropriate to their culture and social status'
    elif rand == 16:
        rand = randint(1,3)
        if rand == 1:
            return 'a tragedy occurs, ' #528
        elif rand == 2:
            return 'the character angers an old woman, who curses them, ' #868
        elif rand == 3:
            return 'character acquires a rival, ' #762
    elif rand == 17:
        rand = randint(1,3)
        if rand == 1:
            return 'an old man whom the character rescues blsses the character, ' #869
        elif rand == 2:
            return 'something wonderful occurs, ' #529
        elif rand == 3:
            return 'character acquires a companion, ' #761
    elif rand == 18:
        # recursive re-roll: become famous for another event
        return 'character becomes well-known for ' + adulthoodSignificantEventsTable217a(charSocial, solMod, charCulture)
    elif rand == 19:
        return 'character develops an exotic personality trait, ' #649
    elif rand == 20:
        return 'character inherits property from a relative' #863 sub-table???
    elif rand <= 22: #22 regular was skipped, so this should be 23-24
        return 'character becomes involved in illegal activities, ' #534a
    elif rand == 23:
        return 'character learns to use an unusual weapon'
    elif rand <= 26:
        eventPresuppose = 'character joins the military because '
        rand = randint(1,4)
        if rand == 1:
            eventPresuppose = eventPresuppose + 'they were drafted, '
        elif rand == 2:
            eventPresuppose = eventPresuppose + 'they patriotically volunteered, '
        elif rand == 3:
            eventPresuppose = eventPresuppose + 'forced to, '
        elif rand == 4:
            eventPresuppose = eventPresuppose + 'on mistake, '
        return eventPresuppose + 'Info: ' + militaryTable535(charCulture, solMod)
    elif rand <= 30:
        return 'character has a romantic encounter, ' #542
    elif rand == 31:
        return 'character acquires a hobby, ' #427
    elif rand == 32:
        return 'character develops jaded tastes for exotic and expensive pleasures'
    elif rand <= 34:
        eventPresuppose = 'character is accused of a crime which they did not commit (' #875
        rand = randint(1,6)
        if rand == 1:
            return eventPresuppose + ', is imprisoned, ' #540 here
        elif rand == 2:
            return eventPresuppose + ', is stockaded and flogged publicly as an example to others'
        elif rand == 3:
            rand = randint(1,3)
            eventPresuppose = eventPresuppose + ', is tortured to reveal the names of accomplices'
            if rand == 3:
                eventPresuppose = eventPresuppose #870
        elif rand == 4:
            return eventPresuppose + ', is found innocent, but not before being humiliated'
        elif rand == 5:
            return eventPresuppose + ', is sentenced to death, but is rescued by outlaws.' #534 goes here
        elif rand == 6:
            return eventPresuppose + ', is sold into slavery: ' + enslavedTable539() + ')'
        # only the torture branch falls through to here
        return eventPresuppose
    elif rand <= 36: #skipping 37-38, as well as 39
        return 'character learns an occupation, ' + occupationsTable420(charSocial, solMod)
    elif rand <= 39:
        return adulthoodSignificantEventsTable217a(charSocial, solMod+5, charCulture)
    elif rand == 40:
        return 'character is made close advisor to a local ruler'
    elif rand <= 43:
        return 'character develops an exotic personality trait, ' #649
    elif rand <= 45:
        return "family sends character a personal servant who refuses to leave the character's service. The servant becomes a companion: " #761
    elif rand <= 48:
        return 'a ruler with slightly lower social status than the character proposes marriage. The marriage is obviously political in nature.'
    elif rand <= 55:
        return 'a radical change in political structure strips the character of all land and nobility.'
def apprenticeshipsTable419a():
    """Pick a craft for an apprenticeship, drawing evenly from the three
    craft tiers (table 419a)."""
    tier = (craftsTable424a, craftsTable424b, craftsTable424c)[randint(1, 3) - 1]
    return random_choice(tier)
def apprenticeshipsTable419b():
    """Roll a noteworthy event from an apprenticeship (table 419b)."""
    rand = randint(1,9) #again, not doing the re-roll shit
    if rand == 1:
        return "character's master is known for their strong personality"
    elif rand == 2:
        return "character manages to accidentally break the master's valuable collection of ceramic pots. For this, he's expelled."
    elif rand == 3:
        return "Character stumbles upon a lost secret of the craft, which his master takes credit for."
    elif rand == 4:
        return "Character continues to study the craft with his master for an extra " + str(randint(1,6)) + " years."
    elif rand == 5:
        return "The character discovered that the master's shop is a front for a criminal network."
    elif rand == 6:
        return "The master is world-renowned in their craft."
    elif rand == 7:
        rand = randint(1,2)
        return "A " + (("female", "male")[rand == 1]) + " apprentice becomes best friends with the character. That person would later become a master of the craft."
    elif rand == 8:
        return "An exotic event occurs, affecting the character's master: " #544
    elif rand == 9:
        # Bug fix: table 217a takes (charSocial, solMod, charCulture); the
        # old single-argument call raised TypeError.
        # NOTE(review): neutral solMod and 'Civilized' culture assumed here —
        # confirm what the journey event should actually use.
        return "Character accompanies their master on a long journey: " + adulthoodSignificantEventsTable217a(randint(1,5), 0, 'Civilized')
def occupationsTable420(charCulture, solMod=0):
    """Dispatch to the culture-appropriate occupation table (table 420).

    Any culture other than Primitive/Nomad/Barbarian rolls on the civilized
    table, which also uses solMod.
    """
    if charCulture == "Primitive":
        return primitiveOccupationsTable420a()
    if charCulture == "Nomad":
        return nomadOccupationsTable421a()
    if charCulture == "Barbarian":
        return barbarianOccupationsTable422a()
    return civilizedOccupationsTable423a(solMod)
def primitiveOccupationsTable420a():
    """Roll a primitive-culture occupation (table 420a)."""
    roll = randint(1, 20)
    if roll <= 9:
        return 'fisherman'
    if roll <= 18:
        return 'hunter'
    if roll <= 19:
        return 'warrior'
    # a natural 20 rolls on the rare-occupation sub-table
    return primitiveOccupationsTable420b()
def primitiveOccupationsTable420b():
rand = randint(1,4)
if rand == 1:
return 'shaman'
elif rand == 2:
return 'basket weaver'
elif rand == 3:
return 'artist'
elif rand == 4:
return 'toolmaker'
def nomadOccupationsTable421a():
    """Roll a nomad-culture occupation (table 421a)."""
    roll = randint(1, 20)
    if roll <= 2:
        return random_choice(craftsTable424a)
    if roll <= 12:
        return 'herder'
    if roll <= 16:
        return 'hunter'
    if roll <= 18:
        return 'warrior'
    if roll == 19:
        return 'merchant'
    # a natural 20 rolls on the rare-occupation sub-table
    return nomadOccupationsTable421b()
def nomadOccupationsTable421b():
rand = randint(1,10)
if rand == 1:
return 'priest'
elif rand == 2:
return 'healer'
elif rand == 3:
return 'adventurer, ' #757
elif rand == 4:
return 'career criminal, ' #755
elif rand == 5:
return 'tentmaker'
elif rand == 6:
return 'weapon master'
elif rand == 7:
return 'counselor/philosopher'
elif rand == 8:
return '423a'
elif rand == 9:
return 'horsemaster'
elif rand == 10:
return 'entertainer'
def barbarianOccupationsTable422a():
    """Roll a barbarian-culture occupation (table 422a)."""
    roll = randint(1, 20)
    if roll <= 2:
        return random_choice(craftsTable424a)
    if roll <= 8:
        return 'farmer'
    if roll <= 11:
        return 'fisherman'
    if roll <= 13:
        return 'herder'
    if roll <= 15:
        return 'hunter'
    if roll <= 17:
        return 'warrior'
    if roll == 18:
        return random_choice(craftsTable424b)
    if roll == 19:
        return merchantsTable425(0)
    # a natural 20 rolls on the rare-occupation sub-table
    return barbarianOccupationsTable422b()
def barbarianOccupationsTable422b():
    """Roll a rare barbarian occupation (table 422b)."""
    rand = randint(1,20)
    if rand <= 7:
        # Bug fix: civilizedOccupationsTable423a requires solMod; the old
        # zero-argument call raised TypeError. Neutral modifier used.
        return civilizedOccupationsTable423a(0)
    elif rand <= 9:
        return 'priest'
    elif rand == 10:
        return 'healer'
    elif rand == 11:
        return 'adventurer, ' #757
    elif rand == 12:
        return 'ship builder'
    elif rand == 13:
        return 'career criminal, ' #755
    elif rand == 14:
        return 'wizard, witch, or warlock'
    elif rand == 15:
        return 'counselor'
    elif rand == 16:
        return 'horsemaster'
    elif rand == 17:
        return 'explorer'
    elif rand == 18:
        return 'entertainer'
    elif rand == 19:
        return 'forester'
    elif rand == 20:
        return random_choice(craftsTable424c)
def civilizedOccupationsTable423a(solMod):
    """Dispatch a civilized occupation roll by social standing (table 423a).

    solMod shifts the d10 roll; low totals hit the poorer sub-tables, high
    totals the wealthier ones.
    """
    roll = randint(1, 10) + solMod
    if roll == -2:
        return nomadOccupationsTable421a()
    if roll <= 5:
        return civilizedOccupationsTable423b()
    if roll == 6:
        return barbarianOccupationsTable422a()
    if roll == 7:
        return civilizedOccupationsTable423e()
    if roll <= 11:
        return civilizedOccupationsTable423c()
    if roll <= 14:
        return civilizedOccupationsTable423d()
    if roll == 15:
        return civilizedOccupationsTable423e()
    if roll <= 23:
        return civilizedOccupationsTable423d()
def civilizedOccupationsTable423b(charCulture='Civilized', solMod=0):
    """Roll a lower-class civilized occupation (table 423b).

    charCulture/solMod feed the soldier sub-table; they were previously read
    as (undefined) globals, raising NameError on a roll of 18. They are now
    optional parameters with neutral defaults so existing zero-argument
    callers keep working.
    """
    rand = randint(1,20)
    if rand == 1:
        return 'beggar'
    elif rand <= 6:
        rand = randint(1,4)
        if rand == 1:
            return 'freeman farmer'
        elif rand == 2:
            return 'herder'
        elif rand == 3:
            return 'sharecropper'
        elif rand == 4:
            return 'serf'
    elif rand <= 7:
        return 'tinker'
    elif rand <= 8:
        return 'sailor'
    elif rand <= 10:
        rand = randint(1,6)
        if rand == 1:
            return 'miner'
        elif rand == 2:
            return 'stone cutter'
        elif rand == 3:
            return 'wood cutter'
        elif rand == 4:
            return 'charcoal burner'
        elif rand == 5:
            return 'peat cutter'
        elif rand == 6:
            return 'unskilled laborer'
    elif rand <= 11:
        return 'launderer'
    elif rand <= 14:
        return 'fisherman'
    elif rand <= 15:
        rand = randint(1,6)
        if rand == 1:
            return 'butler'
        elif rand == 2:
            return 'cook'
        elif rand == 3:
            return 'housekeeper'
        elif rand == 4:
            return 'gardener'
        elif rand == 5:
            return 'stable hand'
        elif rand == 6:
            return 'footman'
    elif rand <= 16:
        rand = randint(1,4)
        if rand == 1:
            return 'bartender'
        elif rand == 2:
            return 'serving person'
        elif rand == 3:
            return 'housekeeper'
        elif rand == 4:
            return 'bouncer'
    elif rand <= 17:
        return 'street vendor'
    elif rand <= 18:
        return 'soldier' + militaryTable535(charCulture, solMod)
    elif rand == 19:
        return random_choice(craftsTable424a)
    elif rand == 20:
        return 'second hand shop'
def civilizedOccupationsTable423c(solMod=0):
    """Roll a middle-class civilized occupation (table 423c).

    solMod feeds the overseer sub-roll; it was previously read as an
    (undefined) global, raising NameError on a roll of 13. It is now an
    optional parameter so existing zero-argument callers keep working.
    """
    rand = randint(1,20)
    if rand == 1:
        return 'money lender'
    elif rand <= 5:
        return 'merchant'
    elif rand <= 6:
        return 'business owner, ' + civilizedOccupationsTable423b()
    elif rand <= 8:
        return random_choice(craftsTable424b)
    elif rand <= 9:
        rand = randint(1,4)
        if rand == 1:
            return 'weapon instructor'
        elif rand == 2:
            return 'unusual skill instructor, ' #876
        elif rand == 3:
            rand = randint(1,4)
            if rand == 1:
                addIt = 'combat skills'
            elif rand == 2:
                addIt = 'horse skills'
            elif rand == 3:
                addIt = 'forestry skills'
            elif rand == 4:
                addIt = 'naval skills'
            return 'military instrutor in ' + addIt
        elif rand == 4:
            rand = randint(1,3)
            # Bug fix: these random_choice results were computed but
            # discarded, so the branch returned None.
            if rand == 1:
                return random_choice(craftsTable424a)
            elif rand == 2:
                return random_choice(craftsTable424b)
            elif rand == 3:
                return random_choice(craftsTable424c)
    elif rand <= 10:
        return 'government official, ' #752
    elif rand <= 11:
        return random_choice(craftsTable424a)
    elif rand <= 12:
        return 'chef'
    elif rand <= 13:
        return 'an overseer of ' + civilizedOccupationsTable423a(solMod)
    elif rand <= 14:
        return 'innkeeper'
    elif rand <= 15:
        return 'scribe'
    elif rand <= 16:
        return 'guide/pilot'
    elif rand <= 17:
        return 'ship captain (not own ship)'
    elif rand <= 18:
        return 'engineer'
    elif rand <= 19:
        return 'teacher'
    elif rand <= 20:
        return 'tavern owner'
def civilizedOccupationsTable423d():
    """Roll an upper-class civilized occupation (table 423d)."""
    rand = randint(1,20)
    if rand == 1:
        return 'alchemist'
    elif rand == 2:
        return 'engineer'
    elif rand == 3:
        return 'architect'
    elif rand == 4:
        return 'chlurgeon'
    elif rand <= 7:
        return merchantsTable425(0)
    elif rand == 8:
        return 'craftsmen' #424c
    elif rand == 9:
        return 'courtier/courtesar'
    elif rand == 10:
        return 'diplomat'
    elif rand == 11:
        return 'author/playwrite/poet'
    elif rand == 12:
        return 'litigation trickster'
    elif rand == 13:
        return 'philosopher'
    elif rand == 14:
        return 'crafter' #424b
    elif rand == 15:
        return 'interpreter'
    elif rand == 16:
        return 'government official' #752
    elif rand == 17:
        return 'banker'
    elif rand == 18:
        # Bug fix: civilizedOccupationsTable423a requires solMod; the old
        # zero-argument call raised TypeError. Neutral modifier used.
        return 'business owner: ' + civilizedOccupationsTable423a(0)
    elif rand == 19:
        return 'landlord'
    elif rand == 20:
        return 'craftmaster' #lots of shit here
def civilizedOccupationsTable423e():
    """Roll an unusual civilized occupation (table 423e)."""
    rand = randint(1,20)
    if rand == 1:
        return 'assassin'
    elif rand == 2:
        return 'gladiator'
    elif rand == 3:
        return 'adventurer, ' #757
    elif rand == 4:
        return 'priest' #541b
    elif rand == 5:
        return 'wizard'
    elif rand == 6:
        # Bug fix: civilizedOccupationsTable423a requires solMod; the old
        # zero-argument calls raised TypeError. Neutral modifier used.
        return 'jack of all trades: ' + civilizedOccupationsTable423a(0) + ' and ' + civilizedOccupationsTable423a(0) + ' and ' + civilizedOccupationsTable423a(0)
    elif rand == 7:
        return 'career criminal, ' #755
    elif rand == 8:
        return 'entertainer'
    elif rand == 9:
        return 'printer'
    elif rand == 10:
        return 'private detective or spy'
    elif rand == 11:
        return 'professional guild thief, ' #534
    elif rand == 12:
        return 'astrologer/diviner/fortune teller'
    elif rand == 13:
        return 'rumormonger'
    elif rand == 14:
        rand = randint(1,4)
        if rand == 1:
            return 'doomsayer'
        elif rand == 2:
            return 'oracle'
        elif rand == 3:
            return 'hermit'
        elif rand == 4:
            return 'seer'
    elif rand == 15:
        return 'charlot or horse racer'
    elif rand == 16:
        return 'professional gambler'
    elif rand == 17:
        return 'healer/herbalist'
    elif rand == 18:
        return 'scientist'
    elif rand == 19:
        return 'veterinarian'
    elif rand == 20:
        return 'ship builder'
# Craft tables 424a-c: keys are craft names, values are relative weights
# (all 1, i.e. uniform) consumed by random_choice. Tier a = common crafts.
craftsTable424a = {
    'blacksmith': 1,
    'potter': 1,
    'weaver': 1,
    'stone mason': 1,
    'baker': 1,
    'butcher': 1,
    'carpenter': 1,
    'tanner': 1,
    'rope and net maker': 1,
    'leather worker': 1,
    'cobbler': 1,
    'painter': 1,
    'spinner': 1,
    'dyer': 1,
    'fletcher': 1,
    'sailmaker': 1,
    'saddle and riding harness maker': 1
}
# Tier b = skilled trades.
craftsTable424b = {
    'shipwright': 1,
    'wheel/cartwright': 1,
    'distiller': 1,
    'fuller': 1,
    'sign painter': 1,
    'chandler': 1,
    'miller': 1,
    'armor smith': 1,
    'sausage maker': 1,
    'brewer': 1,
    'animal trainer': 1,
    'plasterer': 1,
    'glazier': 1,
    'tailor': 1,
    'copper and pewter smith': 1,
    'glassblower': 1,
    'cabinet maker': 1,
    'weapon master': 1,
    'dress maker': 1,
    'sword-dancer': 1
}
# Tier c = fine/luxury crafts.
craftsTable424c = {
    'silver smith': 1,
    'costumer': 1,
    'goldsmith': 1,
    'jeweler': 1,
    'instrument maker': 1,
    'clock maker': 1,
    'cartographer': 1,
    'perfumer': 1,
    'animal trainer': 1,
    'apothecary': 1,
    'furrier': 1,
    'horse breeder': 1,
    'artist': 1,
    'wine maker': 1,
    'oculist': 1,
    'pastry chef': 1,
    'confectioner': 1,
    'paper and ink maker': 1,
    'sword smith': 1,
    'illuminator': 1
}
def merchantsTable425(solMod):
rand = randint(1,16)
if rand == 0:
return 'pawnshop'
elif rand == 1:
return 'caravan master'
elif rand == 2:
return 'tavernkeeper'
elif rand == 3:
return 'trader'
elif rand == 4:
return 'innkeeper'
elif rand == 5:
return 'dry goods seller'
elif rand == 6:
return 'curio merchant'
elif rand == 7:
return 'snake oil salesman'
elif rand == 8:
return 'book seller'
elif rand == 9:
return 'clothing seller'
elif rand == 10:
return 'weapon shop'
elif rand == 11:
return 'fishmonger'
elif rand == 12:
return 'green grocer'
elif rand == 13:
return 'wine merchant'
elif rand == 14:
return 'importer'
elif rand == 15:
return 'furniture dealer'
elif rand == 16:
return 'slaver'
elif rand == 17:
return 'carpets & tapestries'
elif rand == 18:
return 'livestock trader'
elif rand == 19:
return 'shipping agent'
elif rand == 20:
return 'silk merchant'
elif rand == 21:
return 'art dealer'
elif rand == 22:
return 'gem merchant'
elif rand == 23:
return 'real estate broker'
elif rand == 24:
return 'lumber merchant'
elif rand == 28:
return 'master merchant: ' + merchantsTable425(6) + ', ' + merchantsTable425(6) + ', ' + merchantsTable425(6)
def hobbiesTable427():
    """Roll a random hobby (table 427) and append an interest level (427c)."""
    roll = randint(1,20)
    if roll == 10:
        # Sub-table: scholarly interests.
        topics = ('history', 'religion', 'art', 'astronomy', 'astrology',
                  'other cultures', 'magic', 'weapons')
        hobby = 'studying ' + topics[randint(1,8) - 1]
    elif roll == 11:
        # Sub-table: athletic pursuits.
        sports = ('wrestling', 'running', 'fencing', 'team ball sport',
                  'horse racing', 'swimming', 'archery', 'boxing')
        hobby = sports[randint(1,8) - 1]
    else:
        simple = {
            1: 'collecting something',
            2: 'dancing',
            3: 'playing a musical instrument',
            4: 'reading for enjoyment',
            5: 'writing creatively',
            6: 'acting',
            7: 'drawing or painting',
            8: 'needlework',
            9: 'singing',
            12: 'building detailed models',
            13: 'developing appreciation of the arts',
            14: 'hairdressing and cosmetics',
            15: 'hunting for sport',
            16: 'gardening',
            17: 'breeding dogs',
            18: 'animal husbandry',
            19: 'fishing for sport',
            20: 'heraldry'
        }
        hobby = simple[roll]
    return hobby + ' (interest: ' + hobbiesTable427c() + ')'
def hobbiesTable427c():
    """Roll how devoted the character is to their hobby (table 427c)."""
    roll = randint(1,10)
    if roll <= 2:
        return 'casual'
    if roll <= 7:
        return 'sporadic and variable'
    if roll <= 9:
        return 'devoted'
    return 'consuming passion'
def tragediesTable528():
    """Roll a tragedy that befell the character (table 528).

    Reads the module-level ``solMod`` (social-standing modifier) and adds
    it to a d20-ish roll, so results range roughly from -2 upward.  Many
    entries still reference other tables by number (#540, #545, #546,
    #750, #753, #868, #870, #875) that are not implemented here.
    Re-roll branches currently re-roll WITH solMod although the source
    table calls for re-rolling without it.

    Fixed: the rand == 11 branch used independent ``if`` statements, so
    the final ``if rand <= 8`` (always true) clobbered the earlier
    choices and every injury was 'an attack by '; it is now a proper
    elif chain.
    """
    global solMod
    rand = randint(1,19) + solMod
    if rand == -2:
        return 'wild beasts attack. Character is injured ' #870 + #753
    elif rand == -1:
        return tragediesTable528() # NOTE(review): should re-roll without solMod
    elif rand == 0:
        return 'imprisoned for a crime the character did not commit, ' #875, #540
    elif rand == 1:
        return tragediesTable528() # NOTE(review): source table has a distinct entry here
    elif rand == 2:
        return 'parents/guardians unable to pay their taxes, ' #546
    elif rand == 3:
        return 'a favorite pet dies' #requires 750
    elif rand == 4:
        return 'orphaned! ' #546
    elif rand == 5:
        tragedy = 'place character lives is wiped out by '
        rand = randint(1,6)
        if rand == 1:
            return tragedy + ' a deadly disease'
        elif rand <= 3:
            return tragedy + ' a terrible fire'
        elif rand <= 5:
            return tragedy + ' war'
        elif rand == 6:
            return tragedy #750 goes here
    elif rand == 6:
        return 'character is responsible for a death, ' #750 and 545
    elif rand == 7:
        return 'orphaned! ' #546
    elif rand == 8: #skipping the original 8
        tragedy = 'a favorite possession is '
        rand = randint(1,6)
        if rand <= 3:
            return tragedy + ' lost'
        elif rand <= 5:
            return tragedy + ' stolen'
        elif rand == 6:
            return tragedy + ' stolen, with a fake left in its place'
    elif rand == 9:
        rand = randint(1,6)
        if rand <= 3:
            tragedy1 = 'father is outlawed due to '
        elif rand == 4:
            tragedy1 = 'mother is outlawed due to '
        elif rand <= 6:
            tragedy1 = 'parents are both outlawed due to '
        return tragedy1 #875
    elif rand == 10:
        return 'character sold into slavery, ' + enslavedTable539()
    elif rand == 11:
        rand = randint(1,8)
        if rand <= 4:
            tragedy1 = 'an accident'
        elif rand == 5:
            tragedy1 = 'a terrible fire'
        elif rand == 6:
            tragedy1 = 'an animal attack'
        else: # 7-8
            tragedy1 = 'an attack by ' #750
        return 'character receives a severe injury due to ' + tragedy1
    elif rand == 12:
        rand = randint(1,2)
        if rand == 1:
            tragedy = 'father was killed by '
        elif rand == 2:
            tragedy = 'mother was killed by '
        rand = randint(1,3)
        if rand <= 2:
            return tragedy + 'an accident'
        elif rand == 3:
            return tragedy #750 and 545
    elif rand == 13:
        return 'character is banned from performing their profession and is cast out of guilds, their identity is known and they cannot continue to practice in the nearby vicinity'
    elif rand == 14:
        return 'if character has a lover, roll on this table' # TODO: elaborate once the basics are written
    elif rand == 15:
        return 'a disease almost kills the character and leaves horrible scars'
    elif rand == 16:
        # NOTE(review): relies on a module-level charCulture — confirm it is set.
        return "war ravages the character's homeland, forcing them to join the military: " + militaryTable535(charCulture, solMod)
    elif rand == 17:
        return "a fire destroys the character's home, along with all of their personal belongings"
    elif rand == 18:
        return "character is cursed " #868
    elif rand == 19:
        return "character's best friend dies" #545
    elif rand == 20:
        preSuppose = "family estate destroyed by "
        rand = randint(1,6)
        if rand == 1:
            return preSuppose + '' # TODO: entry not yet written
        elif rand <= 3:
            return preSuppose + 'a terrible fire'
        elif rand == 4:
            return preSuppose + 'an unexplainable accident'
        elif rand == 5:
            return preSuppose + 'war'
        elif rand == 6:
            return preSuppose + "someone's actions" #750
    elif rand == 21:
        return 'imprisoned for a crime the character did not commit, ' #875, #540
    elif rand == 22:
        return tragediesTable528() # NOTE(review): should re-roll without solMod
    elif rand == 23:
        return "character's family loses all wealth"
    elif rand == 24:
        return 'character is disinherited by parents'
    elif rand <= 26:
        return 'character is forced into an unwanted political marriage.'
    elif rand <= 28:
        return "a shift in economy causes severe inflation, causing wealth of character's family to drop to a tenth of what it was"
    elif rand <= 30:
        return tragediesTable528() # NOTE(review): should re-roll without solMod
    elif rand == 31:
        return "source of character or character's family's income is destroyed or lost"
    elif rand == 32:
        rand = randint(1,6)
        return "character's family is stripped of all titles and lands by the ruler of the land" + (("", ", the family is outlawed")[rand == 6])
def wonderfulTable529(solMod):
    """Roll a wonderful event that happened to the character (table 529).

    solMod (social-standing modifier) is added to a d20 roll.  Entries
    for rolls 11 and above are still unwritten in this port and return
    None or an empty string; they are marked TODO below.
    """
    rand = randint(1,20) + solMod
    if rand <= -2:
        return "Wild beasts attack the character's camp. The character discovers they have the innate ability to command wild beasts."
    elif rand <= -1:
        return "The ruler pardons all prisoners of the land."
    elif rand <= 1:
        rand = randint(1,2)
        # Fixed: the original called the string with a (tuple, bool) pair,
        # raising TypeError; index the tuple instead (1 -> boy, 2 -> girl).
        return "If the character has a lover or spouse, they are blessed with a beautiful, healthy " + ("boy", "girl")[rand - 1] + "."
    elif rand <= 2:
        return "While repairing the family home, the character discovers a " + giftsTable863()
    elif rand <= 3:
        return "Character acquires a " #759
    elif rand <= 4:
        return "Character is adopted into a wealthy family, treated well."
    elif rand <= 5:
        return "The village the character lives in is destroyed, but is rebuilt and becomes more prosperous than ever."
    elif rand <= 6:
        return "The character is responsible for saving the life of " #750
    elif rand <= 9:
        # Re-roll without the social modifier.
        return wonderfulTable529(0)
    elif rand <= 10:
        # Fixed: typo 'lcoal' -> 'local'.
        return "An evil local ruler outlaws the character's parents. " + str(randint(1,10)) + " years later, the ruler's liege overthrows them. The character's parents are pardoned and honored with elevation to nobility due to their role in the ruler's demise."
    elif rand <= 11:
        return "" # TODO: entry not yet written
    elif rand <= 12:
        return # TODO: entry not yet written
    elif rand <= 14:
        return # TODO: entry not yet written
    elif rand <= 16:
        return # TODO: entry not yet written
    elif rand <= 18:
        return # TODO: entry not yet written
    elif rand <= 19:
        return # TODO: entry not yet written
    elif rand <= 20:
        return # TODO: entry not yet written
    elif rand <= 21:
        return # TODO: entry not yet written
    elif rand <= 23:
        return # TODO: entry not yet written
    elif rand <= 25:
        return # TODO: entry not yet written
    elif rand <= 27:
        return # TODO: entry not yet written
    elif rand <= 29:
        return # TODO: entry not yet written
    elif rand <= 32:
        return # TODO: entry not yet written
    elif rand <= 33:
        return # TODO: entry not yet written
#skipping race-specific events for now, which is 530-533
def underworldTable534():
    """Summarize a life of crime: motive (534a), crime type (534b) and a
    career event (534c), joined into one description string.

    Fixed: the original computed all three pieces and then implicitly
    returned None, discarding the result.
    """
    beginning = random_choice(underworldTable534a)
    crimeType = underworldTable534b()
    crimeEvent = underworldTable534c()
    return beginning + '; ' + crimeType + '; ' + crimeEvent
# Table 534a: why the character turned to crime.  All entries equally
# weighted (1); drawn via random_choice() in underworldTable534().
underworldTable534a = {
    'character needs money to pay debts': 1,
    'peer pressure forces the character to do criminal acts': 1,
    'character has a pathological urge to do wrong': 1,
    'character wants to defy authority': 1,
    "character wants to live a lifestyle they can't afford": 1,
    'character seeks a lifestyle filled with thrills and excitement': 1,
    'character seeks to wield power in the crime world': 1,
    'character is forced into a life of crime by cirminals who threaten their loved ones': 1
}
def underworldTable534b():
    """Roll which branch of crime the character pursued (table 534b)."""
    roll = randint(1,6)
    if roll == 4:
        rackets = ('prostitution', 'being a hired thug', 'burglary',
                   'smuggling', 'violating curfew', 'stealing livestock',
                   'selling drugs', 'robbing money lenders and stores',
                   'kidnapping')
        return 'independent criminal involved in ' + rackets[randint(1,9) - 1]
    branches = {
        1: 'petty theft',
        2: 'organized guild thievery',
        3: 'organized crime: ',  # table 875 follows here
        5: 'piracy',  # see table 534d
        6: 'banditry'
    }
    return branches[roll]
def underworldTable534c():
    """Roll a notable event from the character's criminal career (534c)."""
    roll = randint(1,20)
    if roll == 5:
        tail = ("", ", labeled as a snitch, with a contract on their life")[randint(1,6) == 6]
        return 'character becomes an informant for the law' + tail
    if roll == 6:
        return ('character participates in a jewel heist, only for their '
                + str(randint(1,4)) + 'partners to disappear with the loot')
    events = {
        1: 'join a gang',
        2: 'jailed in a sweep of the streets by law enforcement',
        3: 'seriously wounded in a fight',  # table 870
        4: 'character is a common criminal suspect regarding any crimes that happen in their town',
        7: 'a key gang boss is slain and the character is blamed, members of the gang seek the death of the character',
        8: 'character is imprisoned for a crime',  # table 875
        9: 'character becomes a proficient thief',
        10: 'character goes straight, ending their life of crime. Still often recognized by criminals who remember the old days, though',
        11: 'character develops extensive contacts in the underworld',
        12: 'character learns the surrounding sewers like the back of their hand',
        13: "character learns secret passages to a noble's estate",
        14: 'character discovers that several items taken in a recent heist are cursed.',  # tables 863 and 868
        15: "a crime lord becomes the character's patron, grooming them to a leader of organized crime",
        16: "character's friends are killed off in horrible ways and law enforcement has no interest in stopping the killer. only the character and one friend survive.",
        17: "character discovers that a prominent government official is the head of a major crime ring",
        18: "character's thieving skills improve considerably",
        19: "character steals and hides a valuable gem, only to later find out it was stoled by one of the character's criminal 'friends'",
        20: 'character becomes the leader of a gang'
    }
    return events[roll]
def underworldTable534d():
    """Roll an event from the character's piracy career (table 534d)."""
    roll = randint(1,10)
    if roll == 9:
        return ('due to wide travel on the pirate ship, character becomes moderately skilled at '
                + str(randint(1,6) + 1) + ' languages.')
    fates = {
        1: 'pirate captain buries treasure on a deserted island',
        2: 'pirate crew is captured and all but the character are hanged',
        3: 'character becomes adept at sailing a big ship',
        4: 'pirate crew mutinies and the character is voted captain by the mutineers. The old captain vows revenge.',
        5: 'pirates discover a lost island with a mysterious temple. All members of the crew are cursed by the magic of the temple, ',  # table 868
        6: 'an old salt teaches the character how to become an expert in wielding a cutlass',
        7: 'a raid on a large treasure ship gives the character a lot of gold',
        8: 'pirate captain is a woman known for taking vengeance on male captives.',
        10: "character becomes oen of the captain's officers"  # [sic]
    }
    return fates[roll]
def militaryTable535(charCulture, solMod):
    """Summarize military service: branch (535a), a tour event (535b) and
    final rank (538), joined into one string.

    Fixed: the original computed the three pieces and implicitly returned
    None, although callers (e.g. tragediesTable528) concatenate the result.
    """
    service = militaryTable535a(charCulture)
    event = militaryTable535b(0, charCulture)
    rank = militaryRankTable538(solMod)
    return service + ', rank: ' + rank + '. ' + event
def militaryTable535a(charCulture):
    """Roll the branch of military service for the character's culture.

    'mercenary' re-rolls recursively for the underlying branch.  Special
    forces / noncombat entries reference tables 537 and 536.
    """
    roll = randint(1,20)
    if charCulture == 'Primitive':
        brackets = ((12, 'light infantry'), (14, 'medium infantry'),
                    (16, 'archer'), (18, 'light cavalry'), (20, None))
    elif charCulture == 'Civilized':
        brackets = ((1, 'light infantry'), (6, 'medium infantry'),
                    (8, 'heavy infantry'), (10, 'archer'), (11, 'chariots'),
                    (13, 'light cavalry'), (14, 'heavy cavalry'), (16, None),
                    (18, 'navy'), (19, 'special forces'),  # 537
                    (20, 'noncombat duty'))  # 536
    else:
        # Nomad and Barbarian share the same distribution.
        brackets = ((3, 'light infantry'), (7, 'medium infantry'),
                    (8, 'archer'), (10, 'chariots'), (15, 'light cavalry'),
                    (17, None), (19, 'navy'),
                    (20, 'noncombat duty'))  # 536
    for ceiling, branch in brackets:
        if roll <= ceiling:
            if branch is None:
                return 'mercenary (' + militaryTable535a(charCulture) + ')'
            return branch
def militaryTable535b(modifier, charCulture):
    """Roll a tour-of-duty event (table 535b).

    modifier skews the d20 roll toward the rarer wartime entries;
    charCulture is forwarded to recursive rolls.

    Fixes: the disease branch built a (tuple, bool) pair instead of
    indexing (TypeError on concatenation); the hitch-extension branch
    called the int ``rand`` instead of ``randint`` and referenced the
    misspelled name ``charculture``; the fierce-war branch concatenated
    the function object militaryTable535bA instead of calling it.
    """
    rand = randint(1,20) + modifier
    if rand <= 6: # battle results dominate the low range
        preSuppose = 'Battle! '
        return preSuppose + militaryTable535bA()
    elif rand <= 8:
        return 'Character enlists for another tour of duty.'
    elif rand <= 9:
        return "Character's prowess and intelligence earn them reassignemnt to a special forces unit: " #537
    elif rand <= 10:
        return 'Character is transferred to a noncombat unit: ' #536
    elif rand <= 11:
        return 'Character is made an officer.'
    elif rand <= 12:
        rand = randint(1,5)
        if rand == 5:
            return "Character's unit is involved in " + str(randint(1,10)) + " skirmishes. One particular battle: " + militaryTable535bA()
        return "Character's unit is involved in " + str(randint(1,10)) + " skirmishes."
    elif rand <= 13:
        return "Character's unit is ambushed! " + militaryTable535bA(randint(1,4))
    elif rand <= 14:
        return "Character's unit is involved in a plot to overthrow the government. "
    elif rand <= 15:
        return "Character is reassigned to special forces: " #537
    elif rand <= 16:
        rand = randint(1,6)
        return "A disease ravages the army." + ("", " The character becomes sensitive to cold and damp.")[rand == 6]
    elif rand <= 17:
        return "Character re-enlists for another hitch. " + militaryTable535b(0, charCulture)
    elif rand <= 18:
        return "Character learns to be proficient with a new weapon."
    elif rand <= 19:
        return "Character's hitch is extended by " + str(randint(1,4)) + " years due to a major war breaking out. " + militaryTable535b(5, charCulture)
    elif rand <= 21:
        return "A fierce war breaks out due to " + random_choice(militaryTable535bB) + ". Result of most important battle: " + militaryTable535bA()
    elif rand <= 23:
        return "Character increases their aptitude of their occupation."
    elif rand == 24:
        return "Character is assigned to accompany a military unit in the field. " + militaryTable535b(0, charCulture)
    elif rand == 25:
        return "In the service of " #543
def militaryTable535bA(modifier = 0):
    """Roll the outcome of a single battle (table 535bA).

    modifier shifts the d20 roll upward (ambushes pass randint(1,4)), so
    raw results can reach 24.

    Fixes: ``"\\%"`` was an invalid escape sequence (plain '%' intended);
    the recognition branch built a (tuple, bool) pair instead of indexing
    (TypeError); rolls above 22 previously left battleOccur unassigned,
    raising UnboundLocalError — the final branch is now a catch-all.
    """
    rand = randint(1,20) + modifier
    if rand <= 1:
        battleOccur = str(randint(1,100)) + "% of the character's side was killed. They fought poorly and received an injury, " #870 also their military career could end
    elif rand <= 2:
        battleOccur = 'Serious casualties and the character was injured, being granted an impressive scar.'
    elif rand <= 3:
        battleOccur = 'The horror of battle causes the character to develop an exotic personality feature, ' #649
    # elif rand <= 5: skipping for now due to re-rolling
    #     battleOccur = ''
    elif rand <= 7:
        battleOccur = 'Character sees action, but nothing noteworthy occurs.'
    elif rand <= 8:
        battleOccur = 'Character fought well, with many a foe dying by their hands.'
    elif rand <= 9:
        battleOccur = 'Character fought well and became known for their heroism. For this, they were promoted.'
    elif rand <= 10:
        battleOccur = 'Character is captured and enslaved: ' + enslavedTable539()
    elif rand <= 11:
        battleOccur = 'Character is decorated for heroism.'
    elif rand <= 12:
        battleOccur = 'Character was a coward in battle and, even though no one noticed, must live with their actions.'
    elif rand <= 13:
        battleOccur = "Character's best friend dies at their side."
    elif rand <= 14:
        battleOccur = 'Character is the only survivor of their unit.'
    elif rand <= 15:
        battleOccur = 'Character deserts during battle, revealing their cowardly side.'
    elif rand <= 16:
        battleOccur = 'Character is responsible for the deaths of ' + str(randint(1,10)) + ' of their comrades.'
    elif rand <= 17:
        battleOccur = 'Character slays the leader of the enemy.'
    elif rand <= 18:
        battleOccur = "Character's superior is slain and they assume command."
    elif rand <= 19:
        battleOccur = 'Regardless of battle performance, character is accused of dereliction of duty and is court-martialed.'
    elif rand <= 20:
        rand = randint(1,6)
        battleOccur = 'An act of the character reverses the outcome of the battle.' + ("", " They are recognized for it.")[rand == 6]
    elif rand <= 21:
        # NOTE(review): relies on a module-level charCulture — confirm it is set.
        battleOccur = "Victor's side suffers light casualties. " + militaryTable535b(0, charCulture)
    else: # 22-24 (modifier can push the roll past 22)
        battleOccur = "Loser's side is utterly destroyed. " + militaryTable535b(0, charCulture)
    return battleOccur
# Table 535bB: causes of a major war.  Values are selection weights
# (they sum to 10); drawn via random_choice() in militaryTable535b().
militaryTable535bB = {
    'armies from a neighboring land': 3,
    'armies of monsters': 1,
    'a civil war': 2,
    'a peasant rebellion': 1,
    'a war of succession': 1,
    'a holy war': 1,
    'monsters from another plane': 1
}
def noncombatTable536(charCulture):
    """Roll a noncombat military assignment (table 536)."""
    roll = randint(1,20)
    if roll <= 3:
        return "A regular occupation, " + occupationsTable420(charCulture)
    if roll <= 5:
        return "Medical corps"
    duties = {
        6: "Recruiter",
        7: 'quartermaster corps',
        8: 'Instructor',
        9: 'Engineer',
        10: 'Messenger',
        11: 'Cook',
        12: 'Embassy guard',
        13: 'Mage guard',
        14: 'Prison guard',
        15: 'Payroll guard',
        16: 'City guard',
        17: 'Private body guard to leader',
        18: 'Palace guard',
        19: 'Temple guard',
        20: 'border guard'
    }
    return duties[roll]
# Table 537: special forces assignments.  Values are selection weights
# (they sum to 10), presumably drawn via random_choice() — TODO confirm.
specialForcesTable537 = {
    'ranger': 2,
    'scout': 2,
    'monster squad': 1,
    'marine': 2,
    'suicide squad': 1,
    'war machine': 1,
    'espionage': 1
}
def militaryRankTable538(solMod):
    """Roll the character's military rank; higher social standing
    (solMod) skews the roll toward officer ranks.  Scores above 25
    fall through and return None, as in the original.
    """
    score = randint(1,6) + solMod
    ladder = ((10, "Soldier"), (12, 'Corporal'), (15, 'Sargeant'),
              (16, 'Second Lieutenant'), (18, 'First Lieutenant'),
              (20, 'Captain'), (24, 'Major'), (25, "Colonel"))
    for ceiling, rank in ladder:
        if score <= ceiling:
            return rank
def enslavedTable539():
    """Roll what happened during (and possibly after) the character's
    enslavement (table 539).  Returns a description string.

    Several entries still reference unimplemented tables (#535, #543,
    #544, #761, #762, #863, #867) and contain placeholder re-roll text.
    Fixes: giftsTable863 was concatenated without being called
    (TypeError); bodyLocationTable867 was concatenated bare — an entry is
    now drawn from it; typo 'reunired' -> 'reunited'.
    """
    rand = randint(1,20)
    if rand <= 1: # Escape situation
        rand = randint(1,8)
        if rand == 1:
            return 'character escaped slavery, a reward of ' + str(randint(1,10)*100) + ' gold is offered for their capture'
        elif rand == 2:
            return str(randint(1,6)) + ' slaves accompanied the character in their escape from slavery'
        elif rand == 3:
            return 'the government is offering a bounty on the escaped slave'
        elif rand == 4:
            return "the owner's " + random_choice(relativesTable753) + "helped the character escape from slavery"
        elif rand == 5:
            return 'while escaping from slavery, the character killed their owner'
        elif rand == 6:
            return 'while escaping slavery, the character stole ' + giftsTable863()
        elif rand == 7:
            return 'the character, owned by a slaverunner, was set free by the owner who is in love with them'
        elif rand == 8:
            return 're-roll on table shit, ugh.'
    elif rand <= 2:
        # Owner decides to free the character.
        rand = randint(1,10)
        if rand == 1:
            rand = randint(1,8)
            if rand <= 4:
                return 'character is freed from slavery by owner, who is a good friend'
            elif rand <= 7:
                return "character is freed from slavery by owner, who becomes the character's patron" # table 543 here
            elif rand <= 8:
                return "character is freed from slavery by owner, who becomes the character's companion" # table 761 here
        elif rand <= 2:
            return 'character is freed from slavery by owner due to religious conversion, character is paid ' + str(randint(2,20)) + ' gold coins in reparations'
        elif rand <= 4:
            return 'character is reunited with relatives after being freed from slavery'
        elif rand <= 5:
            return 'owner dies and their will specifies that slaves are to be freed'
        elif rand <= 7:
            return ' the slave is unable to be used for work and enlists in the military' #go to table 535
        elif rand <= 8:
            return 'the character escapes slavery with the help of another, who becomes their companion' #go to table 863
        elif rand <= 9:
            # Fixed: giftsTable863 was not called, raising TypeError on concatenation.
            return "while in slavery, the character saves their owner's life. the owner gives the character their freedom and " + giftsTable863()
        elif rand <= 10:
            return '3x re-roll, will set up later'
    elif rand <= 3:
        return 'the ruler of the land declares slavery illegal and the player is given ' + str(randint(1,100)) + ' gold as reparations'
    elif rand <= 4:
        return 'the character is freed of slavery by saving money and buying their own freedom'
    elif rand <= 5:
        return 'owner dies'
        #there will be a bunch more rolls here
    elif rand <= 7:
        return 'character improves their occupational skill rating by one while in slavery'
    elif rand <= 8:
        return 'character improves their occupational skill rating by ' + str(randint(2,4)) + ' while in slavery'
    elif rand <= 9:
        return 'while in slavery, the character is often beaten by their owner'
    elif rand <= 10:
        return 'character learns an additional skill at rank 1 while enslaved'
    elif rand <= 11:
        return 'as a slave, the character is a sexual plaything of the owner and has no other duties.'
    elif rand <= 12:
        return 'character participates in a slave revolt'
        #more rolls will go here
    elif rand <= 13:
        return 'while enslaved, a character is promoted to a position of authority'
    elif rand <= 14:
        return "while enslaved, the character is the owner's favorite and becomes the senior slave. one of the other slaves becomes the character's rival" #roll table 762 here
    elif rand <= 15:
        return 'character is used as breeding stock. if male, produces ' + str(randint(1,10)) + ' kids per year. if female, one per year'
    elif rand <= 16:
        return 'character is resold ' + str(randint(1,3)) + ' times during their enslavement'
    elif rand <= 17:
        # Fixed: bodyLocationTable867 was concatenated bare.  TODO confirm
        # random_choice is the right accessor for table 867.
        return 'character is branded on their ' + random_choice(bodyLocationTable867)
    elif rand <= 18:
        return 'the character attempts to escape from slavery, fails, and is branded on their ' + random_choice(bodyLocationTable867) + '. they are also beaten more often'
    elif rand <= 20:
        return 'an exotic event occurs that causes the character to be freed' #roll on table 544 here
def DarksideTraitsTable648():
    """Roll a darkside (negative) personality trait (table 648).

    The tuple holds the entries for rolls 2..40 in order; the roll is
    shifted so 2 maps to index 0.
    """
    traits = (
        "pessimist", "egoist", "obstructive", "cruel", "careless",
        "thoughtless", "flippant", "drunkard", "suspicious", "violent",
        "argumentative", "irreverent", "cheat", "hateful", "selfish",
        "slovenly", "filthy", "tardy", "self-doubting", "cowardly",
        "disrespectful", "angry", "impatient", "foolish", "greedy",
        "dull", "vengeful", "immoral", "untrustworthy", "rude", "harsh",
        "unfriendly", "egotistic", "lazy", "liar", "morose",
        "unenthuastic", "spendthrift", "tactless",
    )
    return traits[randint(2,40) - 2]
# Table 751: nonhuman races.  Values are selection weights (sum 19);
# drawn via random_choice() elsewhere in this file.
nonhumansTable751 = {
    'elf': 4,
    'dwarf': 3,
    'halfling': 3,
    'half elf': 4,
    'beastman': 1,
    'reptileman': 1,
    'orc': 1,
    'half orc': 2
}
# Table 753: relatives.  All entries equally weighted (1); drawn via
# random_choice() (see guardiansTable754, enslavedTable539).
relativesTable753 = {
    'first cousin': 1,
    'second cousin': 1,
    'distant cousin': 1,
    'son': 1,
    'daughter': 1,
    'sister': 1,
    'brother': 1,
    'spouse': 1,
    'aunt': 1,
    'uncle': 1,
    'great aunt': 1,
    'great uncle': 1,
    'mother': 1,
    'father': 1,
    'grandmother': 1,
    'grandfather': 1,
    'great grandmother': 1,
    'great grandfather': 1,
    'descendent': 1,
    'unknown relation': 1
}
def guardiansTable754(cuMod):
    """Roll who raised the character (table 754).

    cuMod is the culture modifier, forwarded to sub-tables.
    Fixed: a leftover debug line (``rand = 10``) overwrote the roll, so
    only the familyTable106a branch could ever run.
    """
    rand = randint(1,20)
    if rand <= 5:
        return random_choice(relativesTable753)
    elif rand <= 8:
        return 'raised in an orphanage'
    elif rand <= 10:
        return familyTable106a(cuMod)
    elif rand <= 11:
        return 'raised by priests or monks of ' + deitiesTable864(cuMod)
    elif rand <= 12:
        return 'raised by ' + random_choice(nonhumansTable751)
    elif rand <= 13:
        return 'sold into slavery ' + enslavedTable539()
    elif rand <= 14:
        return 'raised on the street by beggars and prostitutes'
    elif rand <= 15:
        return "raised by a thieves' guild" #table534 here
    elif rand <= 16:
        return 'raised by different relatives, passed between them until coming of age'
    elif rand <= 17:
        return 'raised by an adventurer: ' #table757
    elif rand <= 18:
        return 'character mysteriously disappeared for ' + str(randint(1,10)) + ' years'
    elif rand <= 19:
        return 'raised by beasts in the wild'
    elif rand <= 20:
        return 'raised by ' #table 756
# Table 755: criminal occupations.
# NOTE(review): unlike the other tables in this file, whose values are
# selection weights (usually 1), these values ascend 1..20 like roll
# numbers.  If random_choice() treats the value as a weight, 'pirate'
# (20) would be drawn twenty times as often as 'murderer' (1) — confirm
# whether these were meant to be weights or d20 roll indices.
criminalTable755 = {
    'murderer': 1,
    'kidnapper': 2,
    'thief': 3,
    'pickpocket': 4,
    'extortionist': 5,
    'con man': 6,
    'armed robber': 7,
    'highwayman': 8,
    'gang bandit': 9,
    'professional assassin': 10,
    'drug dealer': 11,
    'mugger': 12,
    'horse thief': 13,
    'rustler': 14,
    'thug': 15,
    'pimp': 16,
    'prostitute': 17,
    'gang leader': 18,
    'rapist': 19,
    'pirate': 20
}
# Tables 758: noble titles per culture.  In each dict the values are
# percentage selection weights (each table sums to 100).  Title
# modifiers for these entries come from nobleTable758tiMod() below.
nobleTable758prim = {
    'high king': 1,
    'chieftain': 29,
    'subchieftain': 70
}
nobleTable758nomad = {
    'kahn': 10,
    'chieftain': 30,
    'subchieftain': 40,
    'hetman': 20
}
nobleTable758barb = {
    'high king': 2,
    'king': 13,
    'prince(ss)': 10,
    'chieftain': 20,
    'jarl': 15,
    'subchieftain': 10,
    'baron': 5,
    'prince(ss) (royal)': 5,
    'hetman': 20
}
nobleTable758civil = {
    'emperor': 1,
    'king': 4,
    'prince(ss) (royal)': 10,
    'archduke': 5,
    'duke': 5,
    'marquis': 10,
    'viscount': 15,
    'count': 10,
    'baron': 15,
    'lord': 3,
    'prince(ss)': 12,
    'knight': 10
}
def nobleTable758tiMod(nobleTitle):
    """Return the title modifier for a noble title (table 758).

    Most titles roll dice within a (low, high) range; 'king' and
    'emperor' return fixed values.  Raises ValueError for an unknown
    title, as before.
    """
    fixed = {'king': 39, 'emperor': 60}
    if nobleTitle in fixed:
        return fixed[nobleTitle]
    dice = {
        'hetman': (1, 6),
        'knight': (2, 12),
        'prince(ss)': (4, 40),
        'lord': (2, 16),
        'baron': (2, 20),
        'count': (3, 18),
        'subchieftain': (2, 12),
        'jarl': (3, 18),
        'viscount': (3, 24),
        'chieftain': (3, 18),
        'marquis': (3, 30),
        'duke': (4, 32),
        'archduke': (4, 40),
        'prince(ss) (royal)': (4, 40),
        'kahn': (5, 40),
        'high king': (5, 50)
    }
    if nobleTitle in dice:
        low, high = dice[nobleTitle]
        return randint(low, high)
    raise ValueError("\nnobleTable758tiMod isn't getting a nobleTitle pushed to it.")
def unusualPetsTable759():
    """Roll an unusual pet and pair it with a special ability (table 760)."""
    brackets = ((2, 'dog'), (4, 'cat'), (5, 'rabbit'), (6, 'lizard'),
                (7, 'monkey'), (8, 'raccoon'), (9, 'rat'), (10, 'snake'),
                (11, 'hawk'), (12, 'mouse'), (13, 'ferret'),
                (14, 'songbird'), (15, None), (16, 'puppy'),
                (17, 'mini-dragon'), (18, 'big cat'),
                (19, 'baby bear that stays a baby'), (20, 'something alien'))
    roll = randint(1,20)
    for ceiling, pet in brackets:
        if roll <= ceiling:
            if pet is None:
                # Fish sub-roll: a 1-in-3 chance the fish is amphibious.
                if randint(1,3) == 3:
                    petType = 'fish (that can survive out of water)'
                else:
                    petType = 'fish'
            else:
                petType = pet
            break
    petAbility = specialPetAbilitiesTable760()
    return petType + ' (' + petAbility + ')'
def specialPetAbilitiesTable760():
    """Roll a special ability for an unusual pet (table 760).

    Fixes: the 'unusual color' branch concatenated the bare name
    colorsTable865 (TypeError) — an entry is now drawn from it; the
    'odd substance' branch rolled randint(1,10) and discarded the result
    (dead code removed — picking WHICH substance is still a TODO).
    """
    rand = randint(1,20)
    if rand <= 1:
        return 'has wings'
    elif rand <= 2:
        return 'very intelligent'
    elif rand <= 3:
        return 'telepathic'
    elif rand <= 4:
        # TODO confirm random_choice is the right accessor for table 865.
        return 'unusual color: ' + random_choice(colorsTable865)
    elif rand <= 5:
        return 'pet is made of odd substance'
    elif rand <= 6:
        return 'pet has physical affliction ' #table874 here
    elif rand <= 7:
        return 'pet can use magic spells'
    elif rand <= 8:
        return 'pet is invisible to all but owner'
    elif rand <= 9:
        return 'pet regenerates damage done to it'
    elif rand <= 10:
        return 'when killed, pet will possess nearest animal'
    elif rand <= 11:
        rand = randint(1,2)
        if rand == 1:
            return 'pet is unusually large'
        else:
            return 'pet is unusually small'
    elif rand <= 12:
        return 'once per day, pet may assume attractive human form for ' + str(randint(1,6)) + ' hours'
    elif rand <= 13:
        return 'draws magical power from its master'
    elif rand <= 14:
        return 'supplies magical power to master'
    elif rand <= 15:
        return "pet's life is added to character's own as long as the pet lives"
    elif rand <= 16:
        return 'breathes fire'
    elif rand <= 17:
        return 'can increase its size and strength ' + str(randint(1,10)) + ' times their normal amount once per day for ' + str(randint(1,6)) + ' hours'
    elif rand <= 18:
        return 'can provide master with ' + str(randint(1,6)) + ' gold per day'
    elif rand <= 19:
        return 'can turn into mist at will'
    elif rand <= 20:
        return 'reroll shit'
def companionTable761():
    """Assemble a companion: who they are (761a), why they travel with
    the character (761b), and their personality (761c)."""
    who = companionTable761a()
    why = companionTable761b()
    temperament = companionTable761c()
    return who + ', because ' + why + ', personality: ' + temperament
def companionTable761a():
    """Roll who the companion is (table 761a).

    Range is 1-9 for now; it can grow to 1-10 once the re-roll entry
    is implemented.
    """
    roll = randint(1,9)
    if roll == 2:
        return 'a family member, ' + random_choice(relativesTable753)
    if roll == 3:
        return 'a nonhuman, ' + random_choice(nonhumansTable751)
    if roll == 6:
        return 'a kid aged ' + str(randint(7,13))
    if roll == 7:
        if randint(1,2) == 1:
            return 'an older sibling'
        return 'a younger sibling'
    fixed = {
        1: 'childhood friend',
        4: 'a stranger, ',  # table 750 goes here
        5: 'an intelligent, articulate inanimate object',
        8: 'an adventurer, ',  # table 757
        9: 'a former enemy or rival, '  # table 762 goes here
    }
    return fixed[roll]
def companionTable761b():
    """Roll why the companion travels with the character (table 761b)."""
    reasons = (
        'character saved their life',
        'they seek a similar goal, are friendly rivals',
        'parents were companions in adventure',
        'they share the same enemy',
        'they were in the same place and in trouble at the same time',
        'the companion imagines the character a hero and wants to learn from them',
        "the companion's original intent was to steal from the character",
        'companion feels a need to protect the character',
        'mysterious voices and feelings told the companion to seek out the character and join them',
    )
    return reasons[randint(1,9) - 1]
def companionTable761c():
    """Roll the companion's personality (table 761c)."""
    roll = randint(1,10)
    brackets = ((3, 'loyal friend'), (5, 'bumbling buffoon'),
                (6, 'grim, quiet ally'), (7, 'enthusiastic leader-type'),
                (8, 'a wise-cracking smart mouth who complains'),
                (9, 'rowdy fighter'), (10, 'incurable romantic'))
    for ceiling, temperament in brackets:
        if roll <= ceiling:
            return temperament
def giftsTable863():
    """Roll on table 863: a gift or legacy item (d20).

    Several faces chain into the 863a-863e sub-tables; a roll of 20 is
    the re-roll placeholder, exactly as in the original table text.
    """
    # Lazy lambdas: a sub-table draw only happens for the face rolled.
    gifts = {
        1: lambda: random_choice(giftsTable863a),
        2: lambda: 'guardianship of a young ward. Use table 761',
        3: lambda: 'unusual pet. Use table 760',
        4: lambda: random_choice(giftsTable863b),
        5: lambda: 'a tapestry',
        6: lambda: 'an anachronistic device',
        7: lambda: 'a key',
        8: lambda: 'a locked or sealed book',
        9: lambda: 'a shield',
        10: lambda: 'a sealed bottle',
        11: lambda: 'a tarnished old helmet',
        12: lambda: 'a bound wooden staff',
        13: lambda: 'a riding animal',
        14: lambda: 'a deed to ' + random_choice(giftsTable863c),
        15: lambda: 'a musical instrument',
        16: lambda: random_choice(giftsTable863d),
        17: lambda: 'a pouch of papers containing ' + random_choice(giftsTable863e),
        18: lambda: 'a sealed trunk',
        19: lambda: 'a chainmail hauberk',
        20: lambda: 'roll again shenanigans',
    }
    return gifts[randint(1, 20)]()
# Sub-tables for giftsTable863.  Every entry carries weight 1, so build
# the dicts with dict.fromkeys instead of spelling each weight out.
giftsTable863a = dict.fromkeys([
    'an ornate dagger',
    'an ornate sword',
    'a plain sword',
    'a mace',
    'an ornate spear',
    'a well-made bow',
    'an ornate battle axe',
    'an exotic weapon',
], 1)
giftsTable863b = dict.fromkeys([
    'amulet',
    'necklace',
    'earrings',
    'tiara',
    'torc',
    'arm band',
    'ring',
    'pin or brooch',
], 1)
giftsTable863c = dict.fromkeys([
    'a tract of land',
    'an ancient castle',
    'a country manor',
    'an elegant town house',
    'a temple',
    'a factory',
    'ancient ruins',
    'an inn',
    'an apartment building',
], 1)
giftsTable863d = dict.fromkeys([
    'a hat',
    'a pair of shoes',
    'a belt',
    'a cape',
    'a tunic',
    'trousers',
    'a pair of stockings',
], 1)
giftsTable863e = dict.fromkeys([
    "an ancient ancestor's letter to his descendents",
    'a map',
    'an undelivered letter',
    'diagrams and plans for a mysterious invention',
    'a scroll of magic spells',
    'a wild story of adventure',
    'a last will and testament determining that the character is an heir',
    'a treasure map',
    "the character's true family history",
], 1)
def deitiesTable864(cuMod=0):
rand = randint(1,20) + cuMod
if rand <= 1:
return 'ancestor worship'
elif rand <= 2:
return 'beast gods'
elif rand <= 3:
return 'hunting god'
elif rand <= 4:
return 'trickster'
elif rand <= 5:
return 'earth goddess'
elif rand <= 6:
return 'agricultural goddess'
elif rand <= 8:
return 'agricultural goddess'
elif rand <= 10:
return 'ruling deity'
elif rand <= 11:
return 'sea god'
elif rand <= 12:
return 'sun god'
elif rand <= 13:
return 'moon goddess'
elif rand <= 14:
return 'storm god'
elif rand <= 15:
return 'evil god'
elif rand <= 16:
return 'war god'
elif rand <= 17:
return 'love goddess'
elif rand <= 18:
return 'underworld god'
elif rand <= 19:
return 'god of wisdom'
elif rand <= 20:
return 'healing god'
elif rand <= 21:
return 'trade god'
elif rand <= 22:
return 'luck goddess'
elif rand <= 23:
return 'night goddess'
elif rand <= 24:
return 'god of thieves'
elif rand <= 27:
return 'decadent god'
else:
raise ValueError("I done fucked up on the deitiesTable864")
# Table 865: colors, all equally weighted ('reroll' kept as an entry,
# matching the original book table).
colorsTable865 = dict.fromkeys([
    'red', 'red orange', 'orange', 'yellow orange', 'yellow',
    'yellow-green', 'green', 'blue-green', 'blue', 'blue-violet',
    'violet', 'red violet', 'pink', 'white', 'black', 'gray',
    'maroon', 'silver', 'gold', 'reroll',
], 1)
# Table 866: birthmark shapes, all equally weighted.
birthmarksTable866 = dict.fromkeys([
    'dragon', 'skull', 'bat', 'sword', 'hand', 'crescent moon',
    'claw', 'eagle', 'fish', 'a random animal',
], 1)
# Table 867: body locations with relative weights (chest, abdomen,
# buttocks and face are more likely); insertion order is preserved.
bodyLocationTable867 = dict([
    ('right foot', 1),
    ('left foot', 1),
    ('right leg', 1),
    ('left leg', 1),
    ('abdomen', 2),
    ('buttocks', 2),
    ('back', 1),
    ('chest', 4),
    ('right arm', 1),
    ('left arm', 1),
    ('right hand', 1),
    ('left hand', 1),
    ('head', 1),
    ('face', 2),
])
'''
This is an easy copy/paste for creating dicts:
Table = {
'': ,
'': ,
'': ,
'': ,
'': ,
'': ,
'': ,
'':
}
'''
|
venn177/heroesoflegend.py
|
heroesoflegend/rolltables.py
|
Python
|
mit
| 96,123
|
import numpy as np
import pytest
from lsh.cache import Cache
from lsh.minhash import MinHasher
@pytest.fixture
def default_hasher():
    # Shared MinHasher fixture (100 seeds) used by most tests below.
    return MinHasher(seeds=100)
@pytest.fixture
def default_cache(default_hasher):
    # Fresh Cache built on the shared hasher; one per test.
    return Cache(default_hasher)
def is_nondecreasing(L):
    """Return True if L never decreases from one element to the next.

    An empty or single-element sequence is trivially non-decreasing.
    """
    # http://stackoverflow.com/a/4983359/419338
    for previous, current in zip(L, L[1:]):
        if previous > current:
            return False
    return True
def test_hasher_json_serialisation(default_hasher, tmpdir):
    """A hasher round-tripped through JSON must fingerprint identically."""
    path = str(tmpdir.join("hasher.json"))
    default_hasher.to_json(path)
    loaded_hasher = MinHasher.from_json_file(path)
    doc = 'Once upon a time in a galaxy far far away and what not'
    np.testing.assert_array_equal(default_hasher.fingerprint(doc),
                                  loaded_hasher.fingerprint(doc))
def test_cache_json_serialisation(tmpdir, default_cache):
    """Serialise/deserialise the cache when empty and after incremental adds;
    the loaded copy must report the same duplicates as the original."""
    path = str(tmpdir.join("cache.json"))
    # easy case- the bins array is empty
    default_cache.to_json(path)
    loaded_cache = Cache.from_json(path)
    # now add some data
    doc = "This is a document"
    default_cache.add_doc(doc, 0)
    loaded_cache.add_doc(doc, 0)
    assert (default_cache.get_duplicates_of(doc) ==
            loaded_cache.get_duplicates_of(doc))
    assert (default_cache.get_duplicates_of(doc_id=0) ==
            loaded_cache.get_duplicates_of(doc_id=0))
    # round-trip again with a non-empty cache, then keep adding in lockstep
    default_cache.to_json(path)
    loaded_cache = Cache.from_json(path)
    default_cache.add_doc("The king of Denmark", 1)
    loaded_cache.add_doc("The king of Denmark", 1)
    default_cache.add_doc("The queen of Zerg", 2)
    loaded_cache.add_doc("The queen of Zerg", 2)
    default_cache.to_json(path)
    loaded_cache = Cache.from_json(path)
    assert (default_cache.get_duplicates_of(doc) ==
            loaded_cache.get_duplicates_of(doc))
    assert (default_cache.get_duplicates_of(doc_id=0) ==
            loaded_cache.get_duplicates_of(doc_id=0))
    assert (default_cache.get_duplicates_of(doc) ==
            loaded_cache.get_duplicates_of(doc))
    for id in [0, 1, 2]:
        assert (default_cache.get_duplicates_of(doc_id=id) ==
                loaded_cache.get_duplicates_of(doc_id=id))
@pytest.mark.parametrize("char_ngram", [2, 3, 4, 5, 6])
@pytest.mark.parametrize("hashbytes", [4, 8])
@pytest.mark.parametrize("num_bands", [20, 40, 50])
@pytest.mark.parametrize("seed", range(3))
def test_cache(char_ngram, hashbytes, num_bands, seed):
    """End-to-end duplicate detection over a grid of hasher/cache settings."""
    hasher = MinHasher(seeds=200, char_ngram=char_ngram,
                       hashbytes=hashbytes, random_state=seed)
    lsh = Cache(hasher, num_bands=num_bands)
    # very small band width => always find duplicates
    short_doc = 'This is a simple document'
    another_doc = 'Some text about animals.'
    long_doc = 'A much longer document that contains lots of information\
                different words. The document produces many more shingles.'
    assert not lsh.is_duplicate(short_doc)
    lsh.add_doc(short_doc, 0)
    assert lsh.get_duplicates_of(short_doc) == {0}
    assert lsh.is_duplicate(short_doc, doc_id=0)
    assert lsh.is_duplicate(short_doc)
    assert not lsh.is_duplicate(long_doc)
    lsh.add_doc(long_doc, 1)
    lsh.add_doc(another_doc, 2)
    assert lsh.is_duplicate(another_doc)
    assert lsh.is_duplicate(long_doc, doc_id=1)
    assert lsh.is_duplicate(long_doc)
    # dropping a single word must not break near-duplicate detection
    words = long_doc.split()
    long_doc_missing_word = ' '.join([words[0]] + words[2:])
    assert lsh.get_duplicates_of(long_doc_missing_word) == {1}
    assert lsh.is_duplicate(long_doc_missing_word)
    assert lsh.is_duplicate(long_doc + ' Word.')
    # get_all_duplicates only reports pairs of *added* documents
    assert lsh.get_all_duplicates() == set()
    lsh.add_doc(long_doc_missing_word, 3)
    assert lsh.get_all_duplicates() == {(1, 3)}
    lsh.add_doc(long_doc_missing_word, 4)
    assert lsh.get_all_duplicates() == {(1, 3), (1, 4), (3, 4)}
# Near-duplicate fixture documents of decreasing length, shared by the
# tests below.
mc_long_doc = "Jang MC Min Chul is a Protoss player from South Korea, who " \
              "last played for Trig Esports before retiring. On May 23rd, " \
              "2016, MC announced his return to pro-gaming by joining CJ " \
              "Entus. He is currently "
mc_med_doc = "Jang MC Min Chul is a Protoss player from South Korea, who " \
             "last played for Trig Esports before retiring. He is currently "
mc_short_doc = "Jang MC Min Chul is currently "
@pytest.mark.parametrize("doc", [mc_long_doc, mc_med_doc, mc_short_doc])
def test_num_bands(doc):
    """
    add near-duplicate documents to three caches with different settings
    check that hashers with low band_width finds more matches (over 50 runs)
    """
    suffixes = ['teamless', 'retired', 'awesome', 'overweight']
    duplicates = []
    divisors_of_200 = [4, 10, 20, 25, 40, 50, 100]
    for seed in range(10):
        hasher = MinHasher(seeds=200, char_ngram=5, random_state=seed)
        caches = [Cache(hasher, num_bands=n) for n in divisors_of_200]
        for c in caches:
            c.add_doc(doc + suffixes[0], 0)
        for s in suffixes[1:]:
            duplicates.append([c.is_duplicate(doc + s) for c in caches])
    # column i = hit count for caches[i]; more bands => more candidate
    # pairs, so the counts must not decrease along the row
    sums = np.array(duplicates).sum(axis=0)
    print(sums)
    assert is_nondecreasing(sums)
@pytest.mark.parametrize("doc", [mc_long_doc, mc_med_doc, mc_short_doc])
def test_real_world_usage(default_cache, doc):
    """Adding the same text twice makes it a duplicate under any queried id."""
    default_cache.add_doc(doc, 0)
    default_cache.add_doc(doc, 1)
    assert default_cache.is_duplicate(doc)
    assert default_cache.is_duplicate(doc, 0)
    assert default_cache.is_duplicate(doc, 1)
    # NOTE(review): id 2 was never added -- presumably is_duplicate matches
    # on the text regardless of the id passed; confirm this is intended.
    assert default_cache.is_duplicate(doc, 2)
def test_filtering_by_jaccard(default_cache):
    """Duplicate queries must honour the min_jaccard similarity filter."""
    data = {0: mc_long_doc, 1: mc_med_doc,
            2: mc_med_doc, 3: mc_short_doc}
    for id, doc in data.items():
        default_cache.add_doc(doc, id)
    # only the textually identical pair (1, 2) survives every threshold
    for mj in np.arange(0.1, 0.91, step=0.1):
        dupes = default_cache.get_all_duplicates(min_jaccard=mj)
        assert dupes == {(1, 2)}
    dupes = default_cache.get_duplicates_of(doc=mc_med_doc,
                                            min_jaccard=0.9)
    assert dupes == {1, 2}
    dupes = default_cache.get_duplicates_of(doc_id=1,
                                            min_jaccard=0.9)
    assert dupes == {1, 2}
    # an unrelated query document matches nothing even at a low threshold
    dupes = default_cache.get_duplicates_of('Nothing to see',
                                            min_jaccard=0.1)
    assert dupes == set()
def test_jaccard(default_hasher):
    """Estimated jaccard: identical docs score 1; similar beats dissimilar."""
    assert default_hasher.jaccard("This is a doc", "This is a doc") == 1
    high_j = default_hasher.jaccard("This is a doc", "That is a doc")
    low_j = default_hasher.jaccard("This is a doc", "Cats in a tree")
    assert 0 <= low_j < high_j <= 1
@pytest.mark.parametrize("num_bands", [3, 6, 7, 9, 71, 99, 101])
def test_invalid_settings(num_bands, default_hasher, default_cache):
    """Band counts that do not evenly divide the 100 seeds are rejected."""
    with pytest.raises(AssertionError):
        lsh = Cache(default_hasher, num_bands=num_bands)
        lsh.add_doc('Hi', 1)
        lsh.get_duplicates_of('Hello')
    # querying an id that was never added raises ValueError
    default_cache.add_doc('Hi', 0)
    with pytest.raises(ValueError):
        default_cache.get_duplicates_of(doc_id=123)
def test_clear(default_cache):
    """clear() empties the cache but leaves the hasher's output unchanged."""
    default_cache.add_doc(mc_long_doc, 0)
    assert default_cache.is_duplicate(mc_long_doc)
    f = default_cache.hasher.fingerprint(mc_long_doc)
    default_cache.clear()
    f1 = default_cache.hasher.fingerprint(mc_long_doc)
    assert not default_cache.is_duplicate(mc_long_doc)
    np.testing.assert_array_equal(f, f1)
def test_remove_by_id(default_cache):
    """remove_id drops only that doc's duplicate status, one id at a time."""
    default_cache.add_doc(mc_long_doc, 0)
    default_cache.add_doc(mc_med_doc, 1)
    default_cache.add_doc(mc_short_doc, 2)
    default_cache.add_doc(mc_short_doc, 3)
    # initially everything is a duplicate
    assert default_cache.is_duplicate(mc_long_doc)
    assert default_cache.is_duplicate(mc_med_doc)
    assert default_cache.is_duplicate(mc_short_doc)
    # doc removed, it must no longer be a dupe, but all others still are
    default_cache.remove_id(0)
    assert not default_cache.is_duplicate(mc_long_doc)
    assert default_cache.is_duplicate(mc_med_doc)
    assert default_cache.is_duplicate(mc_short_doc)
    # another doc removed. non-removed docs are still duplicates
    default_cache.remove_id(1)
    assert not default_cache.is_duplicate(mc_long_doc)
    assert not default_cache.is_duplicate(mc_med_doc)
    assert default_cache.is_duplicate(mc_short_doc)
    # the short doc was added under ids 2 and 3, so removing just one of
    # them still leaves it a duplicate
    default_cache.remove_id(2)
    assert not default_cache.is_duplicate(mc_long_doc)
    assert not default_cache.is_duplicate(mc_med_doc)
    assert default_cache.is_duplicate(mc_short_doc)
    default_cache.remove_id(3)
    assert not default_cache.is_duplicate(mc_short_doc)
    with pytest.raises(KeyError):
        default_cache.remove_id(123) # unknown id
def test_remove_by_text(default_cache):
    """remove_doc(text) drops every id that was added with that text."""
    default_cache.add_doc(mc_long_doc, 0)
    default_cache.add_doc(mc_short_doc, 1)
    default_cache.add_doc(mc_short_doc, 2)
    assert default_cache.is_duplicate(mc_long_doc)
    assert default_cache.is_duplicate(mc_short_doc)
    # both occurences of the removed doc should go away
    default_cache.remove_doc(mc_short_doc)
    assert default_cache.is_duplicate(mc_long_doc)
    assert not default_cache.is_duplicate(mc_short_doc)
|
mbatchkarov/LSH
|
lsh/test/test_cache.py
|
Python
|
mit
| 9,113
|
from setuptools import setup
# Packaging metadata for scram_plot; the three plotting modules are
# installed both as a package and as standalone scripts.
setup(name='scram_plot',
      version='0.68.0',
      description='scram_plot',
      author='Stephen Fletcher',
      author_email='s.fletcher@uq.edu.au',
      license='MIT',
      packages=['scram_plot'],
      classifiers=[
          # How mature is this project? Common values are
          # 3 - Alpha
          # 4 - Beta
          # 5 - Production/Stable
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Science/Research',
          # Indicate who your project is intended for
          'Topic :: Scientific/Engineering :: Bio-Informatics',
          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: MIT License',
          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python :: 3.5'],
      install_requires=['numpy','matplotlib','bokeh'],
      scripts=['scram_plot/scram_plot.py',
               'scram_plot/profile_plot.py',
               'scram_plot/compare_plot.py'
               ],
      zip_safe=False)
|
sfletc/scram2_plot
|
setup.py
|
Python
|
mit
| 1,074
|
# -*- coding: utf-8 -*-
import json
import pytest
import pyroonga
from pyroonga.odm import query, table
from pyroonga.tests import utils as test_utils
class TestGroongaRecord(object):
    """Functional tests for committing ODM record changes back to Groonga."""
    @pytest.fixture
    def Table1(self, Table):
        # Build a hash-keyed table with one ShortText column and seed it
        # with three rows (key1..key3 / foo, bar, baz).
        class Tb(Table):
            name = table.Column()
        grn = pyroonga.Groonga()
        Table.bind(grn)
        test_utils.sendquery('table_create --name %s --flags TABLE_HASH_KEY'
                             ' --key_type ShortText' % Tb.__tablename__)
        test_utils.sendquery('column_create --table %s --name name --flags'
                             ' COLUMN_SCALAR --type ShortText' %
                             Tb.__tablename__)
        test_utils.insert(Tb.__tablename__, [
            {'_key': 'key1', 'name': 'foo'},
            {'_key': 'key2', 'name': 'bar'},
            {'_key': 'key3', 'name': 'baz'},
        ])
        return Tb
    def test_commit_with_not_changed(self, Table1):
        """commit() on an unmodified record must not alter any stored row."""
        record1, record2, record3 = Table1.select().all()
        assert record1.name == 'foo'
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        record1.commit()
        assert record1.name == 'foo'
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        record2.commit()
        assert record1.name == 'foo'
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        record3.commit()
        assert record1.name == 'foo'
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        # verify directly against the server that the rows are untouched
        records = json.loads(test_utils.sendquery('select --table %s' %
                                                  Table1.__tablename__))
        assert records[1][0][2:] == [[1, 'key1', 'foo'], [2, 'key2', 'bar'],
                                     [3, 'key3', 'baz']]
    def test_commit(self, Table1):
        """commit() persists exactly the modified record, leaving others alone."""
        record1, record2, record3 = Table1.select().all()
        expected1, expected2, expected3 = [test_utils.random_string() for _ in
                                           range(3)]
        assert record1.name == 'foo'
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        record1.name = expected1
        record1.commit()
        assert record1.name == expected1
        assert record2.name == 'bar'
        assert record3.name == 'baz'
        records = json.loads(test_utils.sendquery('select --table %s' %
                                                  Table1.__tablename__))
        assert records[1][0][2:] == [[1, 'key1', expected1],
                                     [2, 'key2', 'bar'],
                                     [3, 'key3', 'baz']]
        record2.name = expected2
        record2.commit()
        assert record1.name == expected1
        assert record2.name == expected2
        assert record3.name == 'baz'
        records = json.loads(test_utils.sendquery('select --table %s' %
                                                  Table1.__tablename__))
        assert records[1][0][2:] == [[1, 'key1', expected1],
                                     [2, 'key2', expected2],
                                     [3, 'key3', 'baz']]
        record3.name = expected3
        record3.commit()
        assert record1.name == expected1
        assert record2.name == expected2
        assert record3.name == expected3
        records = json.loads(test_utils.sendquery('select --table %s' %
                                                  Table1.__tablename__))
        assert records[1][0][2:] == [[1, 'key1', expected1],
                                     [2, 'key2', expected2],
                                     [3, 'key3', expected3]]
|
naoina/pyroonga
|
pyroonga/tests/functional/odm/test_query.py
|
Python
|
mit
| 3,656
|
import random
import math
import check_eng
def decrypt(msg, key):
    """Undo a columnar transposition cipher.

    The ciphertext was produced by writing the plaintext row-wise into
    `key` columns and reading it off column-wise; this walks the grid
    in the opposite direction to recover the plaintext.
    """
    num_cols = math.ceil(len(msg) / key)
    num_rows = key
    # Cells of the grid that stay empty when the message does not fill
    # it exactly (always in the last column of the bottom rows).
    num_shaded = (num_rows * num_cols) - len(msg)
    plaintext = [''] * num_cols
    col = row = 0
    for ch in msg:
        plaintext[col] += ch
        col += 1
        # Advance to the next row at the end of a full row, or one cell
        # early on rows that end in a shaded cell.
        if col == num_cols or (col == num_cols - 1 and row >= num_rows - num_shaded):
            row += 1
            col = 0
    return ''.join(plaintext)
# Brute-force a transposition cipher: try every key length and offer to
# save any decryption that scores as English.
file_name = input("Enter the name of the file to be decrypted :")
f = open(file_name, 'r')
msg = f.read()
f.close()
#decrypting into file
# NOTE(review): range stops at len(msg) - 1, so a key equal to the full
# message length is never tried -- confirm that is acceptable.
for key in range(1,len(msg)):
    print ('Try '+str(key)+'.....')
    dec = decrypt(msg,key)
    if(check_eng.check(dec)):
        print ('Seems to be a hack for key....' + str(key))
        o_file_name = input("Enter the name for the decrypted file :")
        f = open(o_file_name,'w')
        f.write(dec)
        f.close()
|
AlMikFox3/Ciphers
|
TranspositionCipher/hack_transposition.py
|
Python
|
mit
| 880
|
#!/usr/bin/env python
# RabbitMQ "hello world" producer (NOTE: Python 2 -- uses the print
# statement; not Python 3 compatible).
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
# if we want to connect to a broker on a different machine we'd simply specify its name or IP address here
# queue_declare is idempotent: it only ensures the queue exists.
channel.queue_declare(queue='hello')
# Publish through the default ('') exchange, which routes directly to
# the queue named by routing_key.
channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!')
print " [x] Sent 'Hello World!'"
connection.close()
|
calebgregory/scraps
|
rabbitmq/helloworld/send.py
|
Python
|
mit
| 434
|
"""
The topography module provides interfaces to global elevation models.
So far, only an interface to the `SRTM30
<https://dds.cr.usgs.gov/srtm/version2_1/SRTM30/srtm30_documentation.pdf>`_ data
set is provided, which has a resolution of 1 km.
Elevation data is downloaded on the fly but is cached to speed up subsequent
access. The interfaces uses the path pointed to by the :code:`TYPHON_DATA_PATH`
environment variable as data cache. This means that data is downloaded only
when they are not found in the cache.
.. note:: If :code:`TYPHON_DATA_PATH` is not set, the location of the file cache
will be determined from the :code:`XDG_CACHE_HOME` environment variable and,
   if this is not defined, default to :code:`${HOME}/.cache/typhon/topography`.
The module can be used in two ways:
1. by extracting the elevation data at native resolution
2. by interpolating to elevation data at arbitrary locations
The two different use cases are described below.
Native resolution
-----------------
Extracting elevation data at native resolution for a given rectangular domain
is done using the :code:`SRTM30.elevation` function. The function returns a
tuple consisting of the latitude and longitude grids as well as the elevation
data in meters.
.. code-block:: python
lat_min = 50
lon_min = 10
lat_max = 60
lon_max = 20
lats, lons, z = SRTM30.elevation(lat_min, lon_min, lat_max, lon_max)
Interpolation to given coordinates
----------------------------------
Interpolation of the elevation data to arbitrary coordinates can be performed
using the :code:`interpolate` method. Interpolation uses nearest neighbor
interpolation and is implemented using a :code:`KDTree`.
Interpolating the SRTM30 data to given latitude and longitude grids can be done
as follows:
.. code-block:: python
lat_min = 50
lon_min = 10
lat_max = 60
lon_max = 20
lats = np.linspace(lat_min, lat_max, 101)
lons = np.linspace(lon_min, lon_max, 101)
    z = SRTM30.interpolate(lats, lons)
"""
import os
import shutil
import urllib
import zipfile
import numpy as np
import typhon
from typhon.environment import environ
_data_path = None
def _get_data_path():
    """Return (creating it if necessary) the directory used as data cache.

    Resolution order: ``$TYPHON_DATA_PATH/topography``, then
    ``$XDG_CACHE_HOME``, then ``~/.cache/typhon/topography``.  The result
    is memoized in the module-level ``_data_path``.

    Returns:
        The path of the cache directory as a string.
    """
    global _data_path
    if _data_path is None:
        if "TYPHON_DATA_PATH" in environ:
            _data_path = os.path.join(environ["TYPHON_DATA_PATH"], "topography")
        elif "XDG_CACHE_HOME" in environ:
            # NOTE(review): unlike the other branches, this caches directly
            # in $XDG_CACHE_HOME without a typhon/topography subdirectory.
            # Kept as-is to preserve existing caches -- confirm intended.
            _data_path = environ["XDG_CACHE_HOME"]
        else:
            # Bug fix: os.path.expandvars("~") does not expand "~" (it only
            # expands $VARS), which produced a literal "~" directory in the
            # CWD.  expanduser resolves the real home directory.
            home = os.path.expanduser("~")
            _data_path = os.path.join(home, ".cache", "typhon", "topography")
        if not os.path.exists(_data_path):
            os.makedirs(_data_path)
    return _data_path
def _latlon_to_cart(lat, lon, R = typhon.constants.earth_radius):
    """
    Simple conversion of latitude and longitude to Cartesian coordinates.
    Approximates the Earth as sphere with radius :code:`R` and computes
    cartesian x, y, z coordinates with the center of the Earth as origin.
    Args:
        lat: Array of latitude coordinates.
        lon: Array of longitude coordinates.
        R: The radius to assume.
    Returns:
        Tuple :code:`(x, y, z)` of arrays :code:`x, y, z` containing the
        resulting x-, y- and z-coordinates.
    """
    phi = np.radians(lat)    # latitude in radians
    lam = np.radians(lon)    # longitude in radians
    cos_phi = np.cos(phi)
    return (R * cos_phi * np.cos(lam),
            R * cos_phi * np.sin(lam),
            R * np.sin(phi))
def _do_overlap(rect_1,
rect_2):
"""
Determines whether the two rectangles have overlap.
Args:
rect_1: Tuple :code:`(lat_min, lon_min, lat_max, lon_max) describing
a rectangular tile.
rect_2: Tuple :code:`(lat_min, lon_min, lat_max, lon_max) describing
a rectangular tile.
Returns:
True if the two rectangles overlap.
"""
lat_min_1, lon_min_1, lat_max_1, lon_max_1 = rect_1
lat_min_2, lon_min_2, lat_max_2, lon_max_2 = rect_2
lat_min = max(lat_min_1, lat_min_2)
lon_min = max(lon_min_1, lon_min_2)
lat_max = min(lat_max_1, lat_max_2)
lon_max = min(lon_max_1, lon_max_2)
return (lat_min < lat_max) and (lon_min < lon_max)
class SRTM30:
    """
    Interface to version 2.1 of SRTM30 digital elevation model.
    The data set has a resolution of about 1 km and covers all land masses
    except Antarctica.
    """
    # Grid geometry: each tile is 6000 rows x 4800 columns covering
    # 50 deg of latitude by 40 deg of longitude, i.e. 1/120 deg cells.
    _tile_height = 6000
    _tile_width = 4800
    _dlat = 50.0 / _tile_height
    _dlon = 40.0 / _tile_width
    # (name, lat_min, lon_min, lat_max, lon_max) of every available tile.
    _tiles = [("w180n90", 40, -180, 90, -140),
              ("w140n90", 40, -140, 90, -100),
              ("w100n90", 40, -100, 90, -60),
              ("w060n90", 40, -60, 90, -20),
              ("w020n90", 40, -20, 90, 20),
              ("e020n90", 40, 20, 90, 60),
              ("e060n90", 40, 60, 90, 100),
              ("e100n90", 40, 100, 90, 140),
              ("e140n90", 40, 140, 90, 180),
              ("w180n40", -10, -180, 40, -140),
              ("w140n40", -10, -140, 40, -100),
              ("w100n40", -10, -100, 40, -60),
              ("w060n40", -10, -60, 40, -20),
              ("w020n40", -10, -20, 40, 20),
              ("e020n40", -10, 20, 40, 60),
              ("e060n40", -10, 60, 40, 100),
              ("e100n40", -10, 100, 40, 140),
              ("e140n40", -10, 140, 40, 180),
              ("w180s10", -60, -180, -10, -140),
              ("w140s10", -60, -140, -10, -100),
              ("w100s10", -60, -100, -10, -60),
              ("w060s10", -60, -60, -10, -20),
              ("w020s10", -60, -20, -10, 20),
              ("e020s10", -60, 20, -10, 60),
              ("e060s10", -60, 60, -10, 100),
              ("e100s10", -60, 100, -10, 140),
              ("e140s10", -60, 140, -10, 180)]
    @staticmethod
    def get_tiles(lat_min, lon_min, lat_max, lon_max):
        """
        Get names of the tiles that contain the data of the given rectangular
        region of interest (ROI).
        Args:
            lat_min: The latitude of the lower left corner of the ROI
            lon_min: The longitude of the lower left corner of the ROI
            lat_max: The latitude of the upper right corner of the ROI
            lon_max: The longitude of the upper right corner of the ROI
        Return:
            List of tile names that contain the elevation data for the ROI.
        """
        # Normalize longitudes into [-180, 180] before testing overlap.
        lon_min = lon_min % 360
        if lon_min > 180:
            lon_min -= 360
        lon_max = lon_max % 360
        if lon_max > 180:
            lon_max -= 360
        fits = []
        for t in SRTM30._tiles:
            name, lat_min_1, lon_min_1, lat_max_1, lon_max_1 = t
            if _do_overlap((lat_min, lon_min, lat_max, lon_max),
                           (lat_min_1, lon_min_1, lat_max_1, lon_max_1)):
                fits += [name]
        return fits
    @staticmethod
    def get_bounds(name):
        """
        Get the bounds of tile with a given name.
        Args:
            name(str): The name of the tile.
        Returns:
            Tuple :code:(`lat_min`, `lon_min`, `lat_max`, `lon_max`) describing
            the bounding box of the tile with the given name.
        """
        # Assumes `name` is one of the entries in _tiles; otherwise the
        # [0] lookup raises IndexError.
        tile = [t for t in SRTM30._tiles if t[0] == name][0]
        _, lat_min, lon_min, lat_max, lon_max = tile
        return lat_min, lon_min, lat_max, lon_max
    @staticmethod
    def get_grids(name):
        """
        Returns the latitude-longitude grid of the tile with the given name.
        Args:
            name(str): The name of the tile.
        Returns:
            Tuple :code:(`lat_grid`, `lon_grid`) containing the one dimensional
            latitude and longitude grids corresponding to the given tile.
        """
        # Grids run through cell centers (half-cell inset from the tile
        # edges); latitudes are returned north-to-south ([::-1]).
        lat_min, lon_min, lat_max, lon_max = SRTM30.get_bounds(name)
        start = lat_min + 0.5 * SRTM30._dlat
        stop = lat_max - 0.5 * SRTM30._dlat
        lat_grid = np.linspace(start, stop, SRTM30._tile_height)[::-1]
        start = lon_min + 0.5 * SRTM30._dlon
        stop = lon_max - 0.5 * SRTM30._dlon
        lon_grid = np.linspace(start, stop, SRTM30._tile_width)
        return lat_grid, lon_grid
    @staticmethod
    def get_native_grids(lat_min, lon_min, lat_max, lon_max):
        """
        Returns the latitude and longitude grid at native SRTM30 resolution
        that are included in the given rectangle.
        Args:
            lat_min: The latitude coordinate of the lower left corner.
            lon_min: The longitude coordinate of the lower left corner.
            lat_max: The latitude coordinate of the upper right corner.
            lon_max: The latitude coordinate of the upper right corner.
        Returns:
            Tuple :code:`(lats, lons)` of 1D-arrays containing the latitude
            and longitude coordinates of the SRTM30 data points within the
            given rectangle.
        """
        # Row indices count down from 90N in steps of _dlat.
        i = (90 - lat_max) / SRTM30._dlat
        i_max = np.trunc(i)
        # NOTE(review): when the bound falls exactly on a cell boundary the
        # latitude branch bumps the index up while the longitude branch
        # below bumps it down -- confirm this asymmetry is intentional.
        if not i_max < i:
            i_max = i_max + 1
        i = (90 - lat_min) / SRTM30._dlat
        i_min = np.trunc(i)
        lat_grid = 90 + 0.5 * SRTM30._dlat - np.arange(i_max, i_min + 1) * SRTM30._dlat
        # Column indices count up from 180W in steps of _dlon.
        j = (lon_max + 180) / SRTM30._dlon
        j_max = np.trunc((lon_max + 180.0) / SRTM30._dlon)
        if not j_max < j:
            j_max = j_max - 1
        j_min = np.trunc((lon_min + 180.0) / SRTM30._dlon)
        lon_grid = -180 + 0.5 * SRTM30._dlon
        lon_grid += np.arange(j_min, j_max + 1) * SRTM30._dlon
        return lat_grid, lon_grid
    @staticmethod
    def download_tile(name):
        """
        This function will download and extract the tile with the given name.
        The data is stored in the path pointed to by the :code:`_data_path`
        attribute of the module.
        Args:
            name(str): The name of the tile to download.
        """
        # Network I/O: fetches the zipped DEM from the USGS server into
        # the cache directory, then unpacks it alongside the archive.
        base_url = "https://dds.cr.usgs.gov/srtm/version2_1/SRTM30"
        url = base_url + "/" + name + "/" + name + ".dem.zip"
        r = urllib.request.urlopen(url)
        filename = os.path.join(_get_data_path(), name + ".dem.zip")
        path = os.path.join(filename)
        with open(path, 'wb') as f:
            shutil.copyfileobj(r, f)
        # Extract zip file.
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(os.path.dirname(filename))
    @staticmethod
    def get_tile(name):
        """
        Get tile with the given name.
        Check the cache for the tile with the given name. If not found, the
        tile is download.
        Args:
            name(str): The name of the tile.
        """
        dem_file = os.path.join(_get_data_path(), (name + ".dem").upper())
        if not (os.path.exists(dem_file)):
            SRTM30.download_tile(name)
        # The DEM is raw big-endian 16-bit integers ('>i2'), reshaped to
        # the fixed tile dimensions.
        y = np.fromfile(dem_file, dtype = np.dtype('>i2')).reshape(SRTM30._tile_height,
                                                                   SRTM30._tile_width)
        return y
    @staticmethod
    def get_tree(name):
        """
        Get KD-tree for the tile with the given name.
        Args:
            name(str): The name of the tile.
        """
        from pykdtree.kdtree import KDTree
        # Build the tree on 3D Cartesian points so that nearest-neighbor
        # distances are well behaved near the poles and the date line.
        lat_grid, lon_grid = SRTM30.get_grids(name)
        lat_grid, lon_grid = np.meshgrid(lat_grid, lon_grid, indexing = "ij")
        x, y, z = _latlon_to_cart(lat_grid, lon_grid)
        X = np.concatenate([x.reshape(-1, 1, order = "C"),
                            y.reshape(-1, 1, order = "C"),
                            z.reshape(-1, 1, order = "C")], axis = 1)
        tree = KDTree(X.astype(np.float32))
        return tree
    @staticmethod
    def elevation(lat_min,
                  lon_min,
                  lat_max,
                  lon_max):
        """
        Return elevation data at native resolution in the a given rectangular
        domain.
        Args:
            lat_min(float): Latitude coordinate of the lower-left corner
            lon_min(float): Longitude coordinate of the lower-left corner.
            lat_max(float): Latitude coordinate of the upper-right corner
            lon_max(float): Longitude coordinate of the upper-right corner.
        """
        lats_d, lons_d = SRTM30.get_native_grids(lat_min,
                                                 lon_min,
                                                 lat_max,
                                                 lon_max)
        # Snap the requested bounds to the cell edges of the native grid.
        lat_min = lats_d.min() - 0.5 * SRTM30._dlat
        lat_max = lats_d.max() + 0.5 * SRTM30._dlat
        lon_min = lons_d.min() - 0.5 * SRTM30._dlon
        lon_max = lons_d.max() + 0.5 * SRTM30._dlon
        elevation = np.zeros(lats_d.shape + lons_d.shape)
        tiles = SRTM30.get_tiles(lat_min, lon_min, lat_max, lon_max)
        for t in tiles:
            dem = SRTM30.get_tile(t)
            lats, lons = SRTM30.get_grids(t)
            lat_min_s, lon_min_s, lat_max_s, lon_max_s = SRTM30.get_bounds(t)
            # Source cells of this tile that fall inside the ROI ...
            inds_lat = np.logical_and(lat_min <= lats, lats < lat_max)
            inds_lon = np.logical_and(lon_min <= lons, lons < lon_max)
            inds_s = np.logical_and(inds_lat.reshape(-1, 1),
                                    inds_lon.reshape(1, -1))
            # ... and destination cells of the output that this tile covers.
            inds_lat = np.logical_and(lat_min_s <= lats_d, lats_d < lat_max_s)
            inds_lon = np.logical_and(lon_min_s <= lons_d, lons_d < lon_max_s)
            inds_d = np.logical_and(inds_lat.reshape(-1, 1),
                                    inds_lon.reshape(1, -1))
            # NOTE(review): this masked copy assumes the source and
            # destination masks select the same number of cells -- confirm
            # for ROIs that straddle tile boundaries.
            elevation[inds_d] = dem[inds_s]
        return lats_d, lons_d, elevation
    @staticmethod
    def interpolate(lats,
                    lons,
                    n_neighbors = 1):
        """
        Interpolate elevation data to the given coordinates.
        Uses KD-tree-based nearest-neighbor interpolation to interpolate
        the elevation data to arbitrary grids.
        Args:
            lats: Array containing latitude coordinates.
            lons: Array containing longitude coordinates.
            n_neighbors: Number of neighbors over which to average the elevation
                data.
        """
        lat_min = lats.min()
        lat_max = lats.max()
        lon_min = lons.min()
        lon_max = lons.max()
        tiles = SRTM30.get_tiles(lat_min, lon_min, lat_max, lon_max)
        # Points outside every tile keep elevation 0.
        elevation = np.zeros(lats.shape)
        for t in tiles:
            dem = SRTM30.get_tile(t).ravel()
            tree = SRTM30.get_tree(t)
            lat_min, lon_min, lat_max, lon_max = SRTM30.get_bounds(t)
            # Only query the tree for the points that lie inside this tile.
            inds_lat = np.logical_and(lat_min <= lats, lats < lat_max)
            inds_lon = np.logical_and(lon_min <= lons, lons < lon_max)
            inds = np.logical_and(inds_lat, inds_lon)
            X = np.zeros((inds.sum(), 3))
            x, y, z = _latlon_to_cart(lats[inds], lons[inds])
            X[:, 0] = x
            X[:, 1] = y
            X[:, 2] = z
            _, neighbors = tree.query(np.asarray(X, np.float32), n_neighbors)
            if neighbors.size > 0:
                # With n_neighbors > 1 the query returns a 2D index array;
                # average the neighbor elevations in that case.
                if len(neighbors.shape) > 1:
                    elevation[inds] = dem[neighbors].mean(axis = (1))
                else:
                    elevation[inds] = dem[neighbors]
        return elevation
|
atmtools/typhon
|
typhon/topography.py
|
Python
|
mit
| 15,246
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: removes the 'Event' model (table
    # 'narrative_event').  The frozen 'models' dict below is the ORM state
    # *after* this migration and is used by South, not by application code.

    def forwards(self, orm):
        # Deleting model 'Event'
        db.delete_table('narrative_event')

    def backwards(self, orm):
        # Adding model 'Event' (recreates the table exactly as it was
        # before this migration, so the migration is reversible).
        db.create_table('narrative_event', (
            ('origin', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('thread_id', self.gf('django.db.models.fields.CharField')(max_length=36, null=True, blank=True)),
            ('event_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('event_details_json', self.gf('django.db.models.fields.TextField')(default=None, null=True, blank=True)),
            ('expiration_time', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ))
        db.send_create_signal('narrative', ['Event'])

    # Frozen model definitions (auto-generated by South; do not edit).
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'narrative.assertionmeta': {
            'Meta': {'object_name': 'AssertionMeta'},
            'assertion_load_path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'check_interval_seconds': ('django.db.models.fields.IntegerField', [], {'default': '3600'}),
            'display_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_check': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'})
        },
        'narrative.datum': {
            'Meta': {'object_name': 'Datum'},
            'datum_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'datum_note_json': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'expiration_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'thread_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'narrative.issue': {
            'Meta': {'object_name': 'Issue'},
            'created_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'failed_assertion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['narrative.AssertionMeta']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'resolved_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'narrative.modelissue': {
            'Meta': {'object_name': 'ModelIssue', '_ormbases': ['narrative.Issue']},
            'issue_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['narrative.Issue']", 'unique': 'True', 'primary_key': 'True'}),
            'model_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'model_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"})
        },
        'narrative.resolutionstep': {
            'Meta': {'object_name': 'ResolutionStep'},
            'action_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['narrative.Issue']"}),
            'solution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['narrative.Solution']", 'null': 'True', 'blank': 'True'})
        },
        'narrative.solution': {
            'Meta': {'object_name': 'Solution'},
            'diagnostic_case_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'enacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'plan_json': ('django.db.models.fields.TextField', [], {}),
            'problem_description': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        }
    }

    complete_apps = ['narrative']
|
ambitioninc/django-narrative
|
narrative/south_migrations/0007_auto__del_event.py
|
Python
|
mit
| 5,756
|
import lldb
import os
# lldb error code meaning "expression evaluated but produced no result";
# that case is not a failure for our purposes.
kNoResult = 0x1001


@lldb.command("load_swift")
def load_swift(debugger, path, ctx, result, _):
    """Evaluate the contents of *path* in the current frame.

    Files ending in '.swift' are evaluated as Swift; everything else is
    evaluated with the frame's default expression language.  Evaluation
    errors (other than "no result") are reported through *result*.
    """
    with open(os.path.expanduser(path)) as source:
        code = source.read()

    frame = ctx.frame
    if path.endswith(".swift"):
        options = lldb.SBExpressionOptions()
        options.SetLanguage(lldb.eLanguageTypeSwift)
        error = frame.EvaluateExpression(code, options).error
    else:
        error = frame.EvaluateExpression(code).error

    if error.fail and error.value != kNoResult:
        result.SetError(error)
|
sberrevoets/dotfiles
|
lldbhelpers/load_swift.py
|
Python
|
mit
| 543
|
from __future__ import print_function
import filecmp
import glob
import itertools
import os
import sys
import sysconfig
import tempfile
import unittest
# Repository root (three levels up from this file).
project_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
# Both locations can be overridden through the environment.
test_dir = os.getenv("BROTLI_TESTS_PATH")
BRO_ARGS = [os.getenv("BROTLI_WRAPPER")]
# Fallbacks
if test_dir is None:
    test_dir = os.path.join(project_dir, 'tests')
if BRO_ARGS[0] is None:
    # No wrapper given: run python/bro.py with the current interpreter.
    python_exe = sys.executable or 'python'
    bro_path = os.path.join(project_dir, 'python', 'bro.py')
    BRO_ARGS = [python_exe, bro_path]
# Get the platform/version-specific build folder.
# By default, the distutils build base is in the same location as setup.py.
platform_lib_name = 'lib.{platform}-{version[0]}.{version[1]}'.format(
    platform=sysconfig.get_platform(), version=sys.version_info)
build_dir = os.path.join(project_dir, 'bin', platform_lib_name)
# Prepend the build folder to sys.path and the PYTHONPATH environment variable.
if build_dir not in sys.path:
    sys.path.insert(0, build_dir)
# Environment passed to subprocess test runs so they see the built module.
TEST_ENV = os.environ.copy()
if 'PYTHONPATH' not in TEST_ENV:
    TEST_ENV['PYTHONPATH'] = build_dir
else:
    TEST_ENV['PYTHONPATH'] = build_dir + os.pathsep + TEST_ENV['PYTHONPATH']
TESTDATA_DIR = os.path.join(test_dir, 'testdata')
TESTDATA_FILES = [
    'empty',  # Empty file
    '10x10y',  # Small text
    'alice29.txt',  # Large text
    'random_org_10k.bin',  # Small data
    'mapsdatazrh',  # Large data
]
TESTDATA_PATHS = [os.path.join(TESTDATA_DIR, f) for f in TESTDATA_FILES]
TESTDATA_PATHS_FOR_DECOMPRESSION = glob.glob(
    os.path.join(TESTDATA_DIR, '*.compressed'))
# Scratch directory for (de)compression outputs; emptied per-test by
# TestCase.tearDown below.
TEMP_DIR = tempfile.mkdtemp()
def get_temp_compressed_name(filename):
    """Return the TEMP_DIR path for the compressed ('.bro') copy of *filename*."""
    leaf = os.path.basename(filename + '.bro')
    return os.path.join(TEMP_DIR, leaf)
def get_temp_uncompressed_name(filename):
    """Return the TEMP_DIR path for the decompressed ('.unbro') copy of *filename*."""
    leaf = os.path.basename(filename + '.unbro')
    return os.path.join(TEMP_DIR, leaf)
def bind_method_args(method, *args, **kwargs):
    """Return a one-argument wrapper that calls *method* with ``self`` plus
    the pre-bound positional and keyword arguments."""
    def bound(self):
        return method(self, *args, **kwargs)
    return bound
def generate_test_methods(test_case_class,
                          for_decompression=False,
                          variants=None):
    """Attach one concrete ``test_*`` method to *test_case_class* per
    (template, data file, option set) combination.

    Each ``_test*`` template method on the class is expanded so the test
    runner reports every compression scenario individually.
    """
    paths = (TESTDATA_PATHS_FOR_DECOMPRESSION if for_decompression
             else TESTDATA_PATHS)
    # Expand the variants mapping into [name, kwargs] option sets: the
    # cartesian product over all (key, value) choices.
    if variants:
        axes = [[(key, value) for value in values]
                for key, values in variants.items()]
        opts = []
        for combo in itertools.product(*axes):
            combo_name = '_'.join(str(part)
                                  for part in itertools.chain(*combo))
            opts.append([combo_name, dict(combo)])
    else:
        opts = [['', {}]]
    templates = [m for m in dir(test_case_class) if m.startswith('_test')]
    for template in templates:
        template_func = getattr(test_case_class, template)
        for testdata in paths:
            stem = os.path.splitext(os.path.basename(testdata))[0]
            for combo_name, combo_kwargs in opts:
                test_name = 'test_{method}_{options}_{file}'.format(
                    method=template, options=combo_name, file=stem)
                setattr(test_case_class, test_name,
                        bind_method_args(template_func, testdata,
                                         **combo_kwargs))
class TestCase(unittest.TestCase):
    """Base test case that removes per-test (de)compression artifacts."""

    def tearDown(self):
        # Drop both the '.bro' and '.unbro' outputs for every data file.
        for path in TESTDATA_PATHS:
            self._unlink_quietly(get_temp_compressed_name(path))
            self._unlink_quietly(get_temp_uncompressed_name(path))

    @staticmethod
    def _unlink_quietly(path):
        # A missing file is fine: the test may not have produced it.
        try:
            os.unlink(path)
        except OSError:
            pass

    def assertFilesMatch(self, first, second):
        self.assertTrue(
            filecmp.cmp(first, second, shallow=False),
            'File {} differs from {}'.format(first, second))
|
google/brotli
|
python/tests/_test_utils.py
|
Python
|
mit
| 3,811
|
#!python
# One-shot helper: deletes the Kinesis stream named 'test'.
# NOTE: Python 2 script (uses the print statement).  Credentials come from
# the standard boto configuration/environment.
import boto.kinesis

# Connect to Kinesis in the ap-northeast-1 (Tokyo) region.
con = boto.kinesis.connect_to_region('ap-northeast-1')
# Request deletion of the 'test' stream and show the service response.
res = con.delete_stream('test')
print res
|
digitalbot/KinesisSample
|
delete-stream.py
|
Python
|
mit
| 135
|
import logging
from pprint import pformat
from .. import settings
# Module-level logger for room event processing.
LOG = logging.getLogger(__name__)
# Event handler mapping: event type constant -> RoomEventHandler subclass.
# Populated automatically by the RoomEventHandlerType metaclass below.
handlers = {}
# Event types
UNHANDLED = -1  # anything that is not parsed into a specific type
MSG = 1  # m.room.message with msgtype m.text
JOIN = 2  # membership change -> join
LEAVE = 3  # membership change -> leave
INVITE = 4  # membership change -> invite
PROFILE_CHANGE = 5  # join -> join transition (e.g. displayname change)
class RoomEventHandlerType(type):
    """
    Metaclass that registers every concrete room event handler class in
    the module-level ``handlers`` dict, keyed by its ``event_type``.
    """

    def __new__(meta, name, bases, dct):
        cls = super().__new__(meta, name, bases, dct)
        # Only subclasses are registered; the abstract base (no bases)
        # is skipped.
        if bases:
            if "event_type" not in dct:
                raise NotImplementedError
            handlers[dct["event_type"]] = cls
        return cls
class RoomEventHandler(metaclass=RoomEventHandlerType):
    """
    Base class for room event handlers.

    :param event: The event to handle
    :type event: dict
    :param client: The client instance the room event observer is created by
    :type client: :class:`leaf.client.client.Client`
    """

    def __init__(self, event, client):
        self.event = event
        self.client = client

    def parse(self):
        """Extract the handler-specific keyword arguments from the event."""
        raise NotImplementedError

    def callback(self, **event_data):
        """React to the parsed event data."""
        raise NotImplementedError

    def process(self):
        """Parse the event and feed the result to :meth:`callback`."""
        event_data = self.parse()
        self.callback(**event_data)
class UnhandledHandler(RoomEventHandler):
    """Fallback handler for room events that are not parsed."""

    event_type = UNHANDLED

    def parse(self):
        return {"event": self.event}

    def callback(self, event):
        # Unparsed events are only surfaced in debug mode.
        if settings.debug:
            self.client.ui.draw_unhandled(
                "UNHANDLED EVENT: {}".format(pformat(event)))
class MessageHandler(RoomEventHandler):
    """Handler for text message events."""

    event_type = MSG

    def parse(self):
        content = self.event["content"]
        return {
            "user_id": self.event["sender"],
            "msg": content["body"]
        }

    def callback(self, user_id, msg):
        sender = self.client.users.get_user(user_id)
        self.client.ui.draw_user_message(sender, msg)
class JoinHandler(RoomEventHandler):
    """Event handler for users joining the room."""

    event_type = JOIN

    def parse(self):
        # A missing displayname maps to None (user without a nick).
        return {
            "user_id": self.event["sender"],
            "nick": self.event["content"].get("displayname")
        }

    def callback(self, user_id, nick):
        joined = self.client.users.add_user(user_id, nick=nick)
        self.client.ui.draw_user_join(joined)
class LeaveHandler(RoomEventHandler):
    """Event handler for users leaving the room."""

    event_type = LEAVE

    def parse(self):
        return {"user_id": self.event["sender"]}

    def callback(self, user_id):
        leaver = self.client.users.remove_user(user_id)
        # The backlog can contain a leave event without its associated
        # join; fall back to a temporary user object in that case.
        if not leaver:
            leaver = self.client.users.get_user(user_id)
        self.client.ui.draw_user_leave(leaver)
class InviteHandler(RoomEventHandler):
    """Handler for invite events."""

    event_type = INVITE

    def parse(self):
        return {
            "user_id": self.event["sender"],
            "invited_user_id": self.event["state_key"]
        }

    def callback(self, user_id, invited_user_id):
        users = self.client.users
        inviter = users.get_user(user_id)
        invitee = users.get_user(invited_user_id)
        self.client.ui.draw_user_invite(inviter, invitee)
class ProfileChangeHandler(RoomEventHandler):
    """
    Handler for profile change events.

    Only displayname changes are supported right now.
    """

    event_type = PROFILE_CHANGE

    def parse(self):
        return {
            "user_id": self.event["sender"],
            "new_nick": self.event["content"]["displayname"]
        }

    def callback(self, user_id, new_nick):
        """
        :param user_id: The MXID of the user that did a profile change
        :param new_nick: The new displayname of the user.
        """
        user = self.client.users.get_user(user_id)
        # A profile change may be caused by something other than the nick
        # (e.g. avatar); ignore no-op nick updates.
        if user.nick != new_nick:
            # Draw first so the UI can still show the previous nick, then
            # update the stored nick.
            self.client.ui.draw_user_change_nick(user, new_nick)
            user.update_nick(nick=new_nick)
class RoomEventObserver:
    """
    The RoomEventObserver class takes care of incoming room events from the
    server sync thread. It parses incoming event types and runs the
    corresponding room event handler.

    :param client: The client instance the room event observer is created by
    :type client: :class:`leaf.client.client.Client`
    """

    def __init__(self, client):
        self.client = client

    def on_room_event(self, room, event):
        """
        The room event listener function which receives incoming events and
        executes the appropriate handler.

        :param room: The room which the observer is listening to
        :param event: The event that was just intercepted
        """
        LOG.debug("Received event: {}".format(pformat(event)))
        event_type = self.parse_event_type(event)
        # Fall back to the UNHANDLED handler for unexpected types so a
        # surprise event cannot raise KeyError and kill the sync thread.
        handler_cls = handlers.get(event_type, handlers[UNHANDLED])
        handler_cls(event, self.client).process()

    def parse_event_type(self, event):
        """
        This function parses an event and returns an event type.

        :param event: The event that is going to be parsed
        :return: The event type (one of the module constants)
        """
        if "redacted_because" in event:
            return UNHANDLED
        if event["type"] == "m.room.member":
            return self._parse_membership_event_type(event)
        if event["type"] == "m.room.message":
            if ("msgtype" in event["content"] and
                    event["content"]["msgtype"] == "m.text"):
                return MSG
        return UNHANDLED

    def _parse_membership_event_type(self, event):
        membership = event["content"]["membership"]
        if membership == "join":
            return self._parse_join_event_type(event)
        if membership == "leave":
            return LEAVE
        if membership == "invite":
            return INVITE
        # BUGFIX: previously this fell through and returned None for other
        # membership states (e.g. "ban", "knock"), which crashed the handler
        # lookup in on_room_event.  Treat them as unhandled instead.
        return UNHANDLED

    def _parse_join_event_type(self, event):
        # join -> join is a profile change.  "unsigned"/"prev_content" may
        # be absent, so guard the lookups with .get instead of indexing.
        prev_content = event.get("unsigned", {}).get("prev_content")
        if prev_content and prev_content.get("membership") == "join":
            return PROFILE_CHANGE
        return JOIN
|
simonklb/matrix-leaf
|
leaf/client/room_event.py
|
Python
|
mit
| 7,074
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ApplicationSecurityGroupsOperations(object):
    """ApplicationSecurityGroupsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-11-01".
    """

    # Alias so callers can reach the generated model classes through the
    # operations group.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service API version pinned for every request issued here.
        self.api_version = "2017-11-01"
        self.config = config

    def _delete_initial(
            self, resource_group_name, application_security_group_name, custom_headers=None, raw=False, **operation_config):
        # Sends the DELETE request that starts the long-running operation;
        # polling to completion is handled by delete() below.
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def delete(
            self, resource_group_name, application_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified application security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application
         security group.
        :type application_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            application_security_group_name=application_security_group_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # The initial request was already sent; replay its response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Polls the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, application_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified application security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application
         security group.
        :type application_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ApplicationSecurityGroup or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroup or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationSecurityGroup', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def _create_or_update_initial(
            self, resource_group_name, application_security_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        # Sends the PUT request that starts the long-running create/update;
        # polling to completion is handled by create_or_update() below.
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        # 200 = updated existing group, 201 = created new group.
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationSecurityGroup', response)
        if response.status_code == 201:
            deserialized = self._deserialize('ApplicationSecurityGroup', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, application_security_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates an application security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application
         security group.
        :type application_security_group_name: str
        :param parameters: Parameters supplied to the create or update
         ApplicationSecurityGroup operation.
        :type parameters:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroup
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns
         ApplicationSecurityGroup or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroup]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            application_security_group_name=application_security_group_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result

        # Construct and send request
        def long_running_send():
            # The initial request was already sent; replay its response.
            return raw_result.response

        def get_long_running_status(status_link, headers=None):
            # Polls the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('ApplicationSecurityGroup', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all application security groups in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationSecurityGroup
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroupPaged[~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroup]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages follow the
            # nextLink URL handed back by the service.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.ApplicationSecurityGroupPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.ApplicationSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the application security groups in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationSecurityGroup
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroupPaged[~azure.mgmt.network.v2017_11_01.models.ApplicationSecurityGroup]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; later pages follow the
            # nextLink URL handed back by the service.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.ApplicationSecurityGroupPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.ApplicationSecurityGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/application_security_groups_operations.py
|
Python
|
mit
| 20,853
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group — manual edits will be lost on
# regeneration (see file header). Documentation-only changes here.
class LoadBalancerProbesOperations:
    """LoadBalancerProbesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # All four collaborators are injected by the generated client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.LoadBalancerProbeListResult"]:
        """Gets all the load balancer probes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.LoadBalancerProbeListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LoadBalancerProbeListResult"]
        # Map well-known HTTP failures to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds either the first-page request (from the operation URL
            # template) or a follow-up request from the service's next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is already a complete URL, so no query parameters
                # are appended.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page and raise a typed error on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        load_balancer_name: str,
        probe_name: str,
        **kwargs: Any
    ) -> "_models.Probe":
        """Gets load balancer probe.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param probe_name: The name of the probe.
        :type probe_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Probe, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.Probe
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Probe"]
        # Map well-known HTTP failures to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'probeName': self._serialize.url("probe_name", probe_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Probe', pipeline_response)
        if cls:
            # Caller-supplied hook receives the raw response, the model, and
            # (here) an empty header dict.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'}  # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_load_balancer_probes_operations.py
|
Python
|
mit
| 8,483
|
# coding=utf-8
from django.http import HttpResponse, HttpResponseServerError
from .models import CompoundAdverseEvent, OpenFDACompound, AdverseEvent
import ujson as json
# TODO TODO TODO REVISE IN PYTHON 3
import cgi
import html
def main(request):
    """Fallback view: any unrouted or invalid AJAX call gets a 500."""
    error_response = HttpResponseServerError()
    return error_response
def fetch_adverse_events_data(request):
    """Return every compound/adverse-event pair as a JSON table payload.

    Each row carries the compound, event, raw and usage-normalized report
    counts, organ, black-box flag, and logp/alogp values.
    """
    # Pull all fields in one values() query; prefetch_related keeps the
    # nested compound/organ lookups from issuing per-row queries.
    ae_data = list(CompoundAdverseEvent.objects.prefetch_related(
        'compound__compound',
        'event__organ'
    ).all().values(
        'compound_id',
        'compound__compound__name',
        'compound__compound_id',
        # Probably should be, you know, NAME
        'event__event',
        'frequency',
        'compound__estimated_usage',
        # WHY ISN'T THIS JUST NAME??
        'event__organ__organ_name',
        # Add logp
        'compound__compound__logp',
        'compound__compound__alogp',
        'compound__black_box',
        # SUBJECT TO CHANGE
        # 'compound__compound__tctc',
        # 'compound__compound__mps',
        # 'compound__compound__epa'
        'compound__compound__synonyms'
    ))
    data = []
    # A serializer would probably better serve us here...
    for ae in ae_data:
        # project = ''
        # if ae.get('compound__compound__tctc'):
        #     project += 'TCTC'
        # if ae.get('compound__compound__epa'):
        #     project += 'EPA'
        # if ae.get('compound__compound__mps'):
        #     project += 'MPS'
        # if not project:
        #     project = 'Unassigned'
        # Null organs and black-box flags render as '' / False.
        organ_name = ''
        if ae.get('event__organ__organ_name'):
            organ_name = ae.get('event__organ__organ_name')
        black_box_warning = False
        if ae.get('compound__black_box'):
            black_box_warning = True
        normalized_reports = ''
        estimated_usage = ''
        # Reports are normalized per 10,000 estimated users; both display
        # fields stay empty when no usage estimate exists.
        if ae.get('compound__estimated_usage'):
            normalized_reports = '{:,.2f}'.format(
                float(ae.get('frequency')) / ae.get('compound__estimated_usage') * 10000
            )
            estimated_usage = '{:,}'.format(ae.get('compound__estimated_usage'))
        data.append(
            {
                'view': ae.get('compound_id'),
                'compound': {
                    'id': ae.get('compound__compound_id'),
                    'name': html.escape(ae.get('compound__compound__name'))
                },
                'event': {
                    'lower': html.escape(ae.get('event__event').lower()),
                    'name': html.escape(ae.get('event__event'))
                },
                'number_of_reports': '{:,}'.format(
                    ae.get('frequency')
                ),
                'normalized_reports': normalized_reports,
                'estimated_usage': estimated_usage,
                'organ': organ_name,
                'black_box_warning': black_box_warning,
                # 'project': project,
                'logp': ae.get('compound__compound__logp'),
                'alogp': ae.get('compound__compound__alogp'),
                'synonyms': ae.get('compound__compound__synonyms')
            }
        )
    all_data = {
        'data': data
    }
    return HttpResponse(
        json.dumps(all_data),
        content_type="application/json"
    )
def fetch_aggregate_ae_by_compound(request):
    """Aggregate adverse-event report totals per compound.

    Returns JSON with one row per OpenFDA compound and a mapping from
    event name to the compounds that reported it.
    """
    freq_by_compound = {}
    compounds_by_event = {}
    ae_links = CompoundAdverseEvent.objects.all().prefetch_related('event', 'compound__compound')
    for link in ae_links:
        freq_by_compound.setdefault(link.compound_id, []).append(link.frequency)
        compounds_by_event.setdefault(link.event.event, {})[link.compound.compound.name] = True
    rows = []
    for fda_compound in OpenFDACompound.objects.all().prefetch_related('compound'):
        usage_display = (
            '{:,}'.format(fda_compound.estimated_usage)
            if fda_compound.estimated_usage else ''
        )
        checkbox_html = '<input class="table-checkbox big-checkbox compound" type="checkbox" value="{}">'.format(fda_compound.compound.name)
        rows.append({
            'checkbox': html.escape(checkbox_html),
            'compound': fda_compound.compound.name,
            'estimated_usage': usage_display,
            'frequency': '{:,}'.format(sum(freq_by_compound.get(fda_compound.id, [0]))),
            'synonyms': fda_compound.compound.synonyms,
        })
    return HttpResponse(
        json.dumps({'data': rows, 'ae_to_compound': compounds_by_event}),
        content_type="application/json"
    )
def fetch_aggregate_ae_by_event(request):
    """Aggregate adverse-event report totals per event, with organ name."""
    freq_by_event = {}
    for link in CompoundAdverseEvent.objects.all():
        freq_by_event.setdefault(link.event_id, []).append(link.frequency)
    rows = []
    for event in AdverseEvent.objects.all().prefetch_related('organ'):
        total_reports = sum(freq_by_event.get(event.id, [0]))
        # Events with zero reports are omitted from the table.
        if not total_reports:
            continue
        organ_display = event.organ.organ_name if event.organ else ''
        checkbox_html = '<input class="table-checkbox big-checkbox adverse-event" type="checkbox" value="{}">'.format(event.event)
        rows.append({
            'checkbox': html.escape(checkbox_html),
            'event': event.event,
            'organ': organ_display,
            'frequency': '{:,}'.format(total_reports),
        })
    return HttpResponse(
        json.dumps({'data': rows}),
        content_type="application/json"
    )
# Dispatch table mapping the POST 'call' parameter to its handler view.
switch = {
    'fetch_adverse_events_data': fetch_adverse_events_data,
    'fetch_aggregate_ae_by_event': fetch_aggregate_ae_by_event,
    'fetch_aggregate_ae_by_compound': fetch_aggregate_ae_by_compound
}
# Should probably consolidate these (DRY)
def ajax(request):
    """Route an AJAX POST to the handler named by its 'call' parameter.

    Receives the following from POST:
    call -- What function to redirect to
    """
    handler_name = request.POST.get('call', '')
    handler = switch.get(handler_name)
    # Missing or unknown call names fall through to the 500 handler.
    if handler is None:
        return main(request)
    return handler(request)
|
UPDDI/mps-database-server
|
drugtrials/ajax.py
|
Python
|
mit
| 6,822
|
#!/usr/bin/python
import abc
class base_fun(abc.ABCMeta('_ABCBase', (object,), {'__slots__': ()})):
    """Abstract base for executor tasks; subclasses must implement
    ``__init__`` and ``run``.

    BUG FIX: the original used ``__metaclass__ = abc.ABCMeta``, which is
    Python-2-only syntax; on Python 3 it is silently ignored, so the class
    was not abstract at all and the decorated methods were not enforced.
    Deriving from an ABCMeta-created base enforces abstractness on both
    Python 2 and Python 3 without any extra dependency.
    """
    @abc.abstractmethod
    def __init__(self):
        # Subclasses must provide their own initialization.
        pass
    @abc.abstractmethod
    def run(self):
        # Subclasses must implement the task body.
        return
|
trelay/multi-executor
|
main/main.py
|
Python
|
mit
| 211
|
import tarfile
import io
import sys
from datetime import datetime
import pytz
# Timestamps in the gist log are rendered in IST.
tz = pytz.timezone('Asia/Kolkata')
# Flush threshold: lines buffered per archive.
archive_size = 1000 # lines
# Destination directory for numbered .tar.gz archives and the 'gist' file.
log_dir = '/home/ubuntu/migration/slog/'
def archive_gen():
    """Coroutine: receive a list of log lines and write them out as a
    numbered ``<n>.tar.gz`` archive in ``log_dir``, then append a summary
    entry (timestamp, archive name, first buffered line) to the gist log.
    """
    log_count = 0
    while True:
        data = yield
        log_count += 1
        log_name = log_dir + str(log_count) + '.tar.gz'
        logstr = '\n'.join(data)
        # BUG FIX: the tar member size must be the *encoded* byte length;
        # the original used len(logstr) (character count), which truncates
        # or corrupts the member whenever the log contains multi-byte
        # UTF-8 characters.
        encoded = logstr.encode('utf-8')
        logdata = io.BytesIO(encoded)
        info = tarfile.TarInfo(name='log')
        info.size = len(encoded)
        # Context manager guarantees the archive is finalized even if
        # addfile raises (the original left the tar open on error).
        with tarfile.open(log_name, 'w:gz') as tar:
            tar.addfile(tarinfo=info, fileobj=logdata)
        t = datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(tz).strftime('%H:%M:%S %Y-%m-%d')
        gist_log = log_dir + 'gist'
        with open(gist_log, 'a') as f:
            last_line = data[0] if data else 'empty'
            log_entry = '{}: created {}\n{}\n'.format(t, log_name, last_line)
            f.write(log_entry)
        # no explicit f.close() needed: the with-statement already closed it
        # (the original called close() redundantly inside the with block)
def log_gen():
    """Coroutine: buffer incoming lines and flush them to ``archive_gen``.

    Flushes when ``archive_size`` lines are buffered, or when a falsy
    value (the EOF marker from the driver loop) is received.

    BUG FIX: the original did not clear (or guard) the cache on the EOF
    path, so a repeated falsy send re-archived the same lines, and an
    empty cache still produced an empty archive.
    """
    archiver = archive_gen()
    next(archiver)  # prime the coroutine so it can accept send()
    cache = []
    while True:
        data = yield
        if not data:
            # EOF marker: flush whatever is buffered (if anything) and
            # reset the cache so repeated markers cannot re-archive it.
            if cache:
                archiver.send(cache)
                cache = []
        else:
            cache.append(data)
            if len(cache) >= archive_size:
                archiver.send(cache)
                cache = []
# Driver: pump stdin lines into the buffering coroutine until EOF.
# readline() returns '' at EOF, which is sent once (triggering the final
# flush inside log_gen) before the loop exits.
log = log_gen()
next(log)
while True:
    l = sys.stdin.readline()
    log.send(l)
    if not l:
        break
|
apoorv-kumar/PyThugLife
|
logx/giant_compressed_log.py
|
Python
|
mit
| 1,761
|
from jsonmodels.models import Base
from .channel import Channel
class Device(Base):
""" Contains info about a device and it's channels. """
def __init__(self, **kwargs):
"""
Initializes a Device object by looping through the
keywords in kwargs and setting them as attributes.
:param kwargs: Dictionary containing a device.
"""
for keyword in ["id", "naturalId", "name",
"thingId", "channels", "signatures"]:
if keyword == "channels" and kwargs[keyword] is not None:
kwargs[keyword] = [Channel(**channel_info)
for channel_info in kwargs[keyword]]
setattr(self, keyword, kwargs[keyword])
|
keerts/pyninjasphere
|
pyninjasphere/logic/device.py
|
Python
|
mit
| 744
|
#rainfall sensor.
#VCC
#GND
#DO <--> GPIO(X12) Digital data
#AO <--> ADC Port(X11) Analog data
#If the measured value is lower than the configured threshold, DO reads 0;
#if it is higher than the threshold, DO reads 1.
#AO carries the raw analog measurement.
import pyb
from pyb import Pin
# Digital input with pull-up, wired to the sensor board's comparator output.
p_in = Pin('X12', Pin.IN, Pin.PULL_UP)
# NOTE(review): bare attribute access — this line does nothing (the method
# is not called); presumably leftover from interactive debugging.
p_in.value
adc = pyb.ADC(Pin('X11')) # create an analog object from a pin
# NOTE(review): this rebinds adc to a second ADC object on the same pin,
# making the line above redundant — confirm intent before removing either.
adc = pyb.ADC(pyb.Pin.board.X11)
# read an analog value
def getRainAo():
    """Return the raw analog rainfall reading from the ADC."""
    print('rainfall Ao')
    reading = adc.read()
    return reading
# read an digital value
def getRainDo():
    """Return the digital rain indicator (0 or 1 from the comparator).

    BUG FIX: the original returned the bound method ``p_in.value`` itself
    instead of calling it, so callers always received a truthy method
    object rather than the pin's actual 0/1 state.
    """
    print('rainfall Do')
    return p_in.value()
|
Python-IoT/Smart-IoT-Planting-System
|
device/src/rainfall.py
|
Python
|
mit
| 609
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
# python 2 and python 3 compatibility library
import six
from onshape_client.oas.api_client import ApiClient
from onshape_client.oas.exceptions import ApiTypeError, ApiValueError
from onshape_client.oas.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
int,
none_type,
str,
validate_and_convert_types,
)
from onshape_client.oas.models import bt_release_package_params
from onshape_client.oas.models import bt_active_workflow_info
from onshape_client.oas.models import bt_release_package_info
from onshape_client.oas.models import bt_update_release_package_params
class ReleaseManagementApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    # Backing implementation for POST /api/releasepackages/obsoletion/{wfid}.
    def __create_obsoletion_package(self, wfid, **kwargs):
        """create_obsoletion_package # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_obsoletion_package(wfid, async_req=True)
        >>> result = thread.get()
        Args:
            wfid (str):
        Keyword Args:
            revision_id (str): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int): specifies the index of the server
                that we want to use.
                Default is 0.
            async_req (bool): execute request asynchronously
        Returns:
            None
            If the method is called asynchronously, returns the request
            thread.
        """
        # Normalize all framework-level keyword arguments to their defaults.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get(
            "_return_http_data_only", True
        )
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index", 0)
        kwargs["wfid"] = wfid
        return self.call_with_http_info(**kwargs)
        # NOTE(review): in the upstream generated client these Endpoint
        # bindings live directly in __init__; at the indentation shown here
        # the assignment follows a return and is unreachable — verify against
        # the generator output.
        self.create_obsoletion_package = Endpoint(
            settings={
                "response_type": None,
                "auth": ["OAuth2"],
                "endpoint_path": "/api/releasepackages/obsoletion/{wfid}",
                "operation_id": "create_obsoletion_package",
                "http_method": "POST",
                "servers": [],
            },
            params_map={
                "all": ["wfid", "revision_id",],
                "required": ["wfid",],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {"wfid": (str,), "revision_id": (str,),},
                "attribute_map": {"wfid": "wfid", "revision_id": "revisionId",},
                "location_map": {"wfid": "path", "revision_id": "query",},
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
                "content_type": [],
            },
            api_client=api_client,
            callable=__create_obsoletion_package,
        )
    # Backing implementation for POST /api/releasepackages/release/{wfid}.
    def __create_release_package(self, wfid, bt_release_package_params, **kwargs):
        """create_release_package # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_release_package(wfid, bt_release_package_params, async_req=True)
        >>> result = thread.get()
        Args:
            wfid (str):
            bt_release_package_params (bt_release_package_params.BTReleasePackageParams):
        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int): specifies the index of the server
                that we want to use.
                Default is 0.
            async_req (bool): execute request asynchronously
        Returns:
            None
            If the method is called asynchronously, returns the request
            thread.
        """
        # Normalize all framework-level keyword arguments to their defaults.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get(
            "_return_http_data_only", True
        )
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index", 0)
        kwargs["wfid"] = wfid
        kwargs["bt_release_package_params"] = bt_release_package_params
        return self.call_with_http_info(**kwargs)
        self.create_release_package = Endpoint(
            settings={
                "response_type": None,
                "auth": ["OAuth2"],
                "endpoint_path": "/api/releasepackages/release/{wfid}",
                "operation_id": "create_release_package",
                "http_method": "POST",
                "servers": [],
            },
            params_map={
                "all": ["wfid", "bt_release_package_params",],
                "required": ["wfid", "bt_release_package_params",],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "wfid": (str,),
                    "bt_release_package_params": (
                        bt_release_package_params.BTReleasePackageParams,
                    ),
                },
                "attribute_map": {"wfid": "wfid",},
                "location_map": {"wfid": "path", "bt_release_package_params": "body",},
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
                "content_type": ["application/json;charset=UTF-8; qs=0.09"],
            },
            api_client=api_client,
            callable=__create_release_package,
        )
    # Backing implementation for GET /api/releasepackages/companyreleaseworkflow.
    def __get_company_release_workflow(self, **kwargs):
        """get_company_release_workflow # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_company_release_workflow(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            document_id (str): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int): specifies the index of the server
                that we want to use.
                Default is 0.
            async_req (bool): execute request asynchronously
        Returns:
            bt_active_workflow_info.BTActiveWorkflowInfo
            If the method is called asynchronously, returns the request
            thread.
        """
        # Normalize all framework-level keyword arguments to their defaults.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get(
            "_return_http_data_only", True
        )
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index", 0)
        return self.call_with_http_info(**kwargs)
        self.get_company_release_workflow = Endpoint(
            settings={
                "response_type": (bt_active_workflow_info.BTActiveWorkflowInfo,),
                "auth": ["OAuth2"],
                "endpoint_path": "/api/releasepackages/companyreleaseworkflow",
                "operation_id": "get_company_release_workflow",
                "http_method": "GET",
                "servers": [],
            },
            params_map={
                "all": ["document_id",],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {"document_id": (str,),},
                "attribute_map": {"document_id": "documentId",},
                "location_map": {"document_id": "query",},
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
                "content_type": [],
            },
            api_client=api_client,
            callable=__get_company_release_workflow,
        )
    # Backing implementation for GET /api/releasepackages/{rpid}.
    def __get_release_package(self, rpid, **kwargs):
        """get_release_package # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_release_package(rpid, async_req=True)
        >>> result = thread.get()
        Args:
            rpid (str):
        Keyword Args:
            detailed (bool): [optional] if omitted the server will use the default value of False
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int): specifies the index of the server
                that we want to use.
                Default is 0.
            async_req (bool): execute request asynchronously
        Returns:
            bt_release_package_info.BTReleasePackageInfo
            If the method is called asynchronously, returns the request
            thread.
        """
        # Normalize all framework-level keyword arguments to their defaults.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get(
            "_return_http_data_only", True
        )
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index", 0)
        kwargs["rpid"] = rpid
        return self.call_with_http_info(**kwargs)
        self.get_release_package = Endpoint(
            settings={
                "response_type": (bt_release_package_info.BTReleasePackageInfo,),
                "auth": ["OAuth2"],
                "endpoint_path": "/api/releasepackages/{rpid}",
                "operation_id": "get_release_package",
                "http_method": "GET",
                "servers": [],
            },
            params_map={
                "all": ["rpid", "detailed",],
                "required": ["rpid",],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {"rpid": (str,), "detailed": (bool,),},
                "attribute_map": {"rpid": "rpid", "detailed": "detailed",},
                "location_map": {"rpid": "path", "detailed": "query",},
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
                "content_type": [],
            },
            api_client=api_client,
            callable=__get_release_package,
        )
    # Backing implementation for POST /api/releasepackages/{rpid}.
    def __update_release_package(
        self, rpid, bt_update_release_package_params, **kwargs
    ):
        """update_release_package # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_release_package(rpid, bt_update_release_package_params, async_req=True)
        >>> result = thread.get()
        Args:
            rpid (str):
            bt_update_release_package_params (bt_update_release_package_params.BTUpdateReleasePackageParams):
        Keyword Args:
            action (str): [optional] if omitted the server will use the default value of 'UPDATE'
            wfaction (str): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (float/tuple): timeout setting for this request. If one
                number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int): specifies the index of the server
                that we want to use.
                Default is 0.
            async_req (bool): execute request asynchronously
        Returns:
            bt_release_package_info.BTReleasePackageInfo
            If the method is called asynchronously, returns the request
            thread.
        """
        # Normalize all framework-level keyword arguments to their defaults.
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get(
            "_return_http_data_only", True
        )
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index", 0)
        kwargs["rpid"] = rpid
        kwargs[
            "bt_update_release_package_params"
        ] = bt_update_release_package_params
        return self.call_with_http_info(**kwargs)
        self.update_release_package = Endpoint(
            settings={
                "response_type": (bt_release_package_info.BTReleasePackageInfo,),
                "auth": ["OAuth2"],
                "endpoint_path": "/api/releasepackages/{rpid}",
                "operation_id": "update_release_package",
                "http_method": "POST",
                "servers": [],
            },
            params_map={
                "all": [
                    "rpid",
                    "bt_update_release_package_params",
                    "action",
                    "wfaction",
                ],
                "required": ["rpid", "bt_update_release_package_params",],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "rpid": (str,),
                    "bt_update_release_package_params": (
                        bt_update_release_package_params.BTUpdateReleasePackageParams,
                    ),
                    "action": (str,),
                    "wfaction": (str,),
                },
                "attribute_map": {
                    "rpid": "rpid",
                    "action": "action",
                    "wfaction": "wfaction",
                },
                "location_map": {
                    "rpid": "path",
                    "bt_update_release_package_params": "body",
                    "action": "query",
                    "wfaction": "query",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
                "content_type": ["application/json;charset=UTF-8; qs=0.09"],
            },
            api_client=api_client,
            callable=__update_release_package,
        )
class Endpoint(object):
    def __init__(
        self,
        settings=None,
        params_map=None,
        root_map=None,
        headers_map=None,
        api_client=None,
        callable=None,
    ):
        """Creates an endpoint
        Args:
            settings (dict): see below key value pairs
                'response_type' (tuple/None): response type
                'auth' (list): a list of auth type keys
                'endpoint_path' (str): the endpoint path
                'operation_id' (str): endpoint string identifier
                'http_method' (str): POST/PUT/PATCH/GET etc
                'servers' (list): list of str servers that this endpoint is at
            params_map (dict): see below key value pairs
                'all' (list): list of str endpoint parameter names
                'required' (list): list of required parameter names
                'nullable' (list): list of nullable parameter names
                'enum' (list): list of parameters with enum values
                'validation' (list): list of parameters with validations
            root_map
                'validations' (dict): the dict mapping endpoint parameter tuple
                    paths to their validation dictionaries
                'allowed_values' (dict): the dict mapping endpoint parameter
                    tuple paths to their allowed_values (enum) dictionaries
                'openapi_types' (dict): param_name to openapi type
                'attribute_map' (dict): param_name to camelCase name
                'location_map' (dict): param_name to 'body', 'file', 'form',
                    'header', 'path', 'query'
                collection_format_map (dict): param_name to `csv` etc.
            headers_map (dict): see below key value pairs
                'accept' (list): list of Accept header strings
                'content_type' (list): list of Content-Type header strings
            api_client (ApiClient) api client instance
            callable (function): the function which is invoked when the
                Endpoint is called
        """
        self.settings = settings
        self.params_map = params_map
        # Every endpoint also accepts the framework-level keyword arguments.
        self.params_map["all"].extend(
            [
                "async_req",
                "_host_index",
                "_preload_content",
                "_request_timeout",
                "_return_http_data_only",
                "_check_input_type",
                "_check_return_type",
            ]
        )
        self.params_map["nullable"].extend(["_request_timeout"])
        self.validations = root_map["validations"]
        self.allowed_values = root_map["allowed_values"]
        self.openapi_types = root_map["openapi_types"]
        # Expected types for the framework-level keyword arguments above.
        extra_types = {
            "async_req": (bool,),
            "_host_index": (int,),
            "_preload_content": (bool,),
            "_request_timeout": (none_type, int, (int,), [int]),
            "_return_http_data_only": (bool,),
            "_check_input_type": (bool,),
            "_check_return_type": (bool,),
        }
        self.openapi_types.update(extra_types)
        self.attribute_map = root_map["attribute_map"]
        self.location_map = root_map["location_map"]
        self.collection_format_map = root_map["collection_format_map"]
        self.headers_map = headers_map
        self.api_client = api_client
        self.callable = callable
def __validate_inputs(self, kwargs):
    """Validate enum/constrained parameters and optionally type-check all kwargs.

    Mutates ``kwargs`` in place: when ``_check_input_type`` is truthy, every
    value is converted to its declared openapi type.
    """
    # Enum parameters must hold one of their declared allowed values.
    for param in self.params_map["enum"]:
        if param in kwargs:
            check_allowed_values(self.allowed_values, (param,), kwargs[param])
    # Parameters with extra validations (ranges, patterns, ...) checked here.
    for param in self.params_map["validation"]:
        if param in kwargs:
            check_validations(self.validations, (param,), kwargs[param])
    # Explicit `is False` so only the boolean opt-out skips type checking.
    if kwargs["_check_input_type"] is False:
        return
    # Type-check and coerce every supplied value against its openapi type.
    for key, value in six.iteritems(kwargs):
        fixed_val = validate_and_convert_types(
            value,
            self.openapi_types[key],
            [key],
            False,
            kwargs["_check_input_type"],
            configuration=self.api_client.configuration,
        )
        kwargs[key] = fixed_val
def __gather_params(self, kwargs):
    """Bucket the supplied kwargs by their request location.

    Returns a dict with keys 'body', 'collection_format', 'file', 'form',
    'header', 'path' and 'query', shaped for ApiClient.call_api.
    """
    params = {
        "body": None,
        "collection_format": {},
        "file": {},
        "form": [],
        "header": {},
        "path": {},
        "query": [],
    }
    for param_name, param_value in six.iteritems(kwargs):
        param_location = self.location_map.get(param_name)
        if param_location is None:
            # Not an endpoint parameter (e.g. async_req); ignore it.
            continue
        if param_location:
            if param_location == "body":
                # At most one body parameter; stored directly, not keyed.
                params["body"] = param_value
                continue
            base_name = self.attribute_map[param_name]
            if param_location == "form" and self.openapi_types[param_name] == (
                file_type,
            ):
                # Single file upload: wrap in a list for uniform handling.
                params["file"][param_name] = [param_value]
            elif param_location == "form" and self.openapi_types[param_name] == (
                [file_type],
            ):
                # param_value is already a list
                params["file"][param_name] = param_value
            elif param_location in {"form", "query"}:
                # form/query parameters may repeat, so collect (name, value)
                # tuples instead of using a dict.
                param_value_full = (base_name, param_value)
                params[param_location].append(param_value_full)
            if param_location not in {"form", "query"}:
                # header/path parameters are unique per name.
                params[param_location][base_name] = param_value
            collection_format = self.collection_format_map.get(param_name)
            if collection_format:
                params["collection_format"][base_name] = collection_format
    return params
def __call__(self, *args, **kwargs):
    """Dispatch an endpoint invocation to its stored callable.

    Example: ``pet_api.add_pet`` is an Endpoint instance, so calling
    ``pet_api.add_pet(...)`` lands here and is forwarded to the function
    saved in ``self.callable`` with this endpoint as first argument.
    """
    target = self.callable
    return target(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
    """Validate kwargs against the endpoint settings and perform the HTTP call.

    Raises ApiTypeError for unknown parameters, ApiValueError for missing
    required / unexpectedly-None parameters or a bad host index, then
    delegates the actual request to ApiClient.call_api.
    """
    try:
        # _host_index selects one of the endpoint's declared servers.
        _host = self.settings["servers"][kwargs["_host_index"]]
    except IndexError:
        if self.settings["servers"]:
            raise ApiValueError(
                "Invalid host index. Must be 0 <= index < %s"
                % len(self.settings["servers"])
            )
        # No servers declared: fall back to the client's default host.
        _host = None
    for key, value in six.iteritems(kwargs):
        if key not in self.params_map["all"]:
            raise ApiTypeError(
                "Got an unexpected parameter '%s'"
                " to method `%s`" % (key, self.settings["operation_id"])
            )
        # only throw this nullable ApiValueError if _check_input_type
        # is False, if _check_input_type==True we catch this case
        # in self.__validate_inputs
        if (
            key not in self.params_map["nullable"]
            and value is None
            and kwargs["_check_input_type"] is False
        ):
            raise ApiValueError(
                "Value may not be None for non-nullable parameter `%s`"
                " when calling `%s`" % (key, self.settings["operation_id"])
            )
    for key in self.params_map["required"]:
        if key not in kwargs.keys():
            raise ApiValueError(
                "Missing the required parameter `%s` when calling "
                "`%s`" % (key, self.settings["operation_id"])
            )
    self.__validate_inputs(kwargs)
    params = self.__gather_params(kwargs)
    # Negotiate Accept / Content-Type headers from the endpoint's lists.
    accept_headers_list = self.headers_map["accept"]
    if accept_headers_list:
        params["header"]["Accept"] = self.api_client.select_header_accept(
            accept_headers_list
        )
    content_type_headers_list = self.headers_map["content_type"]
    if content_type_headers_list:
        header_list = self.api_client.select_header_content_type(
            content_type_headers_list
        )
        params["header"]["Content-Type"] = header_list
    return self.api_client.call_api(
        self.settings["endpoint_path"],
        self.settings["http_method"],
        params["path"],
        params["query"],
        params["header"],
        body=params["body"],
        post_params=params["form"],
        files=params["file"],
        response_type=self.settings["response_type"],
        auth_settings=self.settings["auth"],
        async_req=kwargs["async_req"],
        _check_type=kwargs["_check_return_type"],
        _return_http_data_only=kwargs["_return_http_data_only"],
        _preload_content=kwargs["_preload_content"],
        _request_timeout=kwargs["_request_timeout"],
        _host=_host,
        collection_formats=params["collection_format"],
    )
|
onshape-public/onshape-clients
|
python/onshape_client/oas/api/release_management_api.py
|
Python
|
mit
| 30,910
|
"""Echo request message tests."""
from pyof.foundation.basic_types import DPID, HWAddress
from pyof.v0x01.common.phy_port import PhyPort, PortConfig, PortState
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
from tests.test_struct import TestStruct
class TestFeaturesReply(TestStruct):
    """Feature reply message tests (also those in :class:`.TestDump`)."""

    @classmethod
    def setUpClass(cls):
        """Configure raw file and its object in parent class (TestDump)."""
        super().setUpClass()
        # Raw dump captured from an OpenFlow 1.0 features_reply exchange.
        super().set_raw_dump_file('v0x01', 'ofpt_features_reply')
        kwargs = _get_kwargs()
        super().set_raw_dump_object(FeaturesReply, **kwargs)
        # 32 == minimum serialized size asserted by the TestStruct machinery.
        super().set_minimum_size(32)
def _get_kwargs():
    """Build the constructor arguments for the FeaturesReply dump object."""
    kwargs = {
        'xid': 2,
        'datapath_id': DPID('00:00:00:00:00:00:00:01'),
        'n_buffers': 256,
        'n_tables': 254,
        'capabilities': 0x000000c7,
        'actions': 4095,
        'ports': _get_ports(),
    }
    return kwargs
def _get_ports():
    """Build the three physical ports present in the raw dump."""
    # (port_no, MAC, name, config, state, curr) per port; the remaining
    # PhyPort fields are zero for every port in the dump.
    specs = [
        (65534, '0e:d3:98:a5:30:47', 's1',
         PortConfig.OFPPC_PORT_DOWN, PortState.OFPPS_LINK_DOWN, 0),
        (1, '0a:54:cf:fc:4e:6d', 's1-eth1',
         0, PortState.OFPPS_STP_LISTEN, 0x000000c0),
        (2, 'f6:b6:ab:cc:f8:4f', 's1-eth2',
         0, PortState.OFPPS_STP_LISTEN, 0x000000c0),
    ]
    return [PhyPort(port_no=number,
                    hw_addr=HWAddress(mac),
                    name=label,
                    config=config,
                    state=state,
                    curr=curr,
                    advertised=0,
                    supported=0,
                    peer=0)
            for number, mac, label, config, state, curr in specs]
|
cemsbr/python-openflow
|
tests/v0x01/test_controller2switch/test_features_reply.py
|
Python
|
mit
| 1,921
|
#!/usr/bin/env python
# "genstyles" Notepad++ to Atom Syntax Style Converter
# Copyright (c) 2014, Adam Rehn
#
# Script to convert Notepad++ style XML files to LESS stylesheets for Atom.
#
# Requires the following files in the current directory:
#
# styleMappings.json - contains required conversion information
# stylers.xml - Notepad++ syntax styles XML file
#
# ---
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import xml.dom.minidom
import json
# Wraps an object in a list if is not a list already
# Wraps an object in a list if it is not a list already
def forceList(x):
    """Return None for None, *x* unchanged if it is a list, else [x]."""
    # BUG FIX: identity check instead of `x == None`, which can be fooled
    # by objects overriding __eq__.
    if x is None:
        return None
    elif isinstance(x, list):
        return x
    else:
        return [x]
# Writes the contents of a file
# Writes the contents of a file
def putFileContents(filename, data):
    """Write *data* to *filename*, encoded as UTF-8 bytes."""
    # Context manager guarantees the handle is closed even if write() raises,
    # fixing the handle leak of the open/write/close sequence.
    with open(filename, "wb") as f:
        f.write(data.encode("utf-8"))
# Parses a JSON file
# Parses a JSON file
def parseJSONFile(filename):
    """Parse *filename* as JSON and return the decoded object."""
    # `with` closes the file even if json.load raises on malformed input.
    with open(filename, "r") as f:
        return json.load(f)
# Generates CSS from a WordsStyle XML element
# Generates CSS from a WordsStyle XML element
def convertStyle(styleNode):
    """Render a WordsStyle element as colour + font-weight CSS declarations."""
    colour = styleNode.getAttribute("fgColor")
    # Notepad++ stores bold as fontStyle == "1".
    weight = "bold" if styleNode.getAttribute("fontStyle") == "1" else "normal"
    return "\t\tcolor: #{0};\n\t\tfont-weight: {1};\n".format(colour, weight)
# Parse the style mappings JSON file (maps Notepad++ language/style names
# to Atom CSS selectors).
styleMappings = parseJSONFile("styleMappings.json")

# Parse the stylers.xml file with minidom
document = xml.dom.minidom.parse("stylers.xml")

# We will build the LESS stylesheet iteratively
styles = ""

# Iterate over the languages
for langNode in document.getElementsByTagName("LexerType"):
    # Determine if we have conversion details for the current language
    langName = langNode.getAttribute("name")
    if (langName in styleMappings):
        # Open the selector block for the current language
        styles += "." + styleMappings[langName]["class"] + " {\n"
        # Iterate over the styles for the current language
        for styleNode in langNode.childNodes:
            # Skip text nodes; only <WordsStyle> elements carry style data.
            if (styleNode.nodeType != styleNode.TEXT_NODE and styleNode.tagName == "WordsStyle"):
                # Determine if we have a conversion mapping for the current style
                styleName = styleNode.getAttribute("name")
                if (styleName in styleMappings[langName]["styles"]):
                    # A mapping may be one selector or a list; normalise to a list.
                    mappings = forceList(styleMappings[langName]["styles"][styleName])
                    if mappings != None:
                        styles += "\t" + ",\n\t".join(mappings) + " {\n" + convertStyle(styleNode) + "\t}\n"
        # Close the selector block for the current language
        styles += "}\n"

# Write the generated stylesheet to the file language-styles.less
putFileContents("../stylesheets/language-styles.less", styles)
|
adamrehn/notepad-plus-plus-default-styles
|
genstyles/genstyles.py
|
Python
|
mit
| 3,631
|
import ast
import csv
import datetime
import pytz
from sqlalchemy import Column, Integer, String, Boolean, DateTime
from sqlalchemy.dialects.postgresql import INET
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()


class Gateway(Base):
    """ORM model for a row of the ``gateways`` table."""
    __tablename__ = 'gateways'

    id = Column(Integer, primary_key=True, autoincrement=True)
    host = Column(INET, nullable=False, unique=True)
    name = Column(String, nullable=True)
    enabled = Column(Boolean, nullable=False, default=True)
    eui = Column(Integer, nullable=True)
    power = Column(Integer, nullable=False)
    port = Column(Integer, nullable=True)
    created = Column(DateTime(timezone=True), nullable=False)
    updated = Column(DateTime(timezone=True), nullable=False)

    @classmethod
    def seed(cls, session):
        """Bulk-insert gateway rows read from ``gateways.csv``.

        Timestamps are set to the current UTC time for every row.
        """
        gateways = []
        # Read fields from the CSV file
        with open('gateways.csv') as sfile:
            reader = csv.DictReader(sfile)
            for line in reader:
                # Copy the row into a plain dict; values remain the strings
                # produced by DictReader (despite the module's `ast` import,
                # nothing is literal_eval'd here).
                # NOTE(review): dict.iteritems() is Python 2 only — confirm
                # the target runtime before porting.
                g = {}
                for k,v in line.iteritems():
                    g[k] = v
                gateways.append(g)
        # Set timestamps as UTC
        for g in gateways:
            now = datetime.datetime.now(tz=pytz.utc).isoformat()
            g['created'] = now
            g['updated'] = now
        # Insert rows
        session.bulk_insert_mappings(Gateway, gateways)

    @classmethod
    def clear(cls, session):
        """Delete every Gateway row via the ORM (one DELETE per row)."""
        gateways = session.query(Gateway).all()
        for g in gateways:
            session.delete(g)
|
Fluent-networks/floranet
|
floranet/data/seed/gateways.py
|
Python
|
mit
| 1,604
|
# Copyright (c) Hynek Schlawack, Richard Wall
# See LICENSE for details.
from __future__ import absolute_import, division, print_function
import getdns
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.filepath import FilePath
from OpenSSL import crypto
from danex import _dane
class TLSADomainNameTests(SynchronousTestCase):
    def test_tlsaDomainName(self):
        """
        L{_dane.tlsaDomainName} builds a TLSA owner name from the port,
        protocol and parent domain labels.
        """
        expected = "_443._tcp.example.com"
        actual = _dane.tlsaDomainName('example.com', 443, 'tcp')
        self.assertEqual(expected, actual)
class GetdnsResponseErrorTests(SynchronousTestCase):
    def test_errorText(self):
        """
        The C{errorText} attribute of L{_dane.GetdnsResponseError} carries the
        name of the matching L{getdns} status constant.
        """
        error = _dane.GetdnsResponseError(getdns.GETDNS_RESPSTATUS_NO_NAME)
        self.assertEqual("GETDNS_RESPSTATUS_NO_NAME", error.errorText)
class TLSARecordTests(SynchronousTestCase):
    """Tests for L{_dane.TLSARecord.matchesCertificate}."""

    def test_matchesCertificateCertTrue(self):
        """
        matchesCertificate returns True when the record payload equals the
        certificate's full DER bytes (CERT selector, FULL matching type).
        """
        serverCertBytes = FilePath(__file__).sibling('example_cert.bin').open().read()
        serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytes)
        self.assertEqual(
            True,
            _dane.TLSARecord(
                payload=serverCertBytes,
                usage=0,
                selector=_dane.SELECTOR.CERT.value,
                matchingType=_dane.MATCHING_TYPE.FULL.value
            ).matchesCertificate(serverCert)
        )

    def test_matchesCertificateCertFalse(self):
        """
        matchesCertificate returns False when the certificate has been
        altered (serial changed) so its DER bytes no longer match the payload.
        """
        serverCertBytesOriginal = FilePath(__file__).sibling('example_cert.bin').open().read()
        serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytesOriginal)
        originalSerial = serverCert.get_serial_number()
        serverCert.set_serial_number(100)
        self.assertNotEqual(originalSerial, serverCert.get_serial_number())
        serverCertBytesNew = crypto.dump_certificate(crypto.FILETYPE_ASN1, serverCert)
        self.assertNotEqual(serverCertBytesOriginal, serverCertBytesNew)
        self.assertEqual(
            False,
            _dane.TLSARecord(
                payload=serverCertBytesNew,
                usage=0,
                selector=_dane.SELECTOR.CERT.value,
                matchingType=_dane.MATCHING_TYPE.FULL.value
            ).matchesCertificate(serverCert)
        )
    # Disabled via trial's skip attribute.
    test_matchesCertificateCertFalse.skip = True

    def test_matchesCertificateSPKITrue(self):
        """
        SPKI-selector record with a corrupted payload must not match.

        NOTE(review): the assertEqual below claims the dumped bytes equal the
        original even after the serial was changed, which looks inconsistent
        with the CertFalse test above — the test is skipped, so this was
        never exercised; confirm intent before enabling.
        """
        serverCertBytesOriginal = FilePath(__file__).sibling('example_cert.bin').open().read()
        serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytesOriginal)
        serverCert.set_serial_number(100)
        serverCertBytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, serverCert)
        self.assertEqual(serverCertBytesOriginal, serverCertBytes)
        # import pdb; pdb.set_trace()
        self.assertEqual(
            False,
            _dane.TLSARecord(
                payload=serverCertBytes + b'xxx',
                usage=0,
                selector=_dane.SELECTOR.SPKI.value,
                matchingType=_dane.MATCHING_TYPE.FULL.value
            ).matchesCertificate(serverCert)
        )
    # Disabled via trial's skip attribute.
    test_matchesCertificateSPKITrue.skip = True
class FakeGetdns(object):
    """
    An in-memory fake of the getdns API for testing.

    Mirrors every ``GETDNS_*`` constant of the real module onto the instance
    and answers :meth:`general` with a canned result.
    """
    def __init__(self, generalResult=None):
        self._generalResult = generalResult
        # Copy all GETDNS_* constants so test code can reference them on us.
        for name, value in getdns.__dict__.items():
            if name.startswith('GETDNS_'):
                setattr(self, name, value)

    def context_create(self):
        """
        No-op: the fake needs no real resolution context.
        """

    def general(self, context, name, request_type, extensions):
        """
        Return the canned result configured at construction time.
        """
        return self._generalResult
class TLSATests(SynchronousTestCase):
    """Tests for L{_dane.lookup_tlsa_records} driven through FakeGetdns."""

    def test_tlsaCert(self):
        """
        L{_dane.lookup_tlsa_records} returns a L{_dane.TLSARecord} instance if
        the domain name exists and a verified record is found and the record
        selector type is CERT.
        """
        fakeGetdns = FakeGetdns(
            generalResult=createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
                                        selector=_dane.SELECTOR.CERT.value,
                                        certificate_association_data=b'FOOBAR'))
        # lookup returns a pair; the second item is the record sequence.
        _, (res,) = _dane.lookup_tlsa_records(
            'example.com', 443, 'tcp', getdns=fakeGetdns)
        self.assertEqual(
            (_dane.SELECTOR.CERT, b'FOOBAR'),
            (res.selector, res.payload)
        )

    def test_tlsaSPKI(self):
        """
        L{_dane.lookup_tlsa_records} returns a L{_dane.TLSARecord} instance if
        the domain name exists and a verified record is found and the record
        selector type is SPKI.
        """
        fakeGetdns = FakeGetdns(
            generalResult=createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
                                        selector=_dane.SELECTOR.SPKI.value,
                                        certificate_association_data=b'FOOBAR'))
        _, (res,) = _dane.lookup_tlsa_records(
            'example.com', 443, 'tcp', getdns=fakeGetdns)
        self.assertEqual(
            (_dane.SELECTOR.SPKI, b'FOOBAR'),
            (res.selector, res.payload)
        )

    def test_tlsaNoname(self):
        """
        L{_dane.lookup_tlsa_records} raises GetdnsResponseError (carrying the
        NO_NAME status) if the domain name does not exist.
        """
        e = self.assertRaises(
            _dane.GetdnsResponseError,
            _dane.lookup_tlsa_records, 'example.com', 443, 'tcp',
            getdns=FakeGetdns(
                generalResult=createResults(
                    status=getdns.GETDNS_RESPSTATUS_NO_NAME
                )
            )
        )
        self.assertEqual(
            getdns.GETDNS_RESPSTATUS_NO_NAME,
            e.errorCode
        )
def createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
                  selector=None,
                  certificate_association_data=b"",):
    """Build a canned getdns ``general`` lookup result for FakeGetdns.

    Mimics a real TLSA response for _443._tcp.getdnsapi.org; only ``status``,
    the TLSA ``selector`` and ``certificate_association_data`` vary.  Fields
    the code under test never reads are left commented out below.
    """
    return {'answer_type': 800,
            'canonical_name': '_443._tcp.getdnsapi.org.',
            'just_address_answers': [],
            # 'replies_full': [<read-only buffer ptr 0x7fe2e0029e80, size 636 at 0x7fe2e4e58fb0>],
            'replies_tree': [{'answer': [{'class': 1,
                                          'name': '_443._tcp.getdnsapi.org.',
                                          'rdata': {
                                              'certificate_association_data': certificate_association_data,
                                              'certificate_usage': 3,
                                              'matching_type': 1,
                                              # 'rdata_raw': "",
                                              'selector': selector
                                          },
                                          # 'ttl': 450,
                                          'type': 52,
                                          # {'class': 1,
                                          # 'name': '_443._tcp.getdnsapi.org.',
                                          # 'rdata': {'algorithm': 7,
                                          # 'key_tag': 49262,
                                          # 'labels': 4,
                                          # 'original_ttl': 450,
                                          # 'rdata_raw': <read-only buffer ptr 0x7fe2e0261b70, size 161 at 0x7fe2e4e60130>,
                                          # 'signature': <read-only buffer ptr 0x7fe2e0254c40, size 128 at 0x7fe2e4e60170>,
                                          # 'signature_expiration': 1399325172,
                                          # 'signature_inception': 1398100703,
                                          # 'signers_name': 'getdnsapi.org.',
                                          # 'type_covered': 52},
                                          # 'ttl': 450,
                                          # 'type': 46
                                          }
                                         ],
                              'answer_type': 800,
                              # 'authority': [{'class': 1,
                              # 'name': 'getdnsapi.org.',
                              # 'rdata': {'nsdname': 'ns.secret-wg.org.',
                              # 'rdata_raw': 'ns.secret-wg.org.'},
                              # 'ttl': 450,
                              # 'type': 2},
                              # {'class': 1,
                              # 'name': 'getdnsapi.org.',
                              # 'rdata': {'nsdname': 'mcvax.nlnetlabs.nl.',
                              # 'rdata_raw': 'mcvax.nlnetlabs.nl.'},
                              # 'ttl': 450,
                              # 'type': 2},
                              # {'class': 1,
                              # 'name': 'getdnsapi.org.',
                              # 'rdata': {'nsdname': 'open.nlnetlabs.nl.',
                              # 'rdata_raw': 'open.nlnetlabs.nl.'},
                              # 'ttl': 450,
                              # 'type': 2},
                              # {'class': 1,
                              # 'name': 'getdnsapi.org.',
                              # 'rdata': {'algorithm': 7,
                              # 'key_tag': 49262,
                              # 'labels': 2,
                              # 'original_ttl': 450,
                              # 'rdata_raw': <read-only buffer ptr 0x7fe2e0261f90, size 161 at 0x7fe2e4e601f0>,
                              # 'signature': <read-only buffer ptr 0x7fe2e0028120, size 128 at 0x7fe2e4e60230>,
                              # 'signature_expiration': 1399278072,
                              # 'signature_inception': 1398093503,
                              # 'signers_name': 'getdnsapi.org.',
                              # 'type_covered': 2},
                              # 'ttl': 450,
                              # 'type': 46}],
                              'canonical_name': '_443._tcp.getdnsapi.org.',
                              'dnssec_status': 400,
                              'header': {'aa': 0,
                                         'ad': 1,
                                         'ancount': 2,
                                         'arcount': 0,
                                         'cd': 0,
                                         'id': 0,
                                         'nscount': 4,
                                         'opcode': 0,
                                         'qdcount': 1,
                                         'qr': 1,
                                         'ra': 1,
                                         'rcode': 0,
                                         'rd': 1,
                                         'tc': 0,
                                         'z': 0},
                              'question': {'qclass': 1,
                                           'qname': '_443._tcp.getdnsapi.org.',
                                           'qtype': 52}}],
            'status': status}
|
hynek/tnw
|
danex/test/test_dane.py
|
Python
|
mit
| 12,017
|
"""pytest tests for reading the result file"""
import pytest
import serpentTools.data
from serpentTools.settings import rc
@pytest.fixture(scope="module")
def read_2_1_29():
    # Pin the reader to Serpent 2.1.29 for every test in this module; the
    # rc context manager restores the previous setting on teardown.
    with rc:
        rc["serpentVersion"] = "2.1.29"
        yield
@pytest.fixture
def fullPwrFile(read_2_1_29):
    # Depends on read_2_1_29 so the version pin is active while parsing.
    return serpentTools.data.readDataFile("pwr_res.m")
def test_scalars(fullPwrFile):
    """Ensure that the full precision of scalars are captured

    Related: GH issue #411
    """
    # abs=0, rel=0 disables approx's tolerance: values must match exactly.
    assert fullPwrFile["miscMemsize"] == pytest.approx([6.59, 6.59], abs=0, rel=0)
    assert fullPwrFile["availMem"] == pytest.approx([15935.20, 15935.20], abs=0, rel=0)
    assert fullPwrFile["totCpuTime"] == pytest.approx([0.282078, 0.494889], abs=0, rel=0)
|
CORE-GATECH-GROUP/serpent-tools
|
tests/test_pt_results.py
|
Python
|
mit
| 740
|
#! /usr/bin/python3
"""Trigger a repo sync for every software channel on a Satellite server."""
import xmlrpc.client

SATELLITE_URL = 'https://pm1.solutions.local/rpc/api'
SATELLITE_LOGIN = ''
SATELLITE_PASSWORD = ''

client = xmlrpc.client.Server(SATELLITE_URL, verbose=0)
key = client.auth.login(SATELLITE_LOGIN, SATELLITE_PASSWORD)

existing_channels = [x['label'] for x in client.channel.list_all_channels(key)]
for channel in existing_channels:
    # The original wrapped this call in `try/except Exception: raise`, a
    # no-op handler; errors propagate identically without it.
    client.channel.software.sync_repo(key, channel)
|
peteches/Houston
|
sync_repos.py
|
Python
|
mit
| 477
|
# coding: utf8
from __future__ import print_function
from itertools import product
from nltk.tree import Tree
class ChunkTreeInformationExtractor():
    """Extract (argument1, argument2, relation) triples from a chunked
    sentence tree (Persian open information extraction)."""

    def __init__(self):
        # Flatten a chunk's (word, tag) leaves back into a plain text string.
        self.arg = lambda chunk: ' '.join([word for word, tag in chunk.leaves()])

    def extract(self, chunked_tree):
        """ extracts information from chunk tree
        Args:
            chunked_tree (nltk.Tree): chunk tree of a sentence
        Returns:
            list(str,str,str): list of informations in for of (argument1, argument2, relation)
        """
        informations = []
        arg1s = []
        arg2s = []
        chunks_list = list(chunked_tree)
        # Drop punctuation tokens so they never split arguments/relations.
        for chunk in chunked_tree:
            if type(chunk) is not Tree and chunk[1] == "PUNC":
                chunks_list.remove(chunk)
        for c in range(len(chunks_list)):
            # chunks_list shrinks inside the loop (pop below): re-check bounds.
            if c >= len(chunks_list):
                break
            chunk = chunks_list[c]
            if type(chunk) is not Tree:
                try:
                    if chunk[0] == 'که':
                        # Relative-clause marker: fold the following clause
                        # into whichever argument list precedes it.
                        last_args = []
                        if len(arg1s) > 0 and self.arg(chunks_list[c-1]) in arg1s[-1]:
                            last_args = arg1s
                        elif len(arg2s) > 0 and self.arg(chunks_list[c-1]) in arg2s[-1]:
                            last_args = arg2s
                        else:
                            continue
                        last_label = ''
                        # BUG FIX: original used `last_label is not 'VP'`,
                        # an identity test on strings; use equality instead.
                        while type(chunk) is not Tree or last_label != 'VP':
                            chunk = chunks_list[c]
                            if type(chunk) is Tree:
                                last_args[-1] += ' ' + self.arg(chunk)
                                last_label = chunk.label()
                            else:
                                last_args[-1] += ' ' + chunk[0]
                            chunks_list.pop(c)
                except:
                    pass
                continue
            if chunk.label() == 'NP':
                try:
                    # NP preceded by PP or followed by POSTP forms argument 2;
                    # the first bare NP becomes argument 1.
                    if type(chunks_list[c - 1]) == Tree and chunks_list[c - 1].label() == 'PP':
                        arg2s.append(self.arg(chunks_list[c - 1]) + ' ' + self.arg(chunk))
                    elif type(chunks_list[c + 1]) == Tree and chunks_list[c + 1].label() == 'POSTP':
                        arg2s.append(self.arg(chunk) + ' ' + self.arg(chunks_list[c + 1]))
                    else:
                        if len(arg1s) == 0:
                            arg1s.append(self.arg(chunk))
                except:
                    continue
            elif chunk.label() == 'VP':
                if len(arg1s) > 0 and len(arg2s) > 0:
                    rel = self.arg(chunk)
                    if len(chunk) <= 1 and type(chunks_list[c - 1]) is Tree:
                        # BUG FIX: `label() is 'ADJP'` compared identity and
                        # could silently be False for equal strings; use ==.
                        if chunks_list[c - 1].label() == 'ADJP':
                            rel = self.arg(chunks_list[c - 1]) + ' ' + rel
                        elif chunks_list[c - 1].label() == 'NP' and self.arg(chunks_list[c - 1]) not in (arg1s[-1] + arg2s[-1]):
                            rel = self.arg(chunks_list[c - 1]) + ' ' + rel
                    # Emit every (arg1, arg2) combination with this relation.
                    for arg1, arg2 in product(arg1s, arg2s):
                        informations.append((arg1, arg2, rel))
                    arg1s = []
                    arg2s = []
        return informations
|
sobhe/baaz
|
baaz/ChunkTreeInformationExtractor.py
|
Python
|
mit
| 2,465
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import get_target_arch, PLATFORM
from lib.util import meson_gyp, import_vs_env
# Build configurations built by default when -c is not given.
CONFIGURATIONS = ['Release', 'Debug']
# Repository root: two levels up from this script.
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Product name comes from the project's meson/gyp configuration.
PRODUCT_NAME = meson_gyp()['product_name%']

# Default ninja targets per host platform.
BUILD_TARGETS = {
    'darwin': [
        '{0}.framework'.format(PRODUCT_NAME),
        '{0} Helper.app'.format(PRODUCT_NAME),
    ],
    'win32': [
        PRODUCT_NAME,
    ],
    'linux': [
        PRODUCT_NAME,
    ],
}
def main():
    """Build every requested configuration/target with ninja.

    Exits with ninja's return code on the first failing target.
    """
    os.chdir(SOURCE_ROOT)
    # Update the VS build env.
    import_vs_env(get_target_arch())
    ninja = os.path.join('vendor', 'depot_tools', 'ninja')
    if sys.platform == 'win32':
        ninja += '.exe'
    args = parse_args()
    for config in args.configuration:
        # NOTE(review): config[0] is the configuration's first letter, so the
        # build dirs are out/R and out/D — looks deliberate, confirm.
        build_path = os.path.join('out', config[0])
        build_targets = BUILD_TARGETS[PLATFORM]
        if args.target != '':
            # An explicit -t target overrides the per-platform defaults.
            build_targets = [args.target]
        for build_target in build_targets:
            cmds = [ninja, '-C', build_path, build_target]
            if args.verbose:
                cmds.append('-v')
            ret = subprocess.call(cmds)
            if ret != 0:
                sys.exit(ret)
def parse_args(argv=None):
    """Parse command-line build options.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:].
            Accepting an explicit list (backward-compatible) makes the
            function usable/testable without touching sys.argv.

    Returns:
        argparse.Namespace with ``configuration``, ``target`` and ``verbose``.
    """
    parser = argparse.ArgumentParser(description='Build project')
    parser.add_argument('-c', '--configuration',
                        help='Build with Release or Debug configuration',
                        nargs='+',
                        default=CONFIGURATIONS,
                        required=False)
    parser.add_argument('-t', '--target',
                        help='Build specified target',
                        default='',
                        required=False)
    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        help='Prints the output of subprocess')
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
go-meson/framework
|
script/build.py
|
Python
|
mit
| 1,907
|
# Copyright (C) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2014 Qualification Round - Problem C. Minesweeper Master
# https://code.google.com/codejam/contest/2974486/dashboard#s=p2
#
# Time: O(R * C)
# Space: O(1)
#
def minesweeper_master():
    """Read one case (R C M) from stdin and return a board string, or
    "Impossible".

    Python 2 source (raw_input/xrange).  Builds an R x C board of mines '*',
    carves `empty` = R*C - M open cells '.', and marks the click cell 'c'.
    """
    R, C, M = map(int, raw_input().strip().split())
    result, empty = [['*']*C for _ in xrange(R)], R*C - M
    if empty == 1:
        # Single open cell: the click itself; board stays all mines.
        pass
    elif R == 1 or C == 1:
        # One row/column: open a contiguous strip from one end.
        for i in xrange(empty):
            if R == 1:
                result[0][i] = '.'
            else:
                result[i][0] = '.'
    elif (R == 2 or C == 2) and (empty != 2 and not empty%2):
        # Two rows/columns: open area must be an even-sized 2-wide strip.
        for i in xrange(empty//2):
            if R == 2:
                result[0][i] = '.'
                result[1][i] = '.'
            else:
                result[i][0] = '.'
                result[i][1] = '.'
    elif (R >= 3 and C >= 3) and (empty == 4 or empty == 6 or empty >= 8):
        if empty < 2*C+2:
            # Small area: two partial rows, plus a 3-cell notch in row 2
            # when empty is odd (compensated by re-mining two cells).
            for i in xrange(empty//2):
                result[0][i] = '.'
                result[1][i] = '.'
            if empty%2:
                result[2][0] = '.'
                result[2][1] = '.'
                result[2][2] = '.'
                result[0][empty//2-1] = '*'
                result[1][empty//2-1] = '*'
        else:
            # Large area: fill whole rows top-down, then fix up the shape
            # when the remainder would leave a lone open cell.
            for i in xrange(R):
                for j in xrange(max(min(empty-C*i, C), 0)):
                    result[i][j] = '.'
            if empty%C == 1:
                result[empty//C][1] = '.'
                result[empty//C-1][C-1] = '*'
    else:
        return "Impossible"
    result[0][0] = 'c'
    # NOTE: the lambda parameter shadows the outer `result` list on purpose?
    # It works, but renaming would read better.
    return "\n".join(map(lambda result: "".join(result), result))
# First stdin line is the case count; each call to minesweeper_master()
# consumes one "R C M" line (Python 2 input()/print).
for case in xrange(input()):
    print 'Case #%d: \n%s' % (case+1, minesweeper_master())
|
kamyu104/GoogleCodeJam-2014
|
Qualification Round/minesweeper_master.py
|
Python
|
mit
| 1,750
|
import InstagramAPI
# /////// CONFIG ///////
username = ''
password = ''
debug = False
photo = '' # path to the photo
caption = '' # caption
# //////////////////////
i = InstagramAPI.Instagram(username, password, debug)
try:
i.login()
except Exception as e:
e.message
exit()
try:
i.uploadPhoto(photo, caption)
except Exception as e:
print e.message
|
danleyb2/Instagram-API
|
examples/uploadPhoto.py
|
Python
|
mit
| 376
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the MIT License
"""
Bootstrapping Lattice graph designer
Detect environment and execute program from source
@author: Ivan Luchko (luchko.ivan@gmail.com)
"""
if __name__ == '__main__':
    import sys
    from latticegraph_designer.app import main
    # Run the application entry point and propagate its exit status.
    sys.exit(main.run())
|
luchko/latticegraph_designer
|
bootstrap.py
|
Python
|
mit
| 356
|
from pyga.operator.mutation import Mutation
class ListOrderMutation(Mutation):
    """
    Mutate order on candidate which has data of list type.

    :param mutations: int — number of random pair swaps per mutate() call
    :param probability: Probability
    :param random: Random — must expose int(low, high)
    """
    def __init__(self, probability, random, mutations):
        super().__init__()
        self.mutations = mutations
        self.probability = probability
        self.random = random

    def mutate(self, subject):
        """
        Perform order mutation on list.

        Swaps ``self.mutations`` randomly chosen element pairs in place and
        returns the same list object.

        :param subject: list
        :return: list
        """
        for _ in range(self.mutations):
            a, b = sorted([self.random.int(0, len(subject) - 1),
                           self.random.int(0, len(subject) - 1)])
            # Idiomatic tuple swap replaces the temporary-variable shuffle.
            subject[a], subject[b] = subject[b], subject[a]
        return subject
|
Eyjafjallajokull/pyga
|
pyga/operator/list_order_mutation.py
|
Python
|
mit
| 867
|
# encoding: utf-8
# pylint: disable=too-few-public-methods,invalid-name,bad-continuation
"""
RESTful API User resources
--------------------------
"""
import logging
from flask_login import current_user
from flask_restplus_patched import Resource
from app.extensions.api import Namespace, http_exceptions
from app.extensions.api.parameters import PaginationParameters
from . import permissions, schemas, parameters
from .models import db, User
log = logging.getLogger(__name__)
api = Namespace('users', description="Users")
@api.route('/')
class Users(Resource):
    """
    Manipulations with users.
    """

    @api.login_required(oauth_scopes=['users:read'])
    @api.permission_required(permissions.AdminRolePermission())
    @api.parameters(PaginationParameters())
    @api.response(schemas.BaseUserSchema(many=True))
    def get(self, args):
        """
        List of users.

        Returns a list of users starting from ``offset`` limited by ``limit``
        parameter.
        """
        return User.query.offset(args['offset']).limit(args['limit'])

    # NOTE(review): unlike get(), post() carries no login_required decorator —
    # presumably open self-registration; confirm this is intentional.
    @api.parameters(parameters.AddUserParameters())
    @api.response(schemas.DetailedUserSchema())
    @api.response(code=http_exceptions.Forbidden.code)
    @api.response(code=http_exceptions.Conflict.code)
    def post(self, args):
        """
        Create a new user.
        """
        # commit_or_abort converts DB failures into an HTTP error response.
        with api.commit_or_abort(
                db.session,
                default_error_message="Failed to create a new user."
        ):
            new_user = User(**args)
            db.session.add(new_user)
        return new_user
@api.route('/signup_form')
class UserSignupForm(Resource):
    """Expose the data a client needs to render the signup form."""

    @api.response(schemas.UserSignupFormSchema())
    def get(self):
        """
        Get signup form keys.

        This endpoint must be used in order to get a server reCAPTCHA public key which
        must be used to receive a reCAPTCHA secret key for POST /users/ form.
        """
        # TODO: return the real reCAPTCHA server key.
        return {"recaptcha_server_key": "TODO"}
@api.route('/<int:user_id>')
@api.login_required(oauth_scopes=['users:read'])
@api.response(
    code=http_exceptions.NotFound.code,
    description="User not found.",
)
# Resolves <user_id> to a User instance passed to handlers as `user`.
@api.resolve_object_by_model(User, 'user')
class UserByID(Resource):
    """
    Manipulations with a specific user.
    """

    @api.permission_required(
        permissions.OwnerRolePermission,
        kwargs_on_request=lambda kwargs: {'obj': kwargs['user']}
    )
    @api.response(schemas.DetailedUserSchema())
    def get(self, user):
        """
        Get user details by ID.
        """
        return user

    @api.login_required(oauth_scopes=['users:write'])
    @api.permission_required(
        permissions.OwnerRolePermission,
        kwargs_on_request=lambda kwargs: {'obj': kwargs['user']}
    )
    @api.permission_required(permissions.WriteAccessPermission())
    @api.parameters(parameters.PatchUserDetailsParameters())
    @api.response(schemas.DetailedUserSchema())
    @api.response(code=http_exceptions.Conflict.code)
    def patch(self, args, user):
        """
        Patch user details by ID.
        """
        # commit_or_abort converts DB failures into an HTTP error response.
        with api.commit_or_abort(
                db.session,
                default_error_message="Failed to update user details."
        ):
            parameters.PatchUserDetailsParameters.perform_patch(args, user)
            db.session.merge(user)
        return user
@api.route('/me')
@api.login_required(oauth_scopes=['users:read'])
class UserMe(Resource):
    """
    Useful reference to the authenticated user itself.
    """

    @api.response(schemas.DetailedUserSchema())
    def get(self):
        """
        Get current user details.
        """
        # 404 should be unreachable for a logged-in user; kept as a guard.
        return User.query.get_or_404(current_user.id)
|
millen1m/flask-restplus-server-example
|
app/modules/users/resources.py
|
Python
|
mit
| 3,746
|
"""mydisk URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
from django.conf.urls.static import static
from django.views.generic import TemplateView
from photogram import settings
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # JWT login must precede the broader ^api/v1/users prefix match below.
    url(r'^api/v1/users/login', obtain_jwt_token),
    url(r'^api/v1/users', include('users.urls')),
    url(r'^api/v1/photos', include('photos.urls')),
    # Catch-all: serve the SPA entry template for every other path.
    url(r'^', TemplateView.as_view(template_name='index.html'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # uploaded media
|
KirovVerst/photogram
|
photogram/urls.py
|
Python
|
mit
| 1,311
|
from django.http import Http404,HttpResponse,HttpResponseRedirect
from django.shortcuts import render_to_response
from mysite.credits.models import Student,Logdate
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from django.core.context_processors import csrf
import decimal
import re
def search_form(request):
    """Render the empty student-ID search form."""
    template_name = 'search_form.html'
    return render_to_response(template_name)
def search(request):
    """Look up a student by ID (GET parameter ``q``) and render the results.

    NOTE: the template is given ``locals()``, so every local name below
    (q, error, ID, running, match, club, others, credit) is part of the
    template contract — do not rename them.
    """
    if 'q' in request.GET and request.GET['q']:
        q = request.GET['q']
        error = 0
        try:
            student = Student.objects.get(ID = q)
            ID = q
            running = student.running
            match = student.match
            club = student.club
            others = student.others
            credit = student.calcredit()
        except Student.DoesNotExist:
            # Template shows a "not found" message when error is truthy.
            error =1
    return render_to_response('search_result.html',locals())
#@login_required
def upload_form(request):
    """Render the CSV upload form; redirect anonymous users to /admin/.

    The decorator above is disabled; authentication is checked manually.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/admin/')
    c = {}
    c.update(csrf(request))  # CSRF token required by the POST form
    return render_to_response('upload_form.html',c)
def file_process(request):
    """Ingest an uploaded CSV of running-session logs named YYYY-MM-DD.csv.

    Each CSV row with 4 fields whose third field is ``STOP_RUNNING``
    credits 1.5 running units to the student whose ID is in the second
    field (students are created on first sight).

    Returns an HttpResponse describing the outcome ('ok' on success).
    """
    file_obj = request.FILES.get("rawfile", None)
    # Robustness fix: the original crashed with AttributeError when no file
    # was posted.
    if file_obj is None:
        return HttpResponse('no file uploaded')
    name = str(file_obj.name)
    # rsplit fixes a crash on names with more or fewer than one dot
    # (the original two-way unpack of split('.') raised ValueError).
    parts = name.rsplit('.', 1)
    if len(parts) != 2 or parts[1] != 'csv':
        return HttpResponse('csv only')
    log_date = parts[0]
    # Raw string + end anchor: the original unanchored pattern accepted
    # trailing junk such as '2014-01-011'.
    if not re.match(r"\d{4}-\d{2}-\d{2}$", log_date):
        return HttpResponse('filename error!')
    if Logdate.objects.filter(logdate = log_date):
        return HttpResponse('already uploaded!')
    p = Logdate(logdate = log_date)
    p.save()
    # Collect every student ID that finished a run in this log.
    ID_list = []
    for line in file_obj.read().split('\n'):
        fields = line.split(',')
        if len(fields) == 4 and fields[2] == 'STOP_RUNNING':
            ID_list.append(fields[1])
    for student_id in ID_list:
        try:
            p = Student.objects.get(ID = student_id)
        except Student.DoesNotExist:
            # First time we see this student: create the row.
            # (The original re-fetched in a ``finally`` block; saving and
            # reusing the fresh instance is equivalent and avoids a query.)
            p = Student(ID = student_id)
            p.save()
        p.running = p.running + decimal.Decimal(1.5)
        p.save()
    return HttpResponse('ok')
|
berry10086/credit
|
mysite/views.py
|
Python
|
mit
| 1,964
|
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.consent_request_body import ConsentRequestBody # noqa: E501
from swagger_client.rest import ApiException
class TestConsentRequestBody(unittest.TestCase):
    """Unit-test stubs for the generated ConsentRequestBody model."""

    def setUp(self):
        # No fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testConsentRequestBody(self):
        """Placeholder test for ConsentRequestBody construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.consent_request_body.ConsentRequestBody()  # noqa: E501
        pass
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
|
ltowarek/budget-supervisor
|
third_party/saltedge/test/test_consent_request_body.py
|
Python
|
mit
| 942
|
# -*- coding: utf-8 -*-
from src.actions import BambooFilterableMenu, HOST_URL, PLANS_CACHE_KEY, UPDATE_INTERVAL_PLANS, \
BambooWorkflowAction, build_bamboo_facade
from src.util import workflow
class BranchesFilterableMenu(BambooFilterableMenu):
    """Alfred menu listing the branches of one Bamboo plan."""

    def __init__(self, args):
        super(BranchesFilterableMenu, self).__init__(entity_name='plan branch',
                                                     update_interval=UPDATE_INTERVAL_PLANS,
                                                     cache_key=PLANS_CACHE_KEY,
                                                     args=args)

    def _add_to_result_list(self, branch):
        # URL placed on the clipboard when the user copies the row.
        browse_url = '{}/browse/{}'.format(workflow().settings.get(HOST_URL), branch.key)
        workflow().add_item(title=branch.name,
                            subtitle=branch.description,
                            largetext=branch.name,
                            arg=':branches ' + branch.key,
                            modifier_subtitles={
                                u'shift': u'Trigger build execution for this plan branch'
                            },  # supported modifiers: ``cmd``, ``ctrl``, ``shift``, ``alt``, ``fn``
                            copytext=browse_url,
                            valid=True)

    def _get_result_filter(self):
        # Branches are filtered against both name and description.
        return lambda branch: u' '.join([branch.name, branch.description])

    def _transform_from_cache(self, plans, q):
        return self.__find_branches_matching_plan(plans, q)

    def __find_branches_matching_plan(self, plans, plan_key):
        for plan in plans:
            if plan.plan_key == plan_key:
                return plan.branches
        return []

    # args: [':branches', 'plan_key, branch_name]
    def _get_query(self):
        has_branch_filter = len(self.args) == 3
        return self.args[-2] if has_branch_filter else self.args[-1]

    def _get_sub_query(self):
        return self.args[-1] if len(self.args) == 3 else None
class BranchesWorkflowAction(BambooWorkflowAction):
    """Workflow action for listing, opening and triggering plan branches."""

    def menu(self, args):
        """Render the branch selection menu for the given workflow args."""
        branch_workflow = BranchesFilterableMenu(args)
        return branch_workflow.run()

    def execute(self, args, ctrl_pressed, shift_pressed):
        """Trigger a build (shift held) or open the branch in the browser."""
        branch_key = args[-1]
        if shift_pressed:
            try:
                facade = build_bamboo_facade()
                facade.trigger_build(branch_key)
                print('Successfully triggered build for {}'.format(branch_key))
            # FIX: legacy ``except Exception, e`` syntax (removed in Python 3);
            # the ``as`` form is valid in Python 2.6+ and 3. Broad catch is kept
            # deliberately so any backend failure is reported to the user.
            except Exception as e:
                print('Failed to trigger build for {}: {}'.format(branch_key, str(e)))
        else:
            import webbrowser
            branch_browse_url = '{}/browse/{}'.format(workflow().settings.get(HOST_URL), branch_key)
            webbrowser.open(branch_browse_url)
|
mibexsoftware/alfred-bamboo-workflow
|
workflow/src/actions/branches.py
|
Python
|
mit
| 2,616
|
import pickle
import numpy as np
import os
import cv2
import random
import gate
from itertools import repeat
import sys
import time
# Flattened 32x32 RGB CIFAR image fed to the factory as one binary vector.
INPUT_SIZE = 32 * 32 * 3
# Single binary output: 1 = cat, 0 = dog (see the training loop below).
OUTPUT_SIZE = 1
HEAD_START = 10  # passed to gate.Factory; semantics defined in the gate module
TRAINING_DURATION = 3  # seconds the factory is given to settle per sample
TRAINING_SAMPLE_SIZE = 200  # number of images shown during training
TESTING_SAMPLE_SIZE = 100  # number of images shown during testing
def load_batch(fpath, label_key='labels'):
    """Load one pickled CIFAR batch file.

    Args:
        fpath: path to the pickled batch file.
        label_key: dict key holding the labels ('labels' for CIFAR-10).

    Returns:
        (data, labels): data reshaped to (n, 3, 32, 32); labels as stored
        under ``label_key``.
    """
    # FIX: ``with`` guarantees the file handle is closed even if
    # unpickling raises (the original leaked the handle in that case).
    with open(fpath, 'rb') as f:
        d = pickle.load(f)
    data = d['data']
    labels = d[label_key]
    data = data.reshape(data.shape[0], 3, 32, 32)
    return data, labels
def show_output(factory,testing=False):
    """Wait for the factory to settle, then read (and optionally score) its output.

    Args:
        factory: the gate.Factory being trained or tested.
        testing: False during training; during testing, a one-element list
            holding the expected label ([1] for cat, [0] for dog — see the
            calls in the loops below).
    """
    # Accumulated across calls so the overall error can be reported at the end.
    global error
    global error_divisor
    if testing:
        # Testing: half the training settle time before reading the output.
        time.sleep(TRAINING_DURATION/2)
        output = factory.output
        error += abs(testing[0] - output)
        error_divisor += 1
        # NOTE(review): this prints the whole list (e.g. "[1]"), not testing[0].
        print "RESULT: " + str(output) + "\tExpected: " + str(testing)
    else:
        # Training: give the factory the full settle duration; output unused.
        time.sleep(TRAINING_DURATION)
        output = factory.output
        #print "Output: " + str(output)
print "\n___ GATEFACTORY MEDIUM CLASSIFICATION (CATDOG) EXAMPLE ___\n"
print "Load CIFAR-10 dataset"
print "Pick random " + str(TRAINING_SAMPLE_SIZE) + " cat and " + str(TRAINING_SAMPLE_SIZE) + " dog images from the CIFAR-10 data batch to TRAIN the factory"
print "Pick random " + str(TESTING_SAMPLE_SIZE) + " cat and " + str(TESTING_SAMPLE_SIZE) + " dog images from the CIFAR-10 test batch to TEST the factory"
# Load CIFAR-10 dataset
path = './examples/cifar-10-batches-py/'
num_train_samples = 50000
x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.zeros((num_train_samples,), dtype='uint8')
# The five training batches hold 10000 images each.
for i in range(1, 6):
    fpath = os.path.join(path, 'data_batch_' + str(i))
    data, labels = load_batch(fpath)
    x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
    y_train[(i - 1) * 10000: i * 10000] = labels
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
# channels last
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
# Generate the training data (CIFAR-10 class 3 = cat, class 5 = dog).
# NOTE(review): range(0, n - 1) skips the last sample — looks like an
# off-by-one, harmless for random sampling.
cats = []
for i in range(0, num_train_samples - 1):
    if y_train[i] == 3:
        cats.append(x_train[i])
dogs = []
for i in range(0, num_train_samples - 1):
    if y_train[i] == 5:
        dogs.append(x_train[i])
# Generate the testing data (test batch has num_train_samples/5 = 10000
# images; integer division — this file is Python 2).
test_cats = []
for i in range(0, num_train_samples/5 - 1):
    if y_test[i] == 3:
        test_cats.append(x_test[i])
test_dogs = []
for i in range(0, num_train_samples/5 - 1):
    if y_test[i] == 5:
        test_dogs.append(x_test[i])
print "Create a new GateFactory with input size of " + str(INPUT_SIZE) + " and output size of " + str(OUTPUT_SIZE)
factory = gate.Factory(INPUT_SIZE,OUTPUT_SIZE,HEAD_START)
# Running error totals updated by show_output() during testing.
error = 0
error_divisor = 0
print "\n*** LEARNING ***"
print "\nMap " + str(TRAINING_SAMPLE_SIZE/2) + " Different Cat Images to Color Blue & " + str(TRAINING_SAMPLE_SIZE/2) + " Different Dog Images to Color Red - Training Duration: " + str(TRAINING_DURATION * TRAINING_SAMPLE_SIZE) + " seconds (OpenCV latency not included)"
# Training loop: alternate cat (label [1]) and dog (label [0]) samples.
# Images are normalised to [0, 1] then thresholded to a binary vector.
for i in range(0,TRAINING_SAMPLE_SIZE):
    if (i % 2) == 0:
        cat = random.sample(cats, 1)[0]
        cat_normalized = np.true_divide(cat, 255).flatten()
        cat_binary = (cat_normalized > 0.5).astype(int)
        factory.load(cat_binary,[1])
    else:
        dog = random.sample(dogs, 1)[0]
        dog_normalized = np.true_divide(dog, 255).flatten()
        dog_binary = (dog_normalized > 0.5).astype(int)
        factory.load(dog_binary,[0])
    show_output(factory)
print "\nTest " + str(TESTING_SAMPLE_SIZE/2) + " Different Cat Images & " + str(TESTING_SAMPLE_SIZE/2) + " Different Dog Images - Testing Duration: " + str(TRAINING_DURATION * TESTING_SAMPLE_SIZE) + " seconds (OpenCV latency not included)"
# Testing loop: no label is loaded; show_output() receives the expected
# label and accumulates the absolute error.
for i in range(0,TESTING_SAMPLE_SIZE):
    binary_random = random.randint(0,1)
    if binary_random == 0:
        cat = random.sample(test_cats, 1)[0]
        cat_normalized = np.true_divide(cat, 255).flatten()
        cat_binary = (cat_normalized > 0.5).astype(int)
        factory.load(cat_binary)
        show_output(factory,[1])
    else:
        dog = random.sample(test_dogs, 1)[0]
        dog_normalized = np.true_divide(dog, 255).flatten()
        dog_binary = (dog_normalized > 0.5).astype(int)
        factory.load(dog_binary)
        show_output(factory,[0])
print ""
# Shut the factory down and close any OpenCV windows it opened.
factory.stop()
cv2.destroyAllWindows()
print "\nGateFactory searched the solution over " + str(factory.combination_counter) + " different boolean combinations by going " + str(factory.level_counter) + " levels of deepness\n"
print "\nOverall error: " + str(float(error)/error_divisor) + "\n"
print "\nThe best boolean expression has been found for your problem is:\n\t" + str(factory.best) + "\n"
print "Depth of this boolean expression is: " + str(factory.best_depth) + "\n"
factory.generate_tex_file()
|
mertyildiran/GateFactory
|
examples/classification_medium.py
|
Python
|
mit
| 4,925
|
from maka.data.EditHistory import Edit, EditHistory
class Document(object):
    """A sequence of observations with undo/redo support via an edit history.

    Edits go through :meth:`edit`, which records them in an EditHistory and
    notifies registered edit listeners.
    """

    extensionName = None  # string, more human-friendly than class name
    observationClasses = None  # set, useful for constructing document formats
    fieldClasses = None  # set, useful for constructing document formats

    # Design note (from the original author): the application should be
    # extensible with new edit operations. An edit operation takes a document,
    # a selection, and possibly other information collected from a GUI and
    # modifies the document; it has a name (e.g. "Cut" or "Reduce"), a
    # shortcut, a tip, and a description.

    def __init__(
            self, observations=None, documentFormat=None, fileFormat=None, filePath=None,
            edited=False):
        super(Document, self).__init__()
        self.observations = observations if observations is not None else []
        self.documentFormat = documentFormat
        self.fileFormat = fileFormat
        self.filePath = filePath
        self.edited = edited
        self._editHistory = EditHistory()
        self._editListeners = set()

    def addEditListener(self, listener):
        """Register a callable invoked with every edit applied to this document."""
        self._editListeners.add(listener)

    def removeEditListener(self, listener):
        self._editListeners.remove(listener)

    def _notifyEditListeners(self, edit):
        for notify in self._editListeners:
            notify(edit)

    def edit(self, name, startIndex, endIndex, observations):
        """Replace observations[startIndex:endIndex], record and broadcast the edit."""
        theEdit = DocumentEdit(name, self, startIndex, endIndex, observations)
        theEdit.do()
        self._editHistory.append(theEdit)
        self._notifyEditListeners(theEdit)

    @property
    def saved(self):
        return self._editHistory.documentSaved

    def markSaved(self):
        self._editHistory.markDocumentSaved()

    @property
    def undoName(self):
        return self._editHistory.undoName

    @property
    def redoName(self):
        return self._editHistory.redoName

    def undo(self):
        undone = self._editHistory.undo()
        self._notifyEditListeners(undone)
        return undone

    def redo(self):
        redone = self._editHistory.redo()
        self._notifyEditListeners(redone)
        return redone
class DocumentEdit(Edit):
    """A single replacement of a slice of a document's observations."""

    def __init__(self, name, document, startIndex, endIndex, observations):
        super(DocumentEdit, self).__init__(name)
        _checkEditIndices(startIndex, endIndex, len(document.observations))
        self._document = document
        self._startIndex = startIndex
        self._endIndex = endIndex
        # Snapshot both sides of the edit so it can be inverted later.
        self.oldObservations = _copy(document.observations, startIndex, endIndex)
        self.newObservations = _copy(observations, 0, len(observations))

    @property
    def document(self):
        return self._document

    @property
    def startIndex(self):
        return self._startIndex

    @property
    def endIndex(self):
        return self._endIndex

    @property
    def inverse(self):
        """An edit that restores the observations this edit replaced."""
        inverseName = self.name + ' Inverse'
        inverseEnd = self.startIndex + len(self.newObservations)
        return DocumentEdit(
            inverseName, self.document, self.startIndex, inverseEnd, self.oldObservations)

    def do(self):
        # Install fresh copies so later mutation of the snapshot is harmless.
        replacement = _copy(self.newObservations, 0, len(self.newObservations))
        self.document.observations[self.startIndex:self.endIndex] = replacement
def _checkEditIndices(startIndex, endIndex, maxIndex):
_checkEditIndex(startIndex, maxIndex, 'start')
_checkEditIndex(endIndex, maxIndex, 'end')
if endIndex < startIndex:
raise ValueError('Edit end index must be at least start index.')
def _checkEditIndex(index, maxIndex, name):
if index < 0:
raise ValueError('Edit {:s} index must be at least zero.'.format(name))
if index > maxIndex:
raise ValueError('Edit {:s} index must not exceed document length.'.format(name))
def _copy(observations, startIndex, endIndex):
return tuple(observations[i].copy() for i in xrange(startIndex, endIndex))
|
HaroldMills/Maka
|
src/maka/data/Document.py
|
Python
|
mit
| 4,372
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTUpdateMeshUnitsParams(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model has no enum-restricted and no validated attributes.
    allowed_values = {}
    validations = {}
    # None: unknown JSON keys are not accepted as additional properties.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "units": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This model takes no part in polymorphic (discriminator) dispatch.
        return None

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        "units": "units",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        # Not a composed (allOf/oneOf/anyOf) schema.
        return None

    # Internal bookkeeping attributes assigned directly in __init__
    # (not model data; see the kwargs loop below).
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_update_mesh_units_params.BTUpdateMeshUnitsParams - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            units (str): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Assign model attributes via setattr so ModelNormal's type checking
        # runs; optionally discard keys unknown to attribute_map.
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/bt_update_mesh_units_params.py
|
Python
|
mit
| 4,589
|
from django.shortcuts import render
from .models import Contest, Score
from django.http import Http404
from problems.models import Problem
from datetime import datetime
from django.template.context_processors import csrf
from django.contrib.auth.models import User
from django.utils import timezone
from core.models import Profile
#now = timezone.make_aware(datetime.now(),timezone.get_default_timezone()).astimezone(timezone.utc)
def index(request):
    """Display all :model:`contests.Contest` objects, grouped by status.

    The context provides past, currently-running and upcoming contests plus
    the five highest-rated user profiles, so users can browse everything
    from one page and see what is coming up.

    **Template:**
        :template:`contests/index.html`
    """
    context = {
        'past_contests': Contest.objects.filter(end_time__lt=timezone.now()),
        'current_contests': Contest.objects.filter(start_time__lt=timezone.now(), end_time__gt=timezone.now()),
        'upcoming_contests': Contest.objects.filter(start_time__gt=timezone.now()),
        'toprated': Profile.objects.order_by('-rating')[:5],
    }
    return render(request, "contests/index.html", context)
def contest(request,contestID):
    """Detailed view for a single contest.

    Behaviour depends on where "now" falls in the contest window:
    finished contests show problems and final top scores (finalising
    ratings exactly once), running contests show problems with the
    countdown template, and future contests show a registration page
    (POST registers the current user and creates a zero Score).

    **Template:**
        1. :template:`contests/notactive.html`
        2. :template:`contests/contest.html`
    """
    try:
        contest = Contest.objects.get(pk=contestID)
        user = request.user
    except Contest.DoesNotExist:
        raise Http404("There is no such contest :/ Please check again :P")
    registered = contest.registered_user.filter(username = request.user.username)
    top_scores = Score.objects.filter(contest=contest).order_by('-score')[:5]
    # Second-precision UTC timestamp; computed once instead of being rebuilt
    # in every comparison as the original did.
    now_str = timezone.make_aware(datetime.now(), timezone.get_default_timezone()).astimezone(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
    if contest.end_time.strftime('%Y-%m-%d %H:%M:%S') <= now_str:
        # Contest over: finalise ratings exactly once.
        problems = Problem.objects.filter(contest=contest).order_by('letter')
        if contest.completed == False:
            contest.completed = True
            rating_update(contest.id)
            contest.save()
        return render(request, "contests/contest.html", {'contest': contest, 'problems': problems, 'registered': registered, 'topscores': top_scores})
    elif now_str >= contest.start_time.strftime('%Y-%m-%d %H:%M:%S'):
        # Contest currently running.
        problems = Problem.objects.filter(contest=contest).order_by('letter')
        return render(request, "contests/contest.html", {'contest': contest, 'problems': problems, 'registered': registered, 'topscores': top_scores})
    else:
        # Contest not started yet: registration page.
        if request.method == 'POST':
            contest.registered_user.add(request.user)
            # BUG FIX: the original read ``contest.score_query-set.create(...)``,
            # which Python parses as subtraction (``score_query - set.create``)
            # and crashes with AttributeError. The reverse relation Django
            # generates for Score is ``score_set``.
            # TODO(review): confirm Score defines no custom related_name.
            contest.score_set.create(user=user, score=0)
            print(request.user.username)
        args = {}
        args.update(csrf(request))
        args['contest'] = contest
        args['registered'] = registered
        return render(request, "contests/notactive.html", args)
def contest_registered(request, contestID):
    """List every user registered for the given contest.

    **Args:**
        ``contest`` — the Contest instance looked up by primary key.
        ``registerd_user_list`` — query-set of users registered for it.

    **Template:**
        :template:`contests/user_list.html`
    """
    try:
        contest = Contest.objects.get(pk=contestID)
    except Contest.DoesNotExist:
        raise Http404("There is no such contest :/ Please check again :P")
    context = {
        'contest': contest,
        'registerd_user_list': contest.registered_user.all(),
    }
    return render(request, "contests/user_list.html", context)
def rating_update(contestID):
    """Recompute every participant's rating after a contest ends.

    Elo-flavoured update: each participant's ``wins`` counter is the number
    of opponents they out-scored minus the number they lost to, and the new
    rating is (sum of opponents' old ratings + 400 * wins) / opponents.
    Called once from contest() when a contest is first seen as completed.
    """
    contest=Contest.objects.get(id=contestID)
    contest_scores=Score.objects.filter(contest=contest)
    # Sum of all participants' pre-contest ratings (including their own;
    # their own is subtracted back out below).
    total_score=0
    for score in contest_scores:
        total_score+=score.user.profile.rating
    # Pairwise comparison; comparing a score with itself adds nothing.
    # NOTE(review): assumes ``wins`` starts at 0 for this contest — confirm
    # the Score model default.
    for score1 in contest_scores:
        for score2 in contest_scores:
            if score1.score>score2.score:
                score1.wins += 1
            elif score1.score<score2.score:
                score1.wins -= 1
        score1.save()
    # Number of opponents each participant faced.
    games=contest_scores.count()-1
    if games>0:
        for score in contest_scores:
            profile=score.user.profile
            # total_score - own rating = sum of opponents' old ratings.
            # NOTE(review): integer division under Python 2 truncates here.
            profile.rating = (total_score-profile.rating + 400*score.wins)/games
            profile.save()
|
cs251-eclipse/EclipseOJ
|
EclipseOJ/contests/views.py
|
Python
|
mit
| 6,017
|
from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import nn_heart
import utils_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import data
import utils
# --- run control -------------------------------------------------------------
caching = None            # no chunk caching
restart_from_save = None  # train from scratch (no checkpoint to resume)
rng = np.random.RandomState(42)  # fixed seed for reproducible augmentation
# --- patch geometry ----------------------------------------------------------
patch_size = (64, 64)       # network input size in pixels
mm_patch_size = (128, 128)  # size of the normalised patch the input is cut from
# Augmentation applied during training.
train_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'rotation_range': (-180, 180),
    'mask_roi': False,
    'translation_range_x': (-10, 10),
    'translation_range_y': (-10, 10),
    'shear_range': (0, 0),
    'roi_scale_range': (1.2, 1.5),
    'do_flip': (True, False),
    'zoom_range': (1 / 1.5, 1.5),
    'sequence_shift': True
}
# Validation: deterministic, no augmentation.
valid_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'mask_roi': False
}
# Test-time augmentation: same as training but without zooming.
test_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'rotation_range': (-180, 180),
    'mask_roi': False,
    'translation_range_x': (-10, 10),
    'translation_range_y': (-10, 10),
    'shear_range': (0, 0),
    'roi_scale_range': (1.2, 1.5),
    'do_flip': (True, False),
    'zoom_range': (1., 1.),
    'sequence_shift': True
}
data_prep_fun = data.transform_norm_rescale_after
# --- batching ----------------------------------------------------------------
batch_size = 32
nbatches_chunk = 12
chunk_size = batch_size * nbatches_chunk  # samples processed per chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
# All three iterators use the 4-chamber ('4ch') view.
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                                   batch_size=chunk_size,
                                                                   transform_params=train_transformation_params,
                                                                   patient_ids=train_valid_ids['train'],
                                                                   labels_path=TRAIN_LABELS_PATH,
                                                                   slice2roi_path='pkl_train_slice2roi.pkl',
                                                                   full_batch=True, random=True, infinite=True,
                                                                   view='4ch',
                                                                   data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                                   batch_size=chunk_size,
                                                                   transform_params=valid_transformation_params,
                                                                   patient_ids=train_valid_ids['valid'],
                                                                   labels_path=TRAIN_LABELS_PATH,
                                                                   slice2roi_path='pkl_train_slice2roi.pkl',
                                                                   full_batch=False, random=False, infinite=False,
                                                                   view='4ch',
                                                                   data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
                                                                  batch_size=chunk_size,
                                                                  transform_params=test_transformation_params,
                                                                  slice2roi_path='pkl_validate_slice2roi.pkl',
                                                                  full_batch=False, random=False, infinite=False,
                                                                  view='4ch',
                                                                  data_prep_fun=data_prep_fun)
# --- schedule ----------------------------------------------------------------
# NOTE(review): Python 2 integer division; ~300 epochs in total.
nchunks_per_epoch = max(1, train_data_iterator.nsamples / chunk_size)
max_nchunks = nchunks_per_epoch * 300
# Step-wise learning-rate decay at 50/60/80/90% of training.
learning_rate_schedule = {
    0: 0.0001,
    int(max_nchunks * 0.5): 0.00008,
    int(max_nchunks * 0.6): 0.00004,
    int(max_nchunks * 0.8): 0.00001,
    int(max_nchunks * 0.9): 0.000005
}
validate_every = nchunks_per_epoch
save_every = nchunks_per_epoch
# --- layer shorthands used by build_model() ----------------------------------
# 3x3 'same' convolution with very-leaky ReLU and orthogonal init.
conv3 = partial(Conv2DDNNLayer,
                stride=(1, 1),
                pad="same",
                filter_size=(3, 3),
                nonlinearity=nn.nonlinearities.very_leaky_rectify,
                b=nn.init.Constant(0.1),
                W=nn.init.Orthogonal("relu"))
# 2x2 non-overlapping max pooling.
max_pool = partial(MaxPool2DDNNLayer,
                   pool_size=(2, 2),
                   stride=(2, 2))
def build_model(l_in=None):
    """Build the VGG-style conv net with two (mu, sigma) Normal-CDF output heads.

    Args:
        l_in: optional pre-built input layer; when omitted, a
            (None, 30, 64, 64) InputLayer is created (30 frames per slice
            sequence, patch_size spatial dims).

    Returns:
        namedtuple 'Model' with fields l_ins, l_outs, l_targets, l_top,
        mu_layers, sigma_layers.
    """
    l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
    # Five conv/pool stages (widths 128-128-256-512-512), VGG-like.
    l = conv3(l_in, num_filters=128)
    l = conv3(l, num_filters=128)
    l = max_pool(l)
    l = conv3(l, num_filters=128)
    l = conv3(l, num_filters=128)
    l = max_pool(l)
    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)
    l = max_pool(l)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = max_pool(l)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = max_pool(l)
    # Head 0: dense stack ending in a Normal CDF (mu bias init 100,
    # sigma bias init 20; lb_softplus keeps both positive).
    # NOTE(review): presumably the two heads are systole/diastole volume
    # CDFs — confirm against the training targets.
    l_d01 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1),nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01, p=0.5), num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                               b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
    sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                                  b=nn.init.Constant(20), nonlinearity=nn_heart.lb_softplus())
    l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0)
    # ---------------------------------------------------------------
    # Head 1: identical structure, mu bias init 150.
    l_d11 = nn.layers.DenseLayer(l, num_units=1024, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11, p=0.5), num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                               b=nn.init.Constant(150), nonlinearity=nn_heart.lb_softplus())
    sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                                  b=nn.init.Constant(20), nonlinearity=nn_heart.lb_softplus())
    l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1)
    l_outs = [l_cdf0, l_cdf1]
    # l_top only merges the outputs so get_all_params() reaches both heads.
    l_top = nn.layers.MergeLayer(l_outs)
    l_target_mu0 = nn.layers.InputLayer((None, 1))
    l_target_mu1 = nn.layers.InputLayer((None, 1))
    l_targets = [l_target_mu0, l_target_mu1]
    mu_layers = [mu0, mu1]
    sigma_layers = [sigma0, sigma1]
    return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'mu_layers', 'sigma_layers'])([l_in], l_outs,
                                                                                                       l_targets, l_top,
                                                                                                       mu_layers,
                                                                                                       sigma_layers)
def build_objective(model, deterministic=False):
    """Mean CRPS over the model's two CDF outputs against Heaviside targets."""
    def _crps(out_layer, target_layer):
        # Squared distance between predicted CDF and the step-function target.
        pred = nn.layers.get_output(out_layer, deterministic=deterministic)
        target = nn.layers.get_output(target_layer)
        return T.mean((pred - nn_heart.heaviside(target)) ** 2)

    crps0 = _crps(model.l_outs[0], model.l_targets[0])
    crps1 = _crps(model.l_outs[1], model.l_targets[1])
    return 0.5 * (crps0 + crps1)
def build_updates(train_loss, model, learning_rate):
    """Adam updates for every parameter reachable from the merged top layer."""
    params = nn.layers.get_all_params(model.l_top)
    return nn.updates.adam(train_loss, params, learning_rate)
def get_mean_validation_loss(batch_predictions, batch_targets):
    """Placeholder: validation loss is not computed here; always [0, 0]."""
    return [0] * 2
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
    """Mean CRPS per output head, accumulated over all batches.

    batch_predictions/batch_targets are lists (one entry per batch) of
    per-head arrays; returns one CRPS value per head.
    """
    crpss = []
    for head in range(len(batch_predictions[0])):
        # Stack this head's predictions/targets across every batch.
        preds = np.vstack([batch[head] for batch in batch_predictions])
        targets = np.vstack([batch[head] for batch in batch_targets])
        target_cdf = utils_heart.heaviside_function(targets)
        crpss.append(np.mean((preds - target_cdf) ** 2))
    return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
    """Delegate to utils_heart: average the CDF predictions per patient."""
    return utils_heart.get_patient_average_cdf_predictions(
        batch_predictions, batch_patient_ids, mean)
|
317070/kaggle-heart
|
ira/configurations/ch4_zoom_leaky_after_nomask_seqshift.py
|
Python
|
mit
| 9,506
|
from rl.core import Processor
from rl.memory import Memory
from rl.policy import Policy
from package.environment import SeleniumEnvironment
class AbstractConfiguration(object):
    """Base class bundling every tunable setting of an agent run.

    Concrete configurations assign the class attributes below and must
    implement determine_reward(); the remaining hooks are optional
    overrides (their default is a no-op).
    """
    # Run control.
    mode = None  # type: KickoffModes
    use_preset_training = None  # type: bool
    render = None  # type: bool
    warmup_steps = None  # type: int
    number_test_episodes = None  # type: int
    number_of_steps = None  # type: int
    # Browser window / observation geometry.
    window_width = None  # type: int
    window_height = None  # type: int
    window_length = None  # type: int
    # Agent hyper-parameters.
    gamma = None  # type: int
    target_model_update = None  # type: int
    train_interval = None  # type: int
    delta_clip = None  # type: int
    learning_rate = None  # type: int
    metrics = None  # type: str
    # Collaborating keras-rl / environment objects.
    processor = None  # type: Processor
    memory = None  # type: Memory
    # Checkpointing.
    weights_filename = None  # type: str
    checkpoint_interval_steps = None  # type: int
    checkpoint_weights_filename_base = None  # type: str
    environment = None  # type: SeleniumEnvironment
    policy = None  # type: Policy

    def determine_reward(self, driver, action_index):
        # Mandatory hook: compute the reward for the action just taken.
        raise NotImplementedError()

    def create_cnn_model(self):
        # Optional hook: build and return the agent's model.
        pass

    def on_step_reset(self):
        # Optional hook: called when a step is reset.
        pass

    def on_environment_creation(self):
        # Optional hook: called after the environment is created.
        pass

    def get_preset_training_step(self):
        # Optional hook: supply a scripted action during preset training.
        pass
class KickoffModes(object):
    """String constants naming the two supported run modes."""
    train = 'train'
    test = 'test'
|
bewestphal/SeleniumAI
|
package/models.py
|
Python
|
mit
| 1,419
|
__author__ = 'chris'
import sys
import argparse
import json
import time
from twisted.internet import reactor
from txjsonrpc.netstring.jsonrpc import Proxy
from binascii import hexlify, unhexlify
from dht.utils import digest
from txjsonrpc.netstring import jsonrpc
from market.profile import Profile
from protos import objects, countries
from db.datastore import HashMap
from keyutils.keys import KeyChain
from market.contracts import Contract
from collections import OrderedDict
from interfaces import MessageListener
from zope.interface import implements
from dht.node import Node
def do_continue(value):
    """Deferred callback that deliberately ignores the result."""
    return None
def print_value(value):
    """JSON-RPC success callback: pretty-print the result, then stop the reactor."""
    print json.dumps(value, indent=4)
    reactor.stop()
def print_error(error):
    """JSON-RPC errback: report the failure, then stop the reactor."""
    print 'error', error
    reactor.stop()
class Parser(object):
def __init__(self, proxy_obj):
parser = argparse.ArgumentParser(
description='OpenBazaar Network CLI',
usage='''
python networkcli.py command [<arguments>]
commands:
addsocialaccount add a social media account to the profile
addpgpkey add a pgp key to the profile
follow follow a user
unfollow unfollow a user
getinfo returns an object containing various state info
getpeers returns the id of all the peers in the routing table
get fetches the given keyword from the dht
set sets the given keyword/key in the dht
delete deletes the keyword/key from the dht
getnode returns a node's ip address given its guid.
getcontract fetchs a contract from a node given its hash and guid
getcontractmetadata fetches the metadata (including thumbnail image) for the contract
getimage fetches an image from a node given its hash and guid
getprofile fetches the profile from the given node.
getmoderators fetches a list of moderators
getusermetadata fetches the metadata (shortened profile) for the node
getlistings fetches metadata about the store's listings
getfollowers fetches a list of followers of a node
getfollowing fetches a list of users a node is following
getmessages fetches messages from the dht
sendnotification sends a notification to all your followers
setcontract sets a contract in the filesystem and db
setimage maps an image hash to a filepath in the db
setasmoderator sets a node as a moderator
setprofile sets the given profile data in the database
shutdown closes all outstanding connections.
''')
parser.add_argument('command', help='Execute the given command')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
parser.print_help()
exit(1)
getattr(self, args.command)()
self.proxy = proxy_obj
@staticmethod
def get():
parser = argparse.ArgumentParser(
description="Fetch the given keyword from the dht and return all the entries",
usage='''usage:
networkcli.py get [-kw KEYWORD]''')
parser.add_argument('-kw', '--keyword', required=True, help="the keyword to fetch")
args = parser.parse_args(sys.argv[2:])
keyword = args.keyword
d = proxy.callRemote('get', keyword)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def set():
parser = argparse.ArgumentParser(
description='Set the given keyword/key pair in the dht. The value will be your '
'serialized node information.',
usage='''usage:
networkcli.py set [-kw KEYWORD] [-k KEY]''')
parser.add_argument('-kw', '--keyword', required=True, help="the keyword to set in the dht")
parser.add_argument('-k', '--key', required=True, help="the key to set at the keyword")
args = parser.parse_args(sys.argv[2:])
keyword = args.keyword
key = args.key
d = proxy.callRemote('set', keyword, key)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def delete():
parser = argparse.ArgumentParser(
description="Deletes the given keyword/key from the dht. Signature will be automatically generated.",
usage='''usage:
networkcli.py delete [-kw KEYWORD] [-k KEY]''')
parser.add_argument('-kw', '--keyword', required=True, help="where to find the key")
parser.add_argument('-k', '--key', required=True, help="the key to delete")
args = parser.parse_args(sys.argv[2:])
keyword = args.keyword
key = args.key
d = proxy.callRemote('delete', keyword, key)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getinfo():
parser = argparse.ArgumentParser(
description="Returns an object containing various state info",
usage='''usage:
networkcli getinfo''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('getinfo')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def shutdown():
parser = argparse.ArgumentParser(
description="Terminates all outstanding connections.",
usage='''usage:
networkcli shutdown''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('shutdown')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getpubkey():
parser = argparse.ArgumentParser(
description="Returns this node's public key.",
usage='''usage:
networkcli getpubkey''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('getpubkey')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getcontract():
parser = argparse.ArgumentParser(
description="Fetch a contract given its hash and guid.",
usage='''usage:
networkcli.py getcontract [-c HASH] [-g GUID]''')
parser.add_argument('-c', '--hash', required=True, help="the hash of the contract")
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
hash_value = args.hash
guid = args.guid
d = proxy.callRemote('getcontract', hash_value, guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getimage():
parser = argparse.ArgumentParser(
description="Fetch an image given its hash and guid.",
usage='''usage:
networkcli.py getcontract [-i HASH] [-g GUID]''')
parser.add_argument('-i', '--hash', required=True, help="the hash of the image")
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
hash_value = args.hash
guid = args.guid
d = proxy.callRemote('getimage', hash_value, guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getpeers():
parser = argparse.ArgumentParser(
description="Returns id of all peers in the routing table",
usage='''usage:
networkcli getpeers''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('getpeers')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getnode():
parser = argparse.ArgumentParser(
description="Fetch the ip address for a node given its guid.",
usage='''usage:
networkcli.py getnode [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to find")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getnode', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def setprofile():
parser = argparse.ArgumentParser(
description="Sets a profile in the database.",
usage='''usage:
networkcli.py setprofile [options]''')
parser.add_argument('-n', '--name', help="the name of the user/store")
parser.add_argument('-o', '--onename', help="the onename id")
parser.add_argument('-a', '--avatar', help="the file path to the avatar image")
parser.add_argument('-hd', '--header', help="the file path to the header image")
parser.add_argument('-c', '--country',
help="a string consisting of country from protos.countries.CountryCode")
# we could add all the fields here but this is good enough to test.
args = parser.parse_args(sys.argv[2:])
p = Profile()
u = objects.Profile()
h = HashMap()
if args.name is not None:
u.name = args.name
if args.country is not None:
u.location = countries.CountryCode.Value(args.country.upper())
if args.onename is not None:
u.handle = args.onename
if args.avatar is not None:
with open(args.avatar, "r") as filename:
image = filename.read()
hash_value = digest(image)
u.avatar_hash = hash_value
h.insert(hash_value, args.avatar)
if args.header is not None:
with open(args.header, "r") as filename:
image = filename.read()
hash_value = digest(image)
u.header_hash = hash_value
h.insert(hash_value, args.header)
u.encryption_key = KeyChain().encryption_pubkey
p.update(u)
@staticmethod
def addpgpkey():
parser = argparse.ArgumentParser(
description="Add a pgp key to the profile.",
usage='''usage:
networkcli.py addpgpkey -k KEY, -s SIGNATURE''')
parser.add_argument('-k', '--key', help="path to the key file")
parser.add_argument('-s', '--signature', help="path to the signature file")
args = parser.parse_args(sys.argv[2:])
with open(args.key, "r") as filename:
key = filename.read()
with open(args.signature, "r") as filename:
sig = filename.read()
p = Profile()
print p.add_pgp_key(key, sig, KeyChain().guid.encode("hex"))
@staticmethod
def addsocialaccount():
parser = argparse.ArgumentParser(
description="Add a social media account to the profile.",
usage='''usage:
networkcli.py addsocialaccout -t TYPE, -u USERNAME, -p PROOF''')
parser.add_argument('-t', '--type', help="the type of account")
parser.add_argument('-u', '--username', help="the username")
parser.add_argument('-p', '--proof', help="the proof url")
args = parser.parse_args(sys.argv[2:])
p = Profile()
p.add_social_account(args.type, args.username, args.proof)
@staticmethod
def getprofile():
parser = argparse.ArgumentParser(
description="Fetch the profile from the given node. Images will be saved in cache.",
usage='''usage:
networkcli.py getprofile [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getprofile', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getusermetadata():
parser = argparse.ArgumentParser(
description="Fetches the metadata (small profile) from"
"a given node. The images will be saved in cache.",
usage='''usage:
networkcli.py getusermetadata [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getusermetadata', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def setcontract():
parser = argparse.ArgumentParser(
description="Sets a new contract in the database and filesystem.",
usage='''usage:
networkcli.py setcontract [-f FILEPATH]''')
parser.add_argument('-f', '--filepath', help="a path to a completed json contract")
args = parser.parse_args(sys.argv[2:])
with open(args.filepath) as data_file:
contract = json.load(data_file, object_pairs_hook=OrderedDict)
Contract(contract).save()
@staticmethod
def setimage():
parser = argparse.ArgumentParser(
description="Maps a image hash to a file path in the database",
usage='''usage:
networkcli.py setimage [-f FILEPATH]''')
parser.add_argument('-f', '--filepath', help="a path to the image")
args = parser.parse_args(sys.argv[2:])
with open(args.filepath, "r") as f:
image = f.read()
d = digest(image)
h = HashMap()
h.insert(d, args.filepath)
print h.get_file(d)
@staticmethod
def getlistings():
parser = argparse.ArgumentParser(
description="Fetches metadata about the store's listings",
usage='''usage:
networkcli.py getmetadata [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getlistings', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getcontractmetadata():
parser = argparse.ArgumentParser(
description="Fetches the metadata for the given contract. The thumbnail images will be saved in cache.",
usage='''usage:
networkcli.py getcontractmetadata [-g GUID] [-c CONTRACT]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
parser.add_argument('-c', '--contract', required=True, help="the contract hash")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
contract = args.contract
d = proxy.callRemote('getcontractmetadata', guid, contract)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def setasmoderator():
parser = argparse.ArgumentParser(
description="Sets the given node as a moderator.",
usage='''usage:
networkcli.py setasmoderator''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('setasmoderator')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getmoderators():
parser = argparse.ArgumentParser(
description="Fetches a list of moderators",
usage='''usage:
networkcli.py getmoderators ''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('getmoderators')
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def follow():
parser = argparse.ArgumentParser(
description="Follow a user",
usage='''usage:
networkcli.py follow [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to follow")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('follow', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def unfollow():
parser = argparse.ArgumentParser(
description="Unfollow a user",
usage='''usage:
networkcli.py unfollow [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to unfollow")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('unfollow', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getfollowers():
parser = argparse.ArgumentParser(
description="Get a list of followers of a node",
usage='''usage:
networkcli.py getfollowers [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getfollowers', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getfollowing():
parser = argparse.ArgumentParser(
description="Get a list users a node is following",
usage='''usage:
networkcli.py getfollowing [-g GUID]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to query")
args = parser.parse_args(sys.argv[2:])
guid = args.guid
d = proxy.callRemote('getfollowing', guid)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def sendnotification():
parser = argparse.ArgumentParser(
description="Send a notification to all your followers",
usage='''usage:
networkcli.py sendnotification [-m MESSAGE]''')
parser.add_argument('-m', '--message', required=True, help="the message to send")
args = parser.parse_args(sys.argv[2:])
message = args.message
d = proxy.callRemote('sendnotification', message)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def sendmessage():
parser = argparse.ArgumentParser(
description="Send a message to another node",
usage='''usage:
networkcli.py sendmessage [-g GUID] [-p PUBKEY] [-m MESSAGE] [-o]''')
parser.add_argument('-g', '--guid', required=True, help="the guid to send to")
parser.add_argument('-p', '--pubkey', required=True, help="the encryption key of the node")
parser.add_argument('-m', '--message', required=True, help="the message to send")
parser.add_argument('-o', '--offline', action='store_true', help="sends to offline recipient")
args = parser.parse_args(sys.argv[2:])
message = args.message
guid = args.guid
pubkey = args.pubkey
offline = args.offline
d = proxy.callRemote('sendmessage', guid, pubkey, message, offline)
d.addCallbacks(print_value, print_error)
reactor.run()
@staticmethod
def getmessages():
parser = argparse.ArgumentParser(
description="Get messages from the dht",
usage='''usage:
networkcli.py getmessages''')
parser.parse_args(sys.argv[2:])
d = proxy.callRemote('getmessages')
d.addCallbacks(print_value, print_error)
reactor.run()
# RPC-Server
class RPCCalls(jsonrpc.JSONRPC):
def __init__(self, kserver, mserver, keys):
jsonrpc.JSONRPC.__init__(self)
self.kserver = kserver
self.mserver = mserver
self.keys = keys
def jsonrpc_getpubkey(self):
return hexlify(self.keys.guid_signed_pubkey)
def jsonrpc_getinfo(self):
info = {"version": "0.1"}
num_peers = 0
for bucket in self.kserver.protocol.router.buckets:
num_peers += bucket.__len__()
info["known peers"] = num_peers
info["stored messages"] = len(self.kserver.storage.data)
size = sys.getsizeof(self.kserver.storage.data)
size += sum(map(sys.getsizeof, self.kserver.storage.data.itervalues())) + sum(
map(sys.getsizeof, self.kserver.storage.data.iterkeys()))
info["db size"] = size
return info
def jsonrpc_set(self, keyword, key):
def handle_result(result):
print "JSONRPC result:", result
d = self.kserver.set(str(keyword), unhexlify(key), self.kserver.node.getProto().SerializeToString())
d.addCallback(handle_result)
return "Sending store request..."
def jsonrpc_get(self, keyword):
def handle_result(result):
print "JSONRPC result:", result
for mod in result:
try:
val = objects.Value()
val.ParseFromString(mod)
node = objects.Node()
node.ParseFromString(val.serializedData)
print node
except Exception as e:
print 'malformed protobuf', e.message
d = self.kserver.get(keyword)
d.addCallback(handle_result)
return "Sent get request. Check log output for result"
def jsonrpc_delete(self, keyword, key):
def handle_result(result):
print "JSONRPC result:", result
signature = self.keys.signing_key.sign(digest(key))
d = self.kserver.delete(str(keyword), digest(key), signature[:64])
d.addCallback(handle_result)
return "Sending delete request..."
def jsonrpc_shutdown(self):
for addr in self.kserver.protocol:
connection = self.kserver.protocol._active_connections.get(addr)
if connection is not None:
connection.shutdown()
return "Closing all connections."
def jsonrpc_getpeers(self):
peers = []
for bucket in self.kserver.protocol.router.buckets:
for node in bucket.getNodes():
peers.append(node.id.encode("hex"))
return peers
def jsonrpc_getnode(self, guid):
def print_node(node):
print node.ip, node.port
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(print_node)
return "finding node..."
def jsonrpc_getcontract(self, contract_hash, guid):
def get_node(node):
def print_resp(resp):
print resp
if node is not None:
d = self.mserver.get_contract(node, unhexlify(contract_hash))
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting contract..."
def jsonrpc_getimage(self, image_hash, guid):
def get_node(node):
def print_resp(resp):
print resp
if node is not None:
d = self.mserver.get_image(node, unhexlify(image_hash))
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting image..."
def jsonrpc_getprofile(self, guid):
start = time.time()
def get_node(node):
def print_resp(resp):
print time.time() - start
print resp
print hexlify(resp.encryption_key)
if node is not None:
d = self.mserver.get_profile(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting profile..."
def jsonrpc_getusermetadata(self, guid):
start = time.time()
def get_node(node):
def print_resp(resp):
print time.time() - start
print resp
if node is not None:
d = self.mserver.get_user_metadata(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting user metadata..."
def jsonrpc_getlistings(self, guid):
start = time.time()
def get_node(node):
def print_resp(resp):
print time.time() - start
if resp:
for l in resp.listing:
resp.listing.remove(l)
h = l.contract_hash
l.contract_hash = hexlify(h)
resp.listing.extend([l])
print resp
if node is not None:
d = self.mserver.get_listings(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting listing metadata..."
def jsonrpc_getcontractmetadata(self, guid, contract_hash):
start = time.time()
def get_node(node):
def print_resp(resp):
print time.time() - start
print resp
if node is not None:
d = self.mserver.get_contract_metadata(node, unhexlify(contract_hash))
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting contract metadata..."
def jsonrpc_setasmoderator(self):
self.mserver.make_moderator()
def jsonrpc_getmoderators(self):
def print_mods(mods):
print mods
self.mserver.get_moderators().addCallback(print_mods)
return "finding moderators in dht..."
def jsonrpc_follow(self, guid):
def get_node(node):
if node is not None:
def print_resp(resp):
print resp
d = self.mserver.follow(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "following node..."
def jsonrpc_unfollow(self, guid):
def get_node(node):
if node is not None:
def print_resp(resp):
print resp
d = self.mserver.unfollow(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "unfollowing node..."
def jsonrpc_getfollowers(self, guid):
def get_node(node):
if node is not None:
def print_resp(resp):
print resp
d = self.mserver.get_followers(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting followers..."
def jsonrpc_getfollowing(self, guid):
def get_node(node):
if node is not None:
def print_resp(resp):
print resp
d = self.mserver.get_following(node)
d.addCallback(print_resp)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "getting following..."
def jsonrpc_sendnotification(self, message):
def get_count(count):
print "Notification reached %i follower(s)" % count
d = self.mserver.send_notification(message)
d.addCallback(get_count)
return "sendng notification..."
def jsonrpc_sendmessage(self, guid, pubkey, message, offline=False):
def get_node(node):
if node is not None or offline is True:
if offline is True:
node = Node(unhexlify(guid), "127.0.0.1", 1234, digest("adsf"))
self.mserver.send_message(node, pubkey, objects.Plaintext_Message.CHAT, message)
d = self.kserver.resolve(unhexlify(guid))
d.addCallback(get_node)
return "sending message..."
def jsonrpc_getmessages(self):
class GetMyMessages(object):
implements(MessageListener)
@staticmethod
def notify(sender_guid, encryption_pubkey, subject, message_type, message):
print message
self.mserer.get_messages(GetMyMessages())
return "getting messages..."
if __name__ == "__main__":
    # Connect to the local JSON-RPC server and dispatch the CLI command.
    # NOTE: ``proxy`` is module-level on purpose -- the Parser command
    # methods reference it directly.
    proxy = Proxy('127.0.0.1', 18465)
    Parser(proxy)
|
the9ull/OpenBazaar-Server
|
networkcli.py
|
Python
|
mit
| 27,857
|
import redis
import yaml
import json
from slackclient import SlackClient
##############
# user stuff #
##############
class UserMap:
    """Bidirectional user_id <-> name lookup backed by one redis hash.

    A single instance lives in the game state (created at game start) so
    we never have to keep two dictionaries in sync.  Supported lookups:
        user_id -> name
        name -> user_id
        (user_id with DM set) -> direct-message channel id
    """

    def __init__(self):
        self.r_server = redis.Redis('localhost')

    def add(self, user_id, name, DM):
        """Record user_id -> name, name -> user_id and DM:user_id -> channel."""
        entries = {
            user_id: name,
            name: user_id,
            'DM:' + user_id: DM,
        }
        self.r_server.hmset('users:game', entries)

    def get(self, user_id=None, name=None, DM=None):
        """Look up a name, id or DM channel; None when no key was given."""
        if DM and user_id:
            field = 'DM:' + user_id
        elif user_id:
            field = user_id
        elif name:
            field = name
        else:
            return None
        return self.r_server.hmget('users:game', field)[0]
def get_user_name(user_id):
    """Resolve a slack user_id to a user name and cache it in the UserMap.

    Also opens (and stores) the user's direct-message channel.  Slack is
    polled at most twice; a second failure propagates to the caller.
    """
    config = yaml.load(file('rtmbot.conf', 'r'))
    sc = SlackClient(config['SLACK_TOKEN'])
    user_map = UserMap()

    def poll_slack_for_user():
        # Two API calls: profile first, then open a DM channel.
        info = json.loads(sc.api_call('users.info', user=user_id))
        name = info['user']['name']
        dm = json.loads(sc.api_call('im.open', user=user_id))
        return name, dm['channel']['id']

    try:
        user_name, im = poll_slack_for_user()
    except Exception as e:
        print(e)
        # Transient Slack hiccups are common; retry exactly once.
        user_name, im = poll_slack_for_user()
    if user_name:
        user_map.add(user_id, user_name, im)
    return user_name
|
nickweinberg/werewolf-slackbot
|
plugins/werewolf/user_map.py
|
Python
|
mit
| 1,736
|
import collections
import cProfile
import pstats
import functools
import unittest
data = """turn on 489,959 through 759,964
turn off 820,516 through 871,914
turn off 427,423 through 929,502
turn on 774,14 through 977,877
turn on 410,146 through 864,337
turn on 931,331 through 939,812
turn off 756,53 through 923,339
turn off 313,787 through 545,979
turn off 12,823 through 102,934
toggle 756,965 through 812,992
turn off 743,684 through 789,958
toggle 120,314 through 745,489
toggle 692,845 through 866,994
turn off 587,176 through 850,273
turn off 674,321 through 793,388
toggle 749,672 through 973,965
turn on 943,30 through 990,907
turn on 296,50 through 729,664
turn on 212,957 through 490,987
toggle 171,31 through 688,88
turn off 991,989 through 994,998
turn off 913,943 through 958,953
turn off 278,258 through 367,386
toggle 275,796 through 493,971
turn off 70,873 through 798,923
toggle 258,985 through 663,998
turn on 601,259 through 831,486
turn off 914,94 through 941,102
turn off 558,161 through 994,647
turn on 119,662 through 760,838
toggle 378,775 through 526,852
turn off 384,670 through 674,972
turn off 249,41 through 270,936
turn on 614,742 through 769,780
turn on 427,70 through 575,441
turn on 410,478 through 985,753
turn off 619,46 through 931,342
turn on 284,55 through 768,922
turn off 40,592 through 728,685
turn on 825,291 through 956,950
turn on 147,843 through 592,909
turn off 218,675 through 972,911
toggle 249,291 through 350,960
turn off 556,80 through 967,675
toggle 609,148 through 968,279
toggle 217,605 through 961,862
toggle 407,177 through 548,910
toggle 400,936 through 599,938
turn off 721,101 through 925,455
turn on 268,631 through 735,814
toggle 549,969 through 612,991
toggle 553,268 through 689,432
turn off 817,668 through 889,897
toggle 801,544 through 858,556
toggle 615,729 through 832,951
turn off 427,477 through 958,948
turn on 164,49 through 852,946
turn on 542,449 through 774,776
turn off 923,196 through 980,446
toggle 90,310 through 718,846
turn off 657,215 through 744,252
turn off 800,239 through 811,712
turn on 502,90 through 619,760
toggle 649,512 through 862,844
turn off 334,903 through 823,935
turn off 630,233 through 839,445
turn on 713,67 through 839,865
turn on 932,50 through 982,411
turn off 480,729 through 984,910
turn on 100,219 through 796,395
turn on 758,108 through 850,950
turn off 427,276 through 439,938
turn on 178,284 through 670,536
toggle 540,27 through 625,102
turn off 906,722 through 936,948
toggle 345,418 through 859,627
toggle 175,775 through 580,781
toggle 863,28 through 929,735
turn off 824,858 through 905,973
toggle 752,312 through 863,425
turn on 985,716 through 988,852
turn off 68,504 through 763,745
toggle 76,209 through 810,720
turn off 657,607 through 676,664
toggle 596,869 through 896,921
turn off 915,411 through 968,945
turn off 368,39 through 902,986
turn on 11,549 through 393,597
turn off 842,893 through 976,911
toggle 274,106 through 581,329
toggle 406,403 through 780,950
toggle 408,988 through 500,994
toggle 217,73 through 826,951
turn on 917,872 through 961,911
toggle 394,34 through 510,572
toggle 424,603 through 583,626
toggle 106,159 through 755,738
turn off 244,610 through 472,709
turn on 350,265 through 884,690
turn on 688,184 through 928,280
toggle 279,443 through 720,797
turn off 615,493 through 888,610
toggle 118,413 through 736,632
turn on 798,782 through 829,813
turn off 250,934 through 442,972
turn on 68,503 through 400,949
toggle 297,482 through 313,871
toggle 710,3 through 839,859
turn on 125,300 through 546,888
toggle 482,39 through 584,159
turn off 536,89 through 765,962
turn on 530,518 through 843,676
turn on 994,467 through 994,676
turn on 623,628 through 744,927
toggle 704,912 through 837,983
turn on 154,364 through 517,412
toggle 344,409 through 780,524
turn off 578,740 through 725,879
turn on 251,933 through 632,957
turn on 827,705 through 971,789
toggle 191,282 through 470,929
toggle 324,525 through 446,867
toggle 534,343 through 874,971
toggle 550,650 through 633,980
toggle 837,404 through 881,915
toggle 338,881 through 845,905
turn on 469,462 through 750,696
turn on 741,703 through 892,870
turn off 570,215 through 733,562
turn on 445,576 through 870,775
turn on 466,747 through 554,878
turn off 820,453 through 868,712
turn off 892,706 through 938,792
turn off 300,238 through 894,746
turn off 306,44 through 457,444
turn off 912,569 through 967,963
toggle 109,756 through 297,867
turn on 37,546 through 41,951
turn on 321,637 through 790,910
toggle 66,50 through 579,301
toggle 933,221 through 933,791
turn on 486,676 through 878,797
turn on 417,231 through 556,317
toggle 904,468 through 981,873
turn on 417,675 through 749,712
turn on 692,371 through 821,842
toggle 324,73 through 830,543
turn on 912,490 through 977,757
turn off 634,872 through 902,949
toggle 266,779 through 870,798
turn on 772,982 through 990,996
turn off 607,46 through 798,559
turn on 295,602 through 963,987
turn on 657,86 through 944,742
turn off 334,639 through 456,821
turn off 997,667 through 997,670
turn off 725,832 through 951,945
turn off 30,120 through 952,984
turn on 860,965 through 917,976
toggle 471,997 through 840,998
turn off 319,307 through 928,504
toggle 823,631 through 940,908
toggle 969,984 through 981,993
turn off 691,319 through 865,954
toggle 911,926 through 938,929
turn on 953,937 through 968,991
toggle 914,643 through 975,840
turn on 266,982 through 436,996
turn off 101,896 through 321,932
turn off 193,852 through 751,885
turn off 576,532 through 863,684
turn on 761,456 through 940,783
turn on 20,290 through 398,933
turn off 435,335 through 644,652
turn on 830,569 through 905,770
turn off 630,517 through 905,654
turn on 664,53 through 886,976
toggle 275,416 through 408,719
turn on 370,621 through 515,793
turn on 483,373 through 654,749
turn on 656,786 through 847,928
turn off 532,752 through 945,974
toggle 301,150 through 880,792
turn off 951,488 through 958,952
turn on 207,729 through 882,828
toggle 694,532 through 973,961
toggle 676,639 through 891,802
turn off 653,6 through 905,519
toggle 391,109 through 418,312
turn on 877,423 through 957,932
turn on 340,145 through 563,522
turn off 978,467 through 988,895
turn off 396,418 through 420,885
turn off 31,308 through 816,316
turn on 107,675 through 758,824
turn on 61,82 through 789,876
turn on 750,743 through 754,760
toggle 88,733 through 736,968
turn off 754,349 through 849,897
toggle 157,50 through 975,781
turn off 230,231 through 865,842
turn off 516,317 through 630,329
turn off 697,820 through 829,903
turn on 218,250 through 271,732
toggle 56,167 through 404,431
toggle 626,891 through 680,927
toggle 370,207 through 791,514
toggle 860,74 through 949,888
turn on 416,527 through 616,541
turn off 745,449 through 786,908
turn on 485,554 through 689,689
turn on 586,62 through 693,141
toggle 506,759 through 768,829
turn on 473,109 through 929,166
turn on 760,617 through 773,789
toggle 595,683 through 618,789
turn off 210,775 through 825,972
toggle 12,426 through 179,982
turn on 774,539 through 778,786
turn on 102,498 through 121,807
turn off 706,897 through 834,965
turn off 678,529 through 824,627
turn on 7,765 through 615,870
turn off 730,872 through 974,943
turn off 595,626 through 836,711
turn off 215,424 through 841,959
toggle 341,780 through 861,813
toggle 507,503 through 568,822
turn on 252,603 through 349,655
toggle 93,521 through 154,834
turn on 565,682 through 951,954
turn on 544,318 through 703,418
toggle 756,953 through 891,964
turn on 531,123 through 856,991
turn on 148,315 through 776,559
turn off 925,835 through 963,971
turn on 895,944 through 967,964
turn off 102,527 through 650,747
toggle 626,105 through 738,720
turn off 160,75 through 384,922
toggle 813,724 through 903,941
turn on 207,107 through 982,849
toggle 750,505 through 961,697
toggle 105,410 through 885,819
turn on 226,104 through 298,283
turn off 224,604 through 508,762
turn on 477,368 through 523,506
turn off 477,901 through 627,936
turn off 887,131 through 889,670
turn on 896,994 through 938,999
toggle 401,580 through 493,728
toggle 987,184 through 991,205
turn on 821,643 through 882,674
toggle 784,940 through 968,959
turn off 251,293 through 274,632
turn off 339,840 through 341,844
turn off 675,351 through 675,836
toggle 918,857 through 944,886
toggle 70,253 through 918,736
turn off 612,604 through 772,680
turn off 277,40 through 828,348
toggle 692,139 through 698,880
toggle 124,446 through 883,453
toggle 969,932 through 990,945
toggle 855,692 through 993,693
toggle 722,472 through 887,899
toggle 978,149 through 985,442
toggle 837,540 through 916,889
turn off 612,2 through 835,82
toggle 560,767 through 878,856
turn on 461,734 through 524,991
toggle 206,824 through 976,912
turn on 826,610 through 879,892
turn on 577,699 through 956,933
turn off 9,250 through 50,529
turn off 77,657 through 817,677
turn on 68,419 through 86,426
turn on 991,720 through 992,784
turn on 668,20 through 935,470
turn off 133,418 through 613,458
turn off 487,286 through 540,328
toggle 247,874 through 840,955
toggle 301,808 through 754,970
turn off 34,194 through 578,203
turn off 451,49 through 492,921
turn on 907,256 through 912,737
turn off 479,305 through 702,587
turn on 545,583 through 732,749
toggle 11,16 through 725,868
turn on 965,343 through 986,908
turn on 674,953 through 820,965
toggle 398,147 through 504,583
turn off 778,194 through 898,298
turn on 179,140 through 350,852
turn off 241,118 through 530,832
turn off 41,447 through 932,737
turn off 820,663 through 832,982
turn on 550,460 through 964,782
turn on 31,760 through 655,892
toggle 628,958 through 811,992"""
# Immutable 2-D coordinate used by the Grid commands and the instruction parser.
Point = collections.namedtuple("Point", ["x", "y"])
class Grid(object):
    """Dense 2-D grid of integer light-brightness values.

    ``data`` holds ``height`` rows of ``width`` zeroed cells.  All command
    methods operate over the inclusive rectangle startPoint..endPoint.
    NOTE(review): the commands index ``data[x][y]``, so for a non-square
    grid ``x`` must stay below ``height`` -- confirm intended orientation
    (the 1000x1000 puzzle grid is symmetric, so it does not matter there).
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # range() (not the Python-2-only xrange) works identically here.
        self.data = [[0] * width for _ in range(height)]

    def TurnOn(self, startPoint, endPoint):
        """Increase brightness by 1 across the inclusive rectangle."""
        # Direct nested loops were measured ~4x faster than a generator.
        for x in range(startPoint.x, endPoint.x + 1):
            for y in range(startPoint.y, endPoint.y + 1):
                self.data[x][y] += 1

    def TurnOff(self, startPoint, endPoint):
        """Decrease brightness by 1 across the rectangle, clamping at 0."""
        for x in range(startPoint.x, endPoint.x + 1):
            for y in range(startPoint.y, endPoint.y + 1):
                # Branch is faster than max(value - 1, 0) for this workload.
                if self.data[x][y] > 0:
                    self.data[x][y] -= 1

    def Toggle(self, startPoint, endPoint):
        """Increase brightness by 2 across the inclusive rectangle."""
        for x in range(startPoint.x, endPoint.x + 1):
            for y in range(startPoint.y, endPoint.y + 1):
                self.data[x][y] += 2

    def TotalBrightness(self):
        """Return the sum of every cell's brightness."""
        # Equivalent to the old double loop; zero cells contribute nothing,
        # so the previous truthiness check was redundant.
        return sum(sum(row) for row in self.data)
class Interpreter(object):
    """Parses textual light instructions and applies them to a grid.

    Instruction format: "<command> X1,Y1 through X2,Y2" where <command>
    is one of "turn on", "turn off", "toggle".
    """

    def __init__(self, grid):
        self.grid = grid
        # Maps each instruction prefix to the grid operation it invokes.
        self.COMMANDS = {
            "turn on": self.grid.TurnOn,
            "turn off": self.grid.TurnOff,
            "toggle": self.grid.Toggle,
        }

    def Eval(self, instructions):
        """Parse and execute each instruction line in order."""
        for instruction in instructions:
            self._eval(*self._parse(instruction))

    def _parse(self, instruction):
        """Split one line into (command string, (startPoint, endPoint))."""
        command, rest = self._parseCommand(instruction)
        return command, self._parseArgs(rest)

    def _parseCommand(self, instruction):
        """Return (matching command prefix, remainder of the line)."""
        # Plain dict iteration replaces the Python-2-only iterkeys();
        # startswith() is the idiom for the old slice comparison.
        for command_str in self.COMMANDS:
            if instruction.startswith(command_str):
                return command_str, instruction[len(command_str):]

    def _parseArgs(self, instruction):
        """Parse ' X1,Y1 through X2,Y2' into a pair of Points."""
        startPoint, _, endPoint = instruction.split()
        return self._parsePoint(startPoint), self._parsePoint(endPoint)

    def _parsePoint(self, point_str):
        """Parse 'X,Y' into a Point of ints."""
        x, y = point_str.split(",")
        return Point(int(x), int(y))

    def _eval(self, command, args):
        """Dispatch a parsed command name to its grid method."""
        self.COMMANDS[command](*args)
# ------------- ANSWER
# Profile the full puzzle run and report where time is spent.
pr = cProfile.Profile()
pr.enable()
grid = Grid(1000, 1000)
interpreter = Interpreter(grid)
# `data` is the puzzle-input string defined earlier in this file.
interpreter.Eval(data.split("\n"))
print grid.TotalBrightness()
pr.disable()
# Rank functions by their own (non-cumulative) run time.
ps = pstats.Stats(pr).sort_stats("tottime")
ps.print_stats()
ps.print_callers()
# ------------- TESTS
class GridTests(unittest.TestCase):

    def test_Construction(self):
        """A new grid is height rows by width columns, all zero."""
        grid = Grid(3, 2)
        self.assertEqual(grid.data, [[0, 0, 0], [0, 0, 0]])

    def test_Commands(self):
        """TurnOn/TurnOff/Toggle update exactly the requested rectangle."""
        grid = Grid(4, 3)
        grid.TurnOn(Point(0, 1), Point(1, 3))
        expected = [
            [0, 1, 1, 1],
            [0, 1, 1, 1],
            [0, 0, 0, 0],
        ]
        self.assertEqual(grid.data, expected)
        grid.TurnOff(Point(0, 1), Point(1, 2))
        expected = [
            [0, 0, 0, 1],
            [0, 0, 0, 1],
            [0, 0, 0, 0],
        ]
        self.assertEqual(grid.data, expected)
        grid.Toggle(Point(0, 2), Point(2, 3))
        expected = [
            [0, 0, 2, 3],
            [0, 0, 2, 3],
            [0, 0, 2, 2],
        ]
        self.assertEqual(grid.data, expected)
        self.assertEqual(grid.TotalBrightness(), 14)
class InterpreterTests(unittest.TestCase):

    def test_Interpreter(self):
        """Eval parses instructions and dispatches them with Point args."""

        class RecordingGrid(object):
            """Fake grid that records every dispatched call."""

            def __init__(self):
                self.log = []

            def TurnOn(self, startPoint, endPoint):
                self.log.append(("turn on", startPoint, endPoint))

            def TurnOff(self, startPoint, endPoint):
                self.log.append(("turn off", startPoint, endPoint))

            def Toggle(self, startPoint, endPoint):
                self.log.append(("toggle", startPoint, endPoint))

        grid = RecordingGrid()
        interpreter = Interpreter(grid)
        interpreter.Eval([
            "turn on 489,959 through 759,964",
            "turn off 820,516 through 871,914",
            "toggle 297,482 through 313,871",
        ])
        self.assertEqual(grid.log, [
            ("turn on", Point(489, 959), Point(759, 964)),
            ("turn off", Point(820, 516), Point(871, 914)),
            ("toggle", Point(297, 482), Point(313, 871)),
        ])
# Running this file directly executes the unit tests above (the profiled
# answer section still runs first, at import time).
if __name__ == "__main__":
    unittest.main()
|
misterwilliam/advent-of-code
|
6/main.py
|
Python
|
mit
| 13,814
|
# -*- coding: utf-8 -*-
import os
from fabric.api import *
import config
import david
# Example usage
# Hosts Fabric will run remote commands on (user@host).
env.hosts = ['david@david']
# Local paths to the application package and its gettext catalogs.
APP_ROOT = os.path.dirname(os.path.abspath(__file__)) + '/david'
TRANSLATION_ROOT = APP_ROOT + '/translations'
# Remote deployment root and its Alembic migration config file.
REMOTE_APP_ROOT = '/srv/user/david/app/tongdawei.cc'
REMOTE_ALEMBIC_CONFIG_FILE = REMOTE_APP_ROOT + '/local_alembic.ini'
def babel():
    """Run the full localization refresh: init, extract+update, compile."""
    babel_init()
    babel_update()
    babel_compile()
def babel_extract():
    """Extract translatable strings into /tmp/messages.pot.

    The two %s placeholders receive the package version and APP_ROOT; the
    trailing 'david/ config/' lists additional source dirs to scan.
    """
    local('pybabel extract -F david/babel.cfg '
          '--msgid-bugs-address "kisdmud@gmail.com" '
          '--project david '
          '--version %s '
          '--copyright-holder "Jesse Yang" '
          '-o /tmp/messages.pot %s '
          'david/ config/' % (david.__version__, APP_ROOT))
def babel_update():
    """Re-extract strings and merge them into the existing catalogs."""
    babel_extract()
    local('pybabel update -i /tmp/messages.pot '
          '-d %s' % TRANSLATION_ROOT)
def babel_compile():
    """Compile the .po catalogs into binary .mo files (forced, with stats)."""
    local('pybabel compile -d %s -f --statistics' % TRANSLATION_ROOT)
def babel_init():
    """Create catalog dirs for configured languages that don't exist yet.

    NOTE(review): this reads ./messages.pot while babel_extract writes to
    /tmp/messages.pot -- confirm which file is intended.
    """
    langs = config.BABEL_ACCEPT_LANGUAGE
    for l in langs:
        if os.path.exists(os.path.join(TRANSLATION_ROOT, l)):
            print 'Skip existing translation dir %s' % l
            continue
        local('pybabel init -i messages.pot -d %s -l %s' % (TRANSLATION_ROOT, l))
def deploy():
    """Install deps, build assets, migrate the DB and restart the app remotely."""
    with cd(REMOTE_APP_ROOT):
        run('source venv/bin/activate && pip install -r requirements.txt')
        # Front-end build (npm dependencies + grunt) inside the static dir.
        run('cd ./david/static && npm install && grunt build')
        run('source venv/bin/activate && alembic -c %s upgrade head' % REMOTE_ALEMBIC_CONFIG_FILE)
        # Drop stale .pyc files, then restart the supervised app process.
        run('make dpyc')
        run('sudo supervisorctl -c /etc/supervisor/supervisord.conf restart david')
def fillup():
    """Re-seed remote data after interactive confirmation (destructive)."""
    ret = prompt('Discard existing remote data?', default='Y')
    if ret != 'Y':
        return
    with cd(REMOTE_APP_ROOT):
        run('source venv/bin/activate && make fillup')
def bootstrap():
    """Create the remote virtualenv and install gunicorn into it."""
    with cd(REMOTE_APP_ROOT):
        run('virtualenv --distribute venv')
        run('source venv/bin/activate && pip install gunicorn')
|
ktmud/david
|
fabfile.py
|
Python
|
mit
| 2,023
|
import os
import warnings
from gcloud import storage
import pocs.utils.logger
class PanStorage(object):
    """ Class for interacting with Google Cloud Platform """

    def __init__(self, project_id='panoptes-survey', bucket_name=None, prefix=None):
        """Connect to `bucket_name` within `project_id`.

        Raises AssertionError when no bucket name is supplied.
        """
        # Plain string message: the original used warnings.warn(...) here,
        # which made a failing assertion carry None as its message.
        assert bucket_name is not None, "A valid bucket name is required."
        super(PanStorage, self).__init__()
        self.logger = pocs.utils.logger.get_root_logger()
        self.project_id = project_id
        self.prefix = prefix
        self.client = storage.Client(self.project_id)
        self.bucket_name = bucket_name
        self.bucket = self.client.get_bucket(bucket_name)

    def list_remote(self, prefix=None):
        """Return a list of blob names in the bucket with the given prefix.

        Falls back to the instance-level prefix when none is given.
        """
        if not prefix:
            prefix = self.prefix
        return [blob.name for blob in self.bucket.list_blobs(prefix=prefix)]

    def upload(self, local_path, remote_path=None):
        """Upload `local_path` to the bucket; return the remote object name."""
        assert self.project_id and os.path.exists(local_path)
        self.logger.debug('Building upload request...')
        if remote_path is None:
            remote_path = local_path
        # Bug fix: the original mixed %-style placeholders with .format(),
        # so the log line printed literal '%s' and dropped all arguments.
        self.logger.debug('Uploading file: {} to bucket: {} object: {} '.format(
            local_path, self.bucket.name, remote_path))
        try:
            self.bucket.blob(remote_path).upload_from_filename(
                filename=local_path)
            self.logger.debug('Upload complete!')
        except Exception as err:
            # Best-effort: log and still return the intended remote path.
            self.logger.warning(
                'Problem uploading file {}: {}'.format(local_path, err))
        return remote_path

    def download(self, remote_path, local_path=None):
        """Download `remote_path` from the bucket; return the local path."""
        if local_path is None:
            local_path = '{}/temp/{}'.format(os.getenv('PANDIR'), remote_path)
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        try:
            self.bucket.get_blob(remote_path).download_to_filename(
                filename=local_path)
            self.logger.debug('Download complete!')
        except Exception as err:
            self.logger.warning(
                'Problem downloading {}: {}'.format(remote_path, err))
        return local_path

    def upload_string(self, data, remote_path):
        """Upload the string `data` to `remote_path`; return the object name."""
        # Reuse the existing blob when present, otherwise create a new one;
        # the original duplicated the whole try/except for both cases.
        if remote_path in self.list_remote():
            blob = self.bucket.get_blob(remote_path)
        else:
            blob = self.bucket.blob(remote_path)
        try:
            blob.upload_from_string(data)
            self.logger.debug('String upload complete!')
        except Exception as err:
            self.logger.warning('Problem uploading string: {}'.format(err))
        return remote_path

    def download_string(self, remote_path):
        """Download `remote_path` and return its contents as a byte string.

        Returns None when the download fails; previously `data` was left
        unbound in that case, raising an unrelated NameError on return.
        """
        data = None
        try:
            data = self.bucket.get_blob(remote_path).download_as_string()
            self.logger.debug('String download complete!')
        except Exception as err:
            self.logger.warning(
                'Problem downloading {}: {}'.format(remote_path, err))
        return data
|
AstroHuntsman/POCS
|
pocs/utils/google/storage.py
|
Python
|
mit
| 3,627
|
class Entity():
    """
    Class to represent an entity used to hold the information for debtor and
    creditor

    @ivar _name: Entity name
    @type _name: string
    @ivar _address_lines: List of address lines for the entity (max 5 lines)
    @type _address_lines: string
    @ivar _country: Country
    @type _country: string
    """

    def __init__(self, name, country, *adr_lines):
        """
        @param name: string
        @type name: Name of the entity
        @param country: string
        @type country: Entity's country
        @param *adr_lines: Maximum of 5 address-lines. First add street address, then postal code
        @type *adr_lines: string
        @raise IndexError: If the number of address lines passed is over 5
        """
        # Validate before assigning anything so a failed construction leaves
        # no partially initialized state behind.
        if len(adr_lines) > 5:
            raise IndexError('Too many address lines, maximum allowed is 5')
        self._name = name
        self._country = country
        self._address_lines = list(adr_lines)
|
luojus/bankws
|
bankws/entity.py
|
Python
|
mit
| 1,061
|
import unittest
from irc_hooky.github.pull_request import PullRequest
from irc_hooky.github.pull_request_event import PullRequestEvent
from irc_hooky.github.github_user import GithubUser
class TestPullRequestEvent(unittest.TestCase):

    def setUp(self):
        # Every test starts from a freshly constructed event.
        self.event = PullRequestEvent()

    def test_default_setup(self):
        """A fresh event carries the default placeholder values."""
        self.assertEqual(self.event.action, "<No Action Specified>")
        self.assertEqual(self.event.number, "")
        self.assertEqual(self.event.pull_request, PullRequest())
        self.assertEqual(self.event.sender, GithubUser())

    def test_load_empty_secondary_objects(self):
        """Loading an empty payload leaves every default in place."""
        self.event.load({})
        self.assertEqual(self.event.action, "<No Action Specified>")
        self.assertEqual(self.event.number, "")
        self.assertEqual(self.event.pull_request, PullRequest())
        self.assertEqual(self.event.sender, GithubUser())

    def test_non_empty_pr(self):
        """A payload with action and PR populates those fields only."""
        self.event.load({
            "action": "sleep",
            "pull_request": {"title": "new pr"},
        })
        self.assertEqual(self.event.action, "sleep")
        self.assertEqual(self.event.number, "")
        self.assertEqual(self.event.pull_request, PullRequest(title="new pr"))
        self.assertEqual(self.event.sender, GithubUser())

    def test_non_empty_sender(self):
        """A full payload populates action, number, PR and sender."""
        self.event.load({
            "action": "sleep",
            "number": "42",
            "pull_request": {"title": "new pr"},
            "sender": {"login": "steven"},
        })
        self.assertEqual(self.event.action, "sleep")
        self.assertEqual(self.event.number, "42")
        self.assertEqual(self.event.pull_request, PullRequest(title="new pr"))
        self.assertEqual(self.event.sender, GithubUser(login="steven"))
|
marvinpinto/irc-hooky
|
tests/github/test_pull_request_event.py
|
Python
|
mit
| 1,926
|
# Menu data for each course: (menu lines to print, prompt, calories by choice).
_COURSES = [
    (["(1)Cheeseburger (461 calories)",
      "(2)Fish Burger (431 calories)",
      "(3)Veggie Burger (420 calories)",
      "(4)None(0 calories)"],
     "Please enter a burger choice:",
     {1: 461, 2: 431, 3: 420}),
    (["(1)fries (100 calories)",
      "(2)Baked Potato(57 calories)",
      "(3)Chef salad (70 calories)",
      "(4)None(0 calories)"],
     "Please enter a Side order choice:",
     {1: 100, 2: 57, 3: 70}),
    (["(1)Soft Drink (130 calories)",
      "(2)Orange Juice (160 calories)",
      "(3)Milk (118 calories)",
      "(4)None(0 calories)"],
     "Please enter a Drink choice:",
     {1: 130, 2: 160, 3: 118}),
    (["(1)Apple Pie (167 calories)",
      "(2)Sundae (266 calories)",
      "(3)Fruit Cup (75 calories)",
      "(4)None(0 calories)"],
     "Please enter a dessert choice:",
     {1: 167, 2: 266, 3: 75}),
]


def _read_choice(prompt):
    """Read a menu choice as an int; non-numeric input counts as 'none' (0).

    Bug fix: the original compared input() (a str on Python 3) against
    ints, so no choice ever matched and every total came out 0.
    """
    try:
        return int(input(prompt))
    except ValueError:
        return 0


def main():
    """Run the order dialogue, print the calorie total, and return it."""
    cal = 0
    print("Welcome to Chip's fast food imporium")
    for menu_lines, prompt, calories in _COURSES:
        for line in menu_lines:
            print(line)
        choice = _read_choice(prompt)
        # Choice 4 ("None") and unknown choices add nothing, as before.
        cal += calories.get(choice, 0)
    print (cal)
    return cal


if __name__ == "__main__":
    main()
|
lizerd123/github
|
Dungeon/burger.py
|
Python
|
mit
| 1,185
|
from datetime import datetime
from unittest import skip
from django.test import TestCase
from pyanalysis.apps.corpus import models as corpus_models
from pyanalysis.apps.dimensions import registry
class DatasetModelTest(TestCase):

    def test_created_at_set(self):
        """Dataset.created_at should get set automatically."""
        dataset = corpus_models.Dataset.objects.create(
            name="Test Corpus", description="My Dataset")
        self.assertIsInstance(dataset.created_at, datetime)
class MessageModelTest(TestCase):
    """Tests for creating and retrieving Messages within a Dataset."""

    def setUp(self):
        self.dataset = corpus_models.Dataset.objects.create(name="Test Corpus", description="My Dataset")

    def test_can_get_message(self):
        """Should be able to get messages from a dataset."""
        corpus_models.Message.objects.create(dataset=self.dataset, text="Some text")
        msgs = self.dataset.message_set.all()
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(msgs.count(), 1)
        self.assertEqual(msgs.first().text, "Some text")
class GetExampleMessageTest(TestCase):
    """Tests for Dataset.get_example_messages filtering behavior.

    Uses assertEqual throughout: assertEquals is a deprecated alias,
    removed in Python 3.12.
    """

    def generate_some_messages(self, dataset):
        """Create one plain and one hashtagged/shared message in `dataset`.

        Also caches the dimension objects the filter tests use.
        """
        corpus_models.Message.objects.create(
            dataset=dataset,
            text="blah blah blah",
            time="2015-02-02T01:19:02Z",
            shared_count=0,
        )
        hashtag = corpus_models.Hashtag.objects.create(text="OurPriorities")
        msg = corpus_models.Message.objects.create(
            dataset=dataset,
            text="blah blah blah #%s" % hashtag.text,
            time="2015-02-02T01:19:02Z",
            shared_count=10,
        )
        msg.hashtags.add(hashtag)
        self.dimension_time = registry.get_dimension('time')
        self.dimension_hashtags = registry.get_dimension('hashtags')
        self.dimension_shared = registry.get_dimension('shares')

    def setUp(self):
        self.dataset = corpus_models.Dataset.objects.create(name="Test Corpus", description="My Dataset")
        self.generate_some_messages(self.dataset)

    def test_with_no_filters(self):
        """Empty filter settings should return all messages"""
        filters = {}
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 2)

    def test_with_inclusive_filters(self):
        """Filters that include all the messages."""
        filters = [
            {
                "dimension": self.dimension_time,
                "min_time": "2015-02-02T01:19:02Z",
                "max_time": "2015-02-02T01:19:03Z"
            },
            {
                "dimension": self.dimension_time,
                "value": "2015-02-02T01:19:02Z",
            }
        ]
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 2)

    def test_with_exclusive_filters(self):
        """Filters that exclude all the messages"""
        filters = [
            {
                "dimension": self.dimension_time,
                "min_time": "2015-02-02T01:19:03Z",
                "max_time": "2015-02-02T01:19:03Z"
            },
            {
                "dimension": self.dimension_time,
                "value": "2015-02-02T01:19:03Z",
            }
        ]
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 0)

    def test_quantitative_filter(self):
        """Filter on a numeric field"""
        filters = [
            {
                "dimension": self.dimension_shared,
                "min": 5,
                "max": 15,
            },
        ]
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 1)

    def test_value_match(self):
        """Filters that match just one message"""
        filters = [
            {
                "dimension": self.dimension_hashtags,
                "value": "OurPriorities",
            }
        ]
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 1)

    def test_dataset_specific_examples(self):
        """Does not mix messages across datasets."""
        other_dataset = corpus_models.Dataset.objects.create(name="second test corpus", description="blah")
        self.generate_some_messages(other_dataset)
        filters = {}
        msgs = self.dataset.get_example_messages(filters)
        self.assertEqual(msgs.count(), 2)
|
nanchenchen/script-analysis
|
pyanalysis/apps/corpus/tests.py
|
Python
|
mit
| 4,324
|
"""
WSGI config for pizzaweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pizzaweb.settings")
application = get_wsgi_application()
|
DaBbleR23/Pizza-web
|
pizzaweb_backend/pizzaweb/pizzaweb/wsgi.py
|
Python
|
mit
| 393
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum

# Enum of reasons a host is incompatible for record/replay.
# NOTE(review): generated file -- regenerate rather than hand-edit.
HostIncompatibleForRecordReplayReason = Enum(
    'processor',
    'product',
)
|
xuru/pyvisdk
|
pyvisdk/enums/host_incompatible_for_record_replay_reason.py
|
Python
|
mit
| 243
|
import sys
from setuptools import setup, find_packages
# Older Pythons need backports of argparse/ordereddict.
py26_dependency = []
if sys.version_info[:2] <= (2, 6):
    py26_dependency = ["argparse >= 1.1", "ordereddict >= 1.1"]

setup(
    name='dataset',
    version='0.7.0',
    description="Toolkit for Python-based data processing.",
    long_description="",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        # Bug fix: the 3.3/3.4/3.5 entries were missing commas, so implicit
        # string concatenation merged them into one invalid classifier.
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='sql sqlalchemy etl loading utility',
    author='Friedrich Lindenberg, Gregor Aisch, Stefan Wehrmeyer',
    author_email='info@okfn.org',
    url='http://github.com/pudo/dataset',
    license='MIT',
    packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
    namespace_packages=[],
    include_package_data=False,
    zip_safe=False,
    install_requires=[
        'sqlalchemy >= 0.9.1',
        'alembic >= 0.6.2',
        'normality >= 0.2.2',
        "PyYAML >= 3.10",
        "six >= 1.7.3"
    ] + py26_dependency,
    tests_require=[],
    test_suite='test',
    entry_points={
        'console_scripts': [
            'datafreeze = dataset.freeze.app:main',
        ]
    }
)
|
stefanw/dataset
|
setup.py
|
Python
|
mit
| 1,472
|
import _plotly_utils.basevalidators
class SymmetricValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean `scatter.error_y.symmetric` attribute."""

    def __init__(
        self, plotly_name="symmetric", parent_name="scatter.error_y", **kwargs
    ):
        super(SymmetricValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type; callers may override it through kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatter/error_y/_symmetric.py
|
Python
|
mit
| 425
|
#!/usr/bin/env python3
##
## Mixlr ChatBox (IRC)
##
## Version: idunnolol?
## Dependency: https://pypi.python.org/pypi/irc
##
## Description: this is poorly commented and written. I don't know what I'm doing, lol?
## Original Author: Bob Barker
##
## PS. this is really dumb, like multiple layers of abstraction dumb.
##
from mixlr import broadcast
from time import sleep
import irc.bot
class ircBot(irc.bot.SingleServerIRCBot):
    """IRC bot that relays Mixlr broadcast comments into an IRC channel."""

    def checkMixlrComments(self):
        """Poll Mixlr for new comments and forward each one to the channel."""
        for comment in self.mixlr.updateUserData():
            print ('\n===newComment===\n', comment)
            self.connection.privmsg(self.channel, ('['+comment['name']+']: ' + comment['content']))
            # Brief pause between messages to avoid flooding the server.
            sleep(0.1)

    def __init__(self, channel, nickname, server, port=6667):
        ## [TEMPORARY]
        ## WARNING: Do not share token, userLogin or session. They can be used to hijack your Mixlr user.
        ## Add your authenticity_token (HTTP POST Data) [Optional?]
        token = ''
        ## Add your mixlr_user_login (Cookie) [Optional?]
        userLogin = ''
        ## Add your mixlr_session (Cookie) [Required for sendFunctions]
        session = ''
        broadcasterName = 'jeff-gerstmann'
        self.mixlr = broadcast(broadcasterName, session, userLogin, token)
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
        self.channel = channel
        # Poll Mixlr for new comments every 2 seconds on the IRC event loop.
        self.connection.execute_every(2, self.checkMixlrComments)

    def on_nicknameinuse(self, c, e):
        # Append an underscore until the nickname is free.
        c.nick(c.get_nickname() + "_")

    def on_welcome(self, c, e):
        # Join the target channel once the server greets us.
        c.join(self.channel)
## Some example code that might be useful later
# def on_privmsg(self, c, e):
# self.do_command(e, e.arguments[0])
# def on_pubmsg(self, c, e):
# a = e.arguments[0].split(":", 1)
# if len(a) > 1 and irc.strings.lower(a[0]) == irc.strings.lower(self.connection.get_nickname()):
# self.do_command(e, a[1].strip())
# return
# def do_command(self, e, cmd):
# nick = e.source.nick
# c = self.connection
# if cmd == "disconnect":
# self.disconnect()
# elif cmd == "die":
# self.die()
# elif cmd == "stats":
# for chname, chobj in self.channels.items():
# c.notice(nick, "--- Channel statistics ---")
# c.notice(nick, "Channel: " + chname)
# users = chobj.users()
# users.sort()
# c.notice(nick, "Users: " + ", ".join(users))
# opers = chobj.opers()
# opers.sort()
# c.notice(nick, "Opers: " + ", ".join(opers))
# voiced = chobj.voiced()
# voiced.sort()
# c.notice(nick, "Voiced: " + ", ".join(voiced))
# elif cmd == "dcc":
# dcc = self.dcc_listen()
# c.ctcp("DCC", nick, "CHAT chat %s %d" % (
# ip_quad_to_numstr(dcc.localaddress),
# dcc.localport))
# else:
# c.notice(nick, "Not understood: " + cmd)
def main():
    """Parse command-line arguments and start the relay bot.

    Usage: chatbox <server[:port]> <channel> <nickname>
    """
    import sys
    if len(sys.argv) != 4:
        print("Usage: chatbox <server[:port]> <channel> <nickname>")
        sys.exit(1)
    host_spec = sys.argv[1].split(":", 1)
    server = host_spec[0]
    port = 6667
    if len(host_spec) == 2:
        try:
            port = int(host_spec[1])
        except ValueError:
            print("Error: Erroneous port.")
            sys.exit(1)
    channel, nickname = sys.argv[2], sys.argv[3]
    ircBot(channel, nickname, server, port).start()


if __name__ == "__main__":
    main()
|
therealtakeshi/jefflr
|
pymixlr/chatbox.py
|
Python
|
mit
| 3,106
|
# -*- encoding: utf-8 -*-
import socket
class conversador:
    """TCP chat client: connects, sends/receives messages, and closes."""

    def __init__(self):
        # Fixed server port and a blocking internet (AF_INET) stream socket;
        # `data` buffers the most recently received message.
        self.PORT = 50007
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.data = ''

    def connectar(self, identitat, adreca):
        """
        Establish the connection with the server found at `adreca` (use
        'localhost' when it is the same computer) on port 50007.  The
        communication uses the internet protocol (socket.AF_INET) in
        stream mode (socket.SOCK_STREAM) and is blocking.  Once connected,
        send the server this client's identification, `identitat`.
        """
        try:
            self.s.connect((adreca, self.PORT))
            print 'Connectat amb exit!'
        except:
            # NOTE(review): bare except hides the real error, and the send
            # below still runs even when the connection failed.
            print 'No es pot connectar al servidor'
        # Now send the communicator's identity.
        print '\\I ' + identitat + chr(3)
        self.s.send('\\I ' + identitat + chr(3))  # \I marker opens the conversation

    def parla(self, miss):
        """
        Send the message `miss` (terminated by chr(3)) to the server.
        """
        try:
            self.s.send(miss + chr(3))
        except:
            print 'No es pot enviar el missatge'

    def escolta(self):
        """
        Send the server the "\\M" message, terminated with the chr(3)
        character.  Then wait to receive a message from the server (also
        terminated with the chr(3) sentinel) and return it.  An empty
        string means the server had no message pending.
        """
        self.s.send('\\M' + chr(3))
        try:
            self.data = self.s.recv(1024)
        except:
            print 'Cap missatge pendent'
        if self.data == '':
            return ''
        else:
            return self.data

    def tanca(self):
        """
        Send the server the "\\F" message, terminated with chr(3), and
        close the socket created by the constructor.
        """
        self.s.send('\\F' + chr(3))  # end the session
        self.s.close()
|
kitusmark/bash-python
|
Chat Project/conversador.py
|
Python
|
mit
| 2,145
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from imp import load_source
setup(
    name='cmis',
    # NOTE(review): `imp.load_source` is deprecated (imp removed in 3.12);
    # importlib would be the modern replacement -- confirm supported Pythons.
    version=load_source('', 'cmis/_version.py').__version__,
    description='A server architecture built on top of a solid foundation '
                'provided by flask, sqlalchemy, and various extensions.',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    author='Concordus Applications',
    author_email='support@concordusapps.com',
    url='http://github.com/concordusapps/python-cmis',
    packages=find_packages('.'),
    # Install cmislib from a git branch; relies on pip's dependency-links
    # support -- presumably intentional, but removed in newer pip versions.
    dependency_links=[
        'git+git://github.com/concordusapps/python-cmislib.git@topics/py3k'
        '#egg=cmislib-dev',
    ],
    install_requires=[
        "cmislib == dev"
    ],
)
|
concordusapps/python-cmis
|
setup.py
|
Python
|
mit
| 1,105
|
# -*- coding: utf-8 -*-
import mock
import falcon
import falcon.testing
from keyard.app import utils
from keyard.app import errors
class TestUtils(falcon.testing.TestBase):
    """Tests for the app-preparation helpers in keyard.app.utils."""

    @mock.patch('keyard.app.utils._add_error_handlers')
    def test_prepare_app(self, handler_mock):
        """prepare_app should delegate error-handler registration."""
        app = mock.MagicMock()
        utils.prepare_app(app)
        handler_mock.assert_called_with(app)

    def test_add_error_handlers(self):
        """_add_error_handlers registers the three expected handlers."""
        app = mock.MagicMock()
        utils._add_error_handlers(app)
        calls = [mock.call.add_error_handler(Exception,
                                             errors.handle_default_errors),
                 mock.call.add_error_handler(falcon.HTTPError,
                                             errors.handle_falcon_errors),
                 mock.call.add_error_handler(AssertionError,
                                             errors.handle_assertion_errors)]
        app.assert_has_calls(calls)
|
rzanluchi/keyard
|
tests/app/test_utils.py
|
Python
|
mit
| 941
|
from django.conf import settings
def google_credentials(request):
    """Expose the configured Google API keys to the template context.

    Each value falls back to False when the setting is absent.
    """
    keys = (
        'GOOGLE_ANALYTICS_KEY',
        'GOOGLE_TAG_MANAGER',
        'GOOGLE_MAPS_API_KEY',
    )
    return {key: getattr(settings, key, False) for key in keys}
def baseurl(request):
    """
    Return a BASE_URL template context for the current request.
    """
    scheme = 'https://' if request.is_secure() else 'http://'
    return {'BASE_URL': scheme + request.get_host()}
def api_companies_endpoint(request):
    """
    Return a URL to get the JSON of the existing companies
    """
    # Local fixture in DEBUG; the live API endpoint otherwise.
    url = '/api/data.json' if settings.DEBUG else '/api/companies/?format=json'
    return {'API_COMPANIES_ENDPOINT': url}
|
springload/madewithwagtail
|
core/context_processors.py
|
Python
|
mit
| 845
|
import os
from AppKit import *
import vanilla
from defconAppKit.controls.placardScrollView import DefconAppKitPlacardNSScrollView, PlacardPopUpButton
# -------
# Sorting
# -------
def fontFileNameSort(fonts):
    """Sort fonts by file name; pathless fonts sort by 'family-style'.

    Fixes in this revision: a dead ``s = []`` assignment and the unused
    ``noPathCounter`` are removed, and the sort now uses a key function so
    equal keys never fall back to comparing font objects (which raises a
    TypeError on Python 3).
    """
    def sortKey(font):
        # Prefer the on-disk file name; fall back to names from font info.
        if font.path is not None:
            return os.path.basename(font.path)
        if font.info.familyName is not None:
            family = font.info.familyName
        else:
            family = "Untitled Family"
        if font.info.styleName is not None:
            style = font.info.styleName
        else:
            style = "Untitled Style"
        return family + "-" + style
    return sorted(fonts, key=sortKey)
def _isItalic(font):
isItalic = False
if font.info.styleMapStyleName is not None and "italic" in font.info.styleMapStyleName:
isItalic = True
elif font.info.italicAngle != 0:
isItalic = True
return isItalic
def fontWidthWeightSort(fonts):
    """Sort fonts by family, width class, weight class, italic flag,
    style name and file name (the font object itself breaks any ties)."""
    decorated = [
        (
            font.info.familyName,
            font.info.openTypeOS2WidthClass,
            font.info.openTypeOS2WeightClass,
            _isItalic(font),
            font.info.styleName,
            os.path.basename(font.path) if font.path is not None else None,
            font,
        )
        for font in fonts
    ]
    decorated.sort()
    return [entry[-1] for entry in decorated]
# -----------
# Main Object
# -----------
class FontList(vanilla.List):
"""
This object presents the user with a standard list showing fonts.
It follows the same API as vanilla.List. When you set objects into
the view, you always pass font objects. The object will then extract
the relevant data to display.
Constructor Arguments:
All of the vanilla.List contstructor arguments apply, with the
following modifications.
columnDescriptions
This sets up the columns in the list. These follow the same format
of the column descriptions in vanilla.List. The only exception is that
you need to provide an "attribute" key/value pair. This is the font
attribute that the list will extract display values from. For example:
dict(title="Font Path", key="fontPath", attribute="path")
If no columnDescriptions is provided, the font will be shown in a single
single column represented with its file name or a combination of its
family and style names.
The list may show an "Options..." placard if either of the following is given:
placardSortItems
A list of dictionaries describing font sorting options. The dictionaries
must follow this form:
dict(title=string, callback=callback)
The title must begin with "Sort by" for this to work properly. The callback
must accept one argument: fonts. This will be a list of all fonts in the list.
The callback should return a list of sorted fonts.
placardItems
A list of dictionaries describing arbitrary items to show in the placard.
The dictionaries must follow this form:
dict(title=string, callback=callback)
The callback must accept one argument, sender, which will be the font list.
"""
nsScrollViewClass = DefconAppKitPlacardNSScrollView
def __init__(self, posSize, items,
placardSortItems=[
dict(title="Sort by File Name", callback=fontFileNameSort),
dict(title="Sort by Weight and Width", callback=fontWidthWeightSort),
],
placardItems=[],
**kwargs):
# make default column descriptions if needed
if not kwargs.get("columnDescriptions"):
kwargs["columnDescriptions"] = [fontListFontNameColumnDescription]
kwargs["showColumnTitles"] = False
# set some defaults
kwargs["autohidesScrollers"] = False
# build the internal column reference
self._keyToAttribute = {}
self._orderedListKeys = []
self._wrappedListItems = {}
for columnDescription in kwargs["columnDescriptions"]:
title = columnDescription["title"]
key = columnDescription.get("key", title)
attribute = columnDescription["attribute"]
self._keyToAttribute[key] = attribute
self._orderedListKeys.append(key)
# wrap the items
items = [self._wrapFontForList(font) for font in items]
# start the list
super(FontList, self).__init__(posSize, items, **kwargs)
# set the initial sort mode
self._sortMode = None
self._placardSortOptions = {}
self._placardOptions = {}
# placard
if len(placardSortItems) + len(placardItems):
# build the sort options
if placardSortItems:
self._sortMode = placardSortItems[0]["title"]
for d in placardSortItems:
title = d["title"]
assert title.startswith("Sort by")
self._placardSortOptions[title] = d["callback"]
# build the other options
if placardItems:
for d in placardItems:
self._placardOptions[d["title"]] = d["callback"]
# build
placardW = 65
placardH = 16
self._placard = vanilla.Group((0, 0, placardW, placardH))
# make a default item
item = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_("Options...", None, "")
item.setHidden_(True)
items = [item]
# add the items
items += [d["title"] for d in placardSortItems]
items += [d["title"] for d in placardItems]
self._placard.optionsButton = PlacardPopUpButton((0, 0, placardW, placardH), items,
callback=self._placardCallback, sizeStyle="mini")
button = self._placard.optionsButton.getNSPopUpButton()
button.setTitle_("Options...")
self._nsObject.setPlacard_(self._placard.getNSView())
# update the sort
self._updateSort()
def _breakCycles(self):
for font in self._wrappedListItems.keys():
del self._wrappedListItems[font]
self._unsubscribeFromFont(font)
self._placard = None
self._placardSortOptions = {}
super(FontList, self)._breakCycles()
def setSortMode(self, mode):
"""
Set the sort mode in the popup.
"""
self._sortMode = mode
self._updateSort()
# -------------------
# Placard and Sorting
# -------------------
def _placardCallback(self, sender):
index = sender.get()
title = sender.getItems()[index]
# title item
if title == "Options...":
return
# sorting
elif title.startswith("Sort by"):
self._sortMode = title
self._updateSort()
# other
else:
self._placardOptions[title](self)
sender.set(0)
def _updateSort(self):
if self._sortMode is None:
return
# gather the wrappers and the selection states
oldSelection = self.getSelection()
fontToWrapper = {}
for index, wrapper in enumerate(self._arrayController.content()):
fontToWrapper[wrapper["_font"]] = (wrapper, index in oldSelection)
# sort the fonts
fonts = fontToWrapper.keys()
sortFunction = self._placardSortOptions[self._sortMode]
fonts = sortFunction(fonts)
# clear the list
count = len(self)
for index in range(count):
count -= 1
super(FontList, self).__delitem__(count)
# reset the items
sortedWrappers = []
newSelection = []
for index, font in enumerate(fonts):
wrapper, selected = fontToWrapper[font]
sortedWrappers.append(wrapper)
if selected:
newSelection.append(index)
super(FontList, self).set(sortedWrappers)
# reset the selection
self.setSelection(newSelection)
# -------------
# list behavior
# -------------
def _subscribeToFont(self, font):
font.addObserver(self, "_fontChanged", "Font.Changed")
def _unsubscribeFromFont(self, font):
font.removeObserver(self, "Font.Changed")
def _fontChanged(self, notification):
font = notification.object
if font not in self._wrappedListItems:
return
d = self._wrappedListItems[font]
for key, attr in self._keyToAttribute.items():
if attr == defaultFontIDAttribute:
value = makeDefaultIDString(font)
else:
value = getattr(font, attr)
d[key] = value
# editing
def _listEditCallback(self, sender):
    """
    Push edits made in the list UI back onto the font object, then sync
    the wrapped dict with the font's (possibly normalized) values.
    """
    # skip if in an edit loop
    if self._listEditChangingFont is not None:
        return
    if not self.getSelection():
        return
    columnIndex, rowIndex = sender.getEditedColumnAndRow()
    if columnIndex == -1 or rowIndex == -1:
        # no specific cell reported; fall back to the first selected row
        rowIndex = self.getSelection()[0]
        editedKey = None
        editedAttribute = None
    else:
        editedKey = self._orderedListKeys[columnIndex]
        editedAttribute = self._keyToAttribute[editedKey]
    item = super(FontList, self).__getitem__(rowIndex)
    # NOTE(review): "_font" is *called* here, implying it holds a callable
    # (e.g. a weakref), but _wrapFontForList stores the font object itself
    # -- confirm which is intended.
    font = item["_font"]()
    # guard flags suppressing re-entrant edit notifications
    self._listEditChangingAttribute = editedAttribute
    self._listEditChangingFont = font
    # known attribute. process it individually.
    if editedAttribute is not None:
        # set the attribute
        value = item[editedKey]
        fontValue = getattr(font, editedAttribute)
        if value != fontValue:
            setattr(font, editedAttribute, value)
    # unknown attribute. process all.
    else:
        for key, attribute in self._keyToAttribute.items():
            value = getattr(font, attribute)
            if value != item[key]:
                setattr(font, attribute, item[key])
    # update the dict contents
    for key, attribute in self._keyToAttribute.items():
        if key == editedKey and attribute == editedAttribute:
            continue
        value = getattr(font, attribute)
        if value != item[key]:
            item[key] = value
    self._listEditChangingAttribute = None
    self._listEditChangingFont = None
# wrapping
def _wrapFontForList(self, font):
    """
    Return the NSMutableDictionary list item for font, creating it (and
    subscribing to the font's notifications) the first time it is seen.
    """
    dirty = False
    if font in self._wrappedListItems:
        wrapper = self._wrappedListItems[font]
    else:
        wrapper = NSMutableDictionary.dictionary()
        self._subscribeToFont(font)
    for key, attribute in self._keyToAttribute.items():
        if attribute == defaultFontIDAttribute:
            value = makeDefaultIDString(font)
        else:
            value = getattr(font, attribute)
        if key not in wrapper or wrapper.get(key) != value:
            wrapper[key] = value
            dirty = True
    wrapper["_font"] = font
    if dirty:
        self._wrappedListItems[font] = wrapper
    return wrapper
def _unwrapListItems(self, items=None):
    """Return the font objects stored in the given wrapped items
    (defaults to every item currently in the list)."""
    if items is None:
        items = super(FontList, self).get()
    return [wrapper["_font"] for wrapper in items]
# standard API
def __contains__(self, font):
    # membership is tracked via the font -> wrapper mapping
    return font in self._wrappedListItems
def __getitem__(self, index):
    # fetch the wrapped item and return the underlying font
    item = super(FontList, self).__getitem__(index)
    font = self._unwrapListItems([item])[0]
    return font
def __setitem__(self, index, font):
    """
    Replace the font at index with another font.

    The new font is wrapped for display. If the replaced wrapper no
    longer appears anywhere in the list, its font is unsubscribed and
    dropped from the wrapper cache.
    """
    # bug fix: the original fetched the unwrapped font (self[index]) but
    # later subscripted it with "_font", and it stored the bare font in
    # the underlying list instead of the wrapped item (unlike
    # append/insert/extend). Fetch the wrapper and store the wrapper.
    existing = super(FontList, self).__getitem__(index)
    item = self._wrapFontForList(font)
    super(FontList, self).__setitem__(index, item)
    if not super(FontList, self).__contains__(existing):
        otherFont = existing["_font"]
        del self._wrappedListItems[otherFont]
        self._unsubscribeFromFont(otherFont)
def __delitem__(self, index):
    """Remove the item at index, releasing its font when the wrapper no
    longer appears anywhere in the list."""
    wrapper = super(FontList, self).__getitem__(index)
    super(FontList, self).__delitem__(index)
    if super(FontList, self).__contains__(wrapper):
        return
    wrappedFont = wrapper["_font"]
    del self._wrappedListItems[wrappedFont]
    self._unsubscribeFromFont(wrappedFont)
def append(self, font):
    # wrap the font and store the wrapper in the underlying list
    item = self._wrapFontForList(font)
    super(FontList, self).append(item)
def remove(self, font):
    """Remove font from the list, unsubscribing from it when its wrapper
    no longer appears anywhere in the list."""
    wrapper = self._wrappedListItems[font]
    super(FontList, self).remove(wrapper)
    if not super(FontList, self).__contains__(wrapper):
        # avoid shadowing the parameter: look the font back up explicitly
        wrappedFont = wrapper["_font"]
        del self._wrappedListItems[wrappedFont]
        self._unsubscribeFromFont(wrappedFont)
def index(self, font):
    # look up the wrapper for the font and return its position
    item = self._wrappedListItems[font]
    return super(FontList, self).index(item)
def insert(self, index, font):
    # wrap the font and insert the wrapper at the requested position
    item = self._wrapFontForList(font)
    super(FontList, self).insert(index, item)
def extend(self, fonts):
    # wrap each font and extend the underlying list with the wrappers
    items = [self._wrapFontForList(font) for font in fonts]
    super(FontList, self).extend(items)
def set(self, fonts):
    """
    Set the fonts in the list, releasing the wrappers of any fonts that
    are no longer present.
    """
    # drop wrappers for fonts that disappear from the list
    for gone in set(self._wrappedListItems) - set(fonts):
        del self._wrappedListItems[gone]
        self._unsubscribeFromFont(gone)
    # wrap the fonts and hand the wrappers to the underlying list
    wrappedFonts = [self._wrapFontForList(aFont) for aFont in fonts]
    super(FontList, self).set(wrappedFonts)
def get(self):
    """
    Get the font objects currently in the list.
    """
    return self._unwrapListItems()
# --------------------------
# Formatters, Cells and Such
# --------------------------
class DirtyStatusIndicatorCell(NSActionCell):
    """Cell that draws a red 'dirty' dot (or a blank image) for a boolean."""

    def drawWithFrame_inView_(self, frame, view):
        value = self.objectValue()
        # bug fix: the image was computed twice when value was falsy (once
        # inside a pointless "if not value" branch, then again
        # unconditionally). Compute it exactly once.
        image = _drawDirtyStateImage(value)
        image.drawAtPoint_fromRect_operation_fraction_(frame.origin, ((0, 0), (13, 17)), NSCompositeSourceOver, 1.0)
def _drawDirtyStateImage(value):
    """Return the 13x17 dirty-state image for the given boolean, creating
    it on first use and caching it by name via NSImage.setName_."""
    if value:
        imageName = "defconAppKitFontListDirtyStateTrue"
    else:
        imageName = "defconAppKitFontListDirtyStateFalse"
    image = NSImage.imageNamed_(imageName)
    if image is None:
        # make the image
        width = 13
        height = 17
        image = NSImage.alloc().initWithSize_((width, height))
        image.lockFocus()
        # draw the dot only in the dirty case; otherwise keep a blank image
        if value:
            rect = ((2, 4), (9, 9))
            path = NSBezierPath.bezierPathWithOvalInRect_(rect)
            path.addClip()
            # colors
            color1 = NSColor.colorWithCalibratedRed_green_blue_alpha_(1.0, 0.1, 0.1, 1)
            color2 = NSColor.colorWithCalibratedRed_green_blue_alpha_(0.5, 0.0, 0.0, 1)
            # fill
            color1.set()
            path.fill()
            # gradient shading; NSGradient may be unavailable on old systems
            try:
                gradient = NSGradient.alloc().initWithColors_([color1, color2])
                gradient.drawInBezierPath_angle_(path, -90)
            except NameError:
                pass
            # stroke
            color2.set()
            path.setLineWidth_(2)
            path.stroke()
        image.unlockFocus()
        # register under the name so the next call hits imageNamed_
        image.setName_(imageName)
        image = NSImage.imageNamed_(imageName)
    return image
class FilePathFormatter(NSFormatter):
    """NSFormatter that displays a file path truncated at its head (so the
    file name stays visible) and treats None/NSNull as the empty string."""

    def stringForObjectValue_(self, obj):
        if obj is None or isinstance(obj, NSNull):
            return ""
        return obj

    def attributedStringForObjectValue_withDefaultAttributes_(self, obj, attrs):
        if obj is None or isinstance(obj, NSNull):
            obj = ""
        # truncate at the head so the tail (file name) remains readable
        paragraph = NSMutableParagraphStyle.alloc().init()
        paragraph.setLineBreakMode_(NSLineBreakByTruncatingHead)
        attrs = dict(attrs)
        attrs[NSParagraphStyleAttributeName] = paragraph
        return NSAttributedString.alloc().initWithString_attributes_(obj, attrs)

    def objectValueForString_(self, string):
        return string
def makeDefaultIDString(font):
    """
    Return a display string identifying font: the file name when the font
    has been saved, otherwise "familyName-styleName" with untitled
    placeholders substituted for missing names.
    """
    if font.path is not None:
        return os.path.basename(font.path)
    family = font.info.familyName
    if family is None:
        family = "Untitled Family"
    style = font.info.styleName
    if style is None:
        style = "Untitled Style"
    return family + "-" + style
# --------------------------
# Common Column Descriptions
# --------------------------
# attribute key used for the synthesized font ID string
defaultFontIDAttribute = "defconAppKitFontIDString"
# ready-made column descriptions for FontList clients
# (note: "Coloumn" typo below is part of the public name and kept for
# backward compatibility)
fontListFontNameColumnDescription = dict(title="Font", attribute=defaultFontIDAttribute, editable=False)
fontListFontPathColumnDescription = dict(title="Path", attribute="path", editable=False, formatter=FilePathFormatter.alloc().init())
fontListDirtyStateColoumnDescription = dict(title="Dirty", attribute="dirty", cell=DirtyStatusIndicatorCell.alloc().init(), width=13, editable=False)
|
Ye-Yong-Chi/defconAppKit
|
Lib/defconAppKit/controls/fontList.py
|
Python
|
mit
| 17,293
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
下载国家气象局所有气候数据 (VPN连接)
"""
import os
import re
import threading
from ftplib import FTP
from time import sleep
def ftp_get_data(tid, n, step):
    """Download NOAA ISD yearly .gz files for the Beijing station whose
    decade digit falls in [n, n+step-1], retrying forever on failure.

    tid is used only for log messages; n/step select the year range
    handled by this thread.
    """
    # year: 1901 - 2017
    # pattern = r"[12][09][0-9]{2}"
    start = n
    end = n + step - 1
    if n == 1:
        start = 0
    # character class over the decade digit, e.g. "[12][09][0-4][0-9]"
    pattern = "[12][09][{}-{}][0-9]".format(start, end)
    print(pattern)
    match_year = re.compile(pattern)
    # download destination
    # station ids: ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt
    output = "bj"
    bj_id = "545110"  # USAF station id for Beijing
    if not os.path.exists(output):
        os.mkdir(output)
    else:
        print("{} is exists".format(output))
    while True:
        try:
            with FTP() as ftp:
                ftp.set_debuglevel(2)
                ftp.connect("ftp.ncdc.noaa.gov", 21, 60)
                ftp.login()  # anonymous login (user=anonymous, passwd='')
                ftp.getwelcome()
                ftp.cwd("pub/data/noaa/")
                ftp.set_debuglevel(0)
                files = ftp.nlst()
                for name in files:
                    result = match_year.match(name)
                    if result is not None:
                        # list the year directory; entries include the dir prefix
                        for gzfile in ftp.nlst(name):
                            print("[thread-{}] check {}".format(tid, gzfile))
                            ret = re.search(bj_id, gzfile)
                            if ret is None:
                                continue
                            year_dir = output + "/" + name
                            if not os.path.exists(year_dir):
                                os.mkdir(year_dir)
                            print("[thread-{}]Downloading:{} ".format(tid, gzfile))
                            outfile = output + "/" + gzfile
                            if os.path.exists(outfile):
                                # already downloaded on a previous run
                                continue
                            with open(outfile, 'wb') as f:
                                ftp.retrbinary("RETR " + gzfile, f.write, 2048)
                # also fetch the data-format documentation PDF once
                formatdoc = "ish-format-document.pdf"
                doc = output + "/" + formatdoc
                if not os.path.exists(doc):
                    with open(doc, "wb") as f:
                        ftp.retrbinary("RETR " + formatdoc, f.write, 1024)
            break
        except Exception as err:
            # transient FTP errors: wait and retry from scratch
            print(err)
            sleep(3)
if __name__ == "__main__":
    # the FTP server allows at most 2 concurrent connections, so split the
    # year range across two threads (decade digits 0-4 and 5-9)
    threads = []
    step = 5
    nloops = range(0, 9, step)
    for i in nloops:
        t = threading.Thread(target=ftp_get_data, args = (i, i, step))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
|
qrsforever/workspace
|
python/test/noaa/get_beijing_weather.py
|
Python
|
mit
| 2,835
|
import json
import re
import os
import time
from google.appengine.api import memcache
from google.appengine.api import urlfetch
import jinja2
import webapp2
import yaml
from google.appengine.ext import vendor
vendor.add('lib')
import twitter
def rel(path):
    """Return path resolved relative to this module's directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, path)
# Load OAuth/API secrets; fall back to an empty config so the module still
# imports (handlers will then fail on the missing keys instead).
try:
    with open(rel("secrets.yaml")) as fh:
        config = yaml.load(fh)
except Exception as e:
    print e
    config = {}
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# objects; secrets.yaml is local, but yaml.safe_load would be safer.
if "twitter" in config:
    # build the Twitter clients only when credentials are configured
    auth=twitter.OAuth( config['twitter']['access_token'],
        config['twitter']['access_token_secret'],
        config['twitter']['consumer_key'],
        config['twitter']['consumer_secret'])
    t = twitter.Twitter(auth=auth)
    t_upload = twitter.Twitter(domain='upload.twitter.com',auth=auth)
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(rel("templates")))
def isDev():
    """Return True when running under the App Engine development server."""
    server = os.environ.get('SERVER_SOFTWARE', '')
    return server.startswith('Development')
def getJson(url,cache_secs=None):
    """GET url and parse it as JSON, optionally memcache-cached for
    cache_secs seconds."""
    mc_key = "url:"+url
    if cache_secs is not None:
        result = memcache.get(mc_key)
        if result is not None:
            return result
    # bypass intermediate caches so we see fresh data
    result = json.loads(urlfetch.fetch(url, headers = {'Cache-Control' : 'max-age=0'}).content)
    if cache_secs is not None:
        memcache.set(key=mc_key,value=result,time=cache_secs)
    return result
def getForecast():
    """Fetch the forecast.io forecast for the configured lat/lon,
    cached for 120 seconds."""
    url = "https://api.forecast.io/forecast/%s/%s,%s" % (config['forecast_io']['api_key'],config['weather']['lat'],config['weather']['lon'])
    return getJson(url,120)
def describeConditions(f):
    """Map a forecast 'currently' block to one of: 'raining', 'snowing',
    'cloudy', 'clear' using the configured thresholds."""
    weather_cfg = config['weather']
    if f['precipProbability'] > weather_cfg['precip_threshold']:
        # hail is reported as rain for the beacon's purposes
        return 'raining' if f['precipType'] in ['rain', 'hail'] else 'snowing'
    if f['cloudCover'] > weather_cfg['cloud_threshold']:
        return 'cloudy'
    return 'clear'
def getSoxStatus():
    # Manually-toggled placeholders: flip one of these to True for the
    # special Red Sox beacon states. Both False means this returns None
    # and buildStatus falls through to the weather description.
    if False:
        return "sox-champs"
    elif False:
        return "sox-rainout"
def getSunClass(f):
    """Return 'daytime' or 'nighttime' based on the forecast's sunrise
    and sunset timestamps compared to the current time."""
    moment = time.time()
    is_day = f['sunriseTime'] < moment < f['sunsetTime']
    return 'daytime' if is_day else 'nighttime'
def buildStatus():
    """Compute the beacon status dict from the forecast (a Sox status, when
    set, overrides the weather) and cache it for 15 minutes."""
    f = getForecast()
    weather = describeConditions(f['currently'])
    status = {
        'beacon': getSoxStatus() or weather,
        'weather': weather,
        # sunrise/sunset come from today's daily block, not 'currently'
        'time': getSunClass(f['daily']['data'][0])
    }
    memcache.set(key='status',value=status,time=15*60)
    return status
def getStatus():
    """Return the current status dict.

    In development, proxy the production endpoint (with a static fallback);
    in production, serve from memcache or rebuild on a miss.
    """
    if isDev():
        try:
            return getJson("https://weather-beacon.appspot.com/v0/status.json",60)
        except:
            # NOTE(review): bare except hides real errors; tolerated here
            # as a dev-only offline fallback.
            return {'beacon': "clear", 'weather': "clear", 'time': "daytime"}
    result = memcache.get('status')
    if result is not None:
        return result
    result = buildStatus()
    return result
def tweetStatus(status):
    """Tweet the human-readable phrase for the current beacon state.

    Raises KeyError for an unknown beacon value (all produced values are
    covered below).
    """
    tweet_text = {
        'clear': "Steady blue, clear view",
        'cloudy': "Flashing blue, clouds due",
        'raining': "Steady red, rain ahead",
        'snowing': "Flashing red, snow instead",
        'sox-rainout': "Flashing red, the Sox game is cancelled",
        'sox-champs': "Flashing blue and red, the Boston Red Sox are world champions!"
    }[status['beacon']]
    t.statuses.update(status=tweet_text)
    # kept for a future animated-gif media tweet:
    # with open(rel("tweet_gifs/%s.gif"%status['beacon']),"rb") as fh:
    # gif_data = fh.read()
    # gif_media_id = t_upload.media.upload(media=gif_data)["media_id_string"]
    # t.statuses.update(status=tweet_text, media_ids=gif_media_id)
def RefreshStatus():
    """Rebuild the status and tweet only when the beacon state changed."""
    old_status = memcache.get('status')
    status = buildStatus()
    if old_status is None or old_status['beacon'] != status['beacon']:
        tweetStatus(status)
class MainPage(webapp2.RequestHandler):
    """Render the beacon page; in development, status values may be
    overridden via (sanitized) query parameters."""
    def get(self):
        status = getStatus()
        if isDev():
            # strip everything but word characters and hyphens from overrides
            status = {k:re.sub(r"[^\w-]","",self.request.get(k,v)) for k,v in status.iteritems()}
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(status))
class RefreshHandler(webapp2.RequestHandler):
    """Admin endpoint (cron target) that rebuilds and possibly tweets."""
    def get(self):
        RefreshStatus()
        self.response.write("u did the thing")
class StatusJson(webapp2.RequestHandler):
    """Serve the current status dict as JSON (consumed by the dev proxy)."""
    def get(self):
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(getStatus()))
# WSGI entry point: route table for the three handlers above
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/admin/refresh', RefreshHandler),
    ('/v0/status.json', StatusJson),
], debug=True)
|
hancock-lighting/hancock.lighting
|
src/backend/weatherbeacon.py
|
Python
|
mit
| 4,640
|
# coding=utf8
class Config(object):
    """Base configuration shared by all environments."""
    DEBUG = False
    TESTING = False
class ProductionConfig(Config):
    """Production settings: inherits the safe defaults unchanged."""
    pass
class DevelopmentConfig(Config):
    """Development settings: enable debug mode."""
    DEBUG = True
class TestingConfig(Config):
    """Test-suite settings: enable testing mode."""
    TESTING = True
|
torpedoallen/amazing
|
config.py
|
Python
|
mit
| 218
|
"""View of the notification window."""
from gi.repository import Gtk
from lib.mvc.bases import WindowViewBase
from lib.exception_feedback import add_default_exception_handling
from lib.helpers import getuid
class NotificationWindowView(Gtk.Window, WindowViewBase):
    """Notification window showing the quota icon and percentage with
    'Show Usage' and 'Ok' buttons."""

    def __init__(self, app, model):
        """Ctor of NotificationWindowView."""
        Gtk.Window.__init__(self)
        WindowViewBase.__init__(self, app, model)
        # callback installed later via register_open_usage_event
        self.open_usage_event = None

    @add_default_exception_handling('Failed to initialize Quota Indicator')
    def initialize(self):
        """Create the actual view with all widgets."""
        # hide instead of destroy on window close (see cb_close)
        self.connect("delete-event", self.cb_close)
        # change size
        self.resize(200, 150)
        # create vbox
        vbox = Gtk.VBox(spacing=0)
        # create hbox
        hbox = Gtk.HBox(spacing=0)
        # add image to hbox
        self.image = Gtk.Image()
        self.image.set_from_file(self.getIcon())
        hbox.pack_start(self.image, True, True, 0)
        # create vbox_right
        vbox_right = Gtk.VBox(spacing=0)
        # add text to vbox_right
        # NOTE: 'precentage' (sic) matches the model's attribute name
        self.text_label = Gtk.Label(
            ' '.join([self.model.text, self.model.precentage]))
        vbox_right.pack_start(self.text_label, True, True, 0)
        # add vbox_right to hbox
        hbox.pack_start(vbox_right, True, True, 25)
        # add hbox to vbox
        vbox.pack_start(hbox, True, True, 0)
        # create hbox_buttons
        hbox_buttons = Gtk.HBox(spacing=0)
        # add "Show Usage" button to hbox_buttons
        button = Gtk.Button()
        button.set_label("Show Usage")
        button.connect("clicked", self.open_usage, "Show Usage")
        hbox_buttons.pack_start(button, True, True, 0)
        # add close ("Ok") button to hbox_buttons
        button = Gtk.Button()
        button.set_label("Ok")
        button.connect("clicked", self.cb_close, "Ok")
        hbox_buttons.pack_start(button, True, True, 0)
        # add hbox_buttons to vbox
        vbox.pack_start(hbox_buttons, True, True, 0)
        # add vbox to window
        self.add(vbox)

    @add_default_exception_handling()
    def register_open_usage_event(self, func):
        """Register open_usage event."""
        self.open_usage_event = func

    @add_default_exception_handling()
    def open_usage(self, *args):
        """Open usage event: invoke the registered callback."""
        self.open_usage_event()

    @add_default_exception_handling()
    def cb_close(self, w, data):
        """On close event: hide the window; True stops further handling."""
        self.hide()
        return True

    @add_default_exception_handling()
    def cb_show(self, w, data):
        """On show event: refresh contents, then present the window."""
        self.update()
        self.show_all()
        return True

    @add_default_exception_handling('Failed to update notification window')
    def update(self):
        """Update the notification window view (icon, label markup)."""
        self.image.set_from_file(self.getIcon())
        self.text_label.set_markup(
            '<span font_weight="bold" foreground="Black" size="large">{text}</span> <span font_weight="bold" color="{color}" background="black" size="large">{precentage}</span>'.
            format(
                text=self.model.text,
                precentage=self.model.precentage,
                color=self.model.precentage_color))
        self.present()
        return True
|
realm01/gnome-quota-indicator
|
lib/mvc/notification_window/view.py
|
Python
|
mit
| 3,331
|
from collections import deque
try:
from lxml.html import fromstring, tostring
import difflib
except ImportError: fromstring = tostring = None
class Element(object):
tag = None
format = '<{tag}{attrs}>{text}{children}</{tag}>'
attr_remap = {}
def __init__(self, *args, **kwargs):
self._root = self._parent = None
self.attrs, self.children = {}, []
attrs, elements, self.text = self._parse_args(args)
if attrs: self.attrs.update(attrs)
if kwargs: self.attrs.update(kwargs)
if elements: self._add_children(elements)
self.__getattribute__ = self.__getattribute
def __call__(self, *args, **kwargs):
attrs, elements, text = self._parse_args(args)
if attrs: self.attrs.update(attrs)
if kwargs: self.attrs.update(kwargs)
if elements: self._add_children(elements)
if text: self.text = text
return self
def __getattribute(self, attr):
elements = [x for x in self.children if x.tag == attr]
if elements: return elements
else: return object.__getattribute__(self, attr)
def __getattr__(self, attr):
if attr.startswith('_') or attr == 'trait_names':
raise AttributeError()
elif '(' in attr:
return self.__getattribute__(attr.split('(')[0])
else:
elements = [e for e in self.children if e.tag == attr]
if elements:
return elements
if attr in self.valid_tags:
e = self.valid_tags[attr]()
self._add_child(e)
return e
raise InvalidElement(attr)
def __setattr__(self, attr, item):
if not attr.startswith('_') and isinstance(item, Element):
for c in self.children:
if c.tag == e.tag:
self.children.remove(c)
self._add_child(item)
self.__dict__[attr] = item
def __getitem__(self, key):
if isinstance(key, int):
return self.children[key]
if isinstance(key, slice):
return self.children[key]
return self.select(key)
def __str__(self):
return self.render(pretty=True, this=True)
def __repr__(self):
name = '`%s` ' % self.text[:20] if self.text else ''
return '<Element.{0} object {1}at {2}>'.format(self.tag, name, hex(id(self)))
def _parse_args(self, args):
attrs, elements, text = {}, [], ''
for arg in args:
if issubclass(type(arg), Element):
elements.append(arg)
elif isinstance(arg, dict):
attrs.update(arg)
elif hasattr(arg, '__iter__'):
for e in arg:
# try to instantiate object in case we are passed a class
elements.append(e())
else:
text = arg
return attrs, elements, text
def _add_child(self, e):
e._parent, e._root = self, self._root or self
e._update_children()
self.__dict__[e.tag] = e
self.children.append(e)
def _add_children(self, elements):
for e in elements:
if e._root and e._root not in self.children:
e = e._root
self._add_child(e)
def _get_children(self):
return sum([self.children] + [e._get_children() for e in self.children], [])
def _update_children(self):
for e in self._get_children():
e._root = self._root or self
def _search_elements(self, path, elements):
if '[' in path:
tag, path = path[:-1].split('[')
elements = [e for e in elements if e.tag == tag]
if not elements:
return []
if '=' in path:
k, v = path.split('=')
if k == 'text':
v = v.lower()
return [e for e in elements if v in e.text.lower()]
return [e for e in elements if e.attrs.get(k) == v]
return [e for e in elements if e.tag == path]
def select(self, query):
if query.startswith('//'):
return self._search_elements(query.strip('/'), self._get_children())
paths = query.strip('/').split('/')
paths = deque(paths)
elements = self.children
while paths:
elements = self._search_elements(paths.popleft(), elements)
if not elements: break
return elements
def render_attrs(self):
attrs = ['='.join([self.attr_remap.get(k, k), '"%s"' % v]) for k,v in self.attrs.iteritems()]
out = ' '.join(attrs)
return ' %s' % out if out else ''
def render(self, pretty=False, this=False):
if self._root and not this:
return self._root.render(pretty)
return prettify(self.render_this()) if pretty and tostring else self.render_this()
def render_children(self):
return ''.join(e.render_this() for e in self.children)
def render_this(self):
return self.format.format(
tag=self.tag,
attrs=self.render_attrs(),
text=self.text,
children=self.render_children(),
)
def clear(self):
tags = set(e.tag for e in self.children)
for tag in tags:
delattr(self, tag)
self.children = []
def pop(self):
if self._parent:
self._parent.children.remove(self)
if [e for e in self._parent.children if e.tag == self.tag]:
try: delattr(self._parent, self.tag)
except AttributeError: pass
for e in self._get_children():
e._root = self
self._root = self._parent = None
return self
@property
def valid_tags(self):
if not hasattr(Element, '_valid_tags'):
tags = __import__('elemental.tags', fromlist=['tags'])
Element._valid_tags = dict((x, getattr(tags, x)) for x in dir(tags) if not x.startswith('_'))
return self._valid_tags
class InvalidElement(Exception):
    """Raised when an unknown tag name is requested on an Element."""
    def __init__(self, value):
        # the offending tag name
        self.parameter = value
    def __str__(self):
        return repr(self.parameter)
def prettify(orig):
    """Use lxml's pretty_print feature but only insert whitespace.

    lxml's round-trip can otherwise alter content, so only the
    whitespace-only insertions from the diff are applied back to orig.
    """
    pretty = tostring(fromstring(orig), pretty_print=True)
    # autojunk requires Python 2.7.1+/3.2+
    opcodes = (x for x in difflib.SequenceMatcher(None, orig, pretty, autojunk=False).get_opcodes() if x[0] == 'insert')
    offset = 0
    for _, a1, a2, b1, b2 in opcodes:
        chars = pretty[b1:b2]
        # lxml can alter content, so make sure we're only inserting whitespace
        if not chars.strip():
            orig = ''.join([orig[:a1+offset], chars, orig[a2+offset:]])
            offset += b2 - b1
    return orig
|
zeekay/elemental
|
elemental/core.py
|
Python
|
mit
| 6,783
|
import json
import time
import os
import libgreader as gr
class JsonArchive(object):
    """Append-only archive of JSON items, one object per line.

    Tracks the ids already present (so duplicates are skipped) and the
    min/max 'time' field across items read from disk.
    """

    def __init__(self, fn):
        self.fn = fn
        self.loaded = set()   # ids already present in the file
        self.items = []       # new items queued for write()
        self.min_time = None
        self.max_time = None
        if not os.path.exists(fn):
            # create an empty archive file
            open(fn, "wb").close()

    def read(self):
        """Scan the archive, populating loaded ids and the time range."""
        min_time, max_time = time.time(), 0
        with open(self.fn, "rb") as f:
            for l in f:
                item = json.loads(l)
                timestamp = item.get('time')
                # bug fix: the original assigned the `time` module to the
                # trackers and compared max with `<` instead of `>`.
                if timestamp is not None and timestamp < min_time:
                    min_time = timestamp
                if timestamp is not None and timestamp > max_time:
                    max_time = timestamp
                self.loaded.add(item['id'])
        self.min_time, self.max_time = min_time, max_time

    def is_loaded(self, id):
        """Return True when an item with this id is already archived."""
        return (id in self.loaded)

    def add(self, item):
        """Queue item for writing unless its id has already been seen."""
        if item['id'] not in self.loaded:
            self.loaded.add(item['id'])
            self.items.append(item)

    def write(self):
        """Append all queued items to the archive file and clear the queue."""
        with open(self.fn, "ab") as f:
            for item in self.items:
                f.write(json.dumps(item))
                f.write("\n")
        self.items = []
class DirectoryStore(object):
    """Maps archive names to JsonArchive files inside a single directory."""

    def __init__(self, root):
        """Create the root directory if needed and remember its abs path."""
        expanded = os.path.abspath(os.path.expanduser(root))
        try:
            os.makedirs(expanded)
        except OSError as e:
            # errno 17 == EEXIST: the directory already exists, which is fine
            if e.errno != 17:
                raise e
        self.root = expanded

    def open(self, name):
        """Open (and pre-read) the named archive inside the root."""
        archive = JsonArchive(os.path.join(self.root, name))
        archive.read()
        return archive
class GReaderArchiver(object):
    """Archive a Google Reader account's subscriptions and feed items into
    a DirectoryStore (Python 2 code: uses `print` statements)."""

    def __init__(self, reader, store):
        self.reader = reader   # libgreader client
        self.store = store     # DirectoryStore
        self.subs = None       # cached subscription list

    def _extract_feed(self, feed):
        # flatten a libgreader feed object into a JSON-serializable dict
        return {
            "id": feed.id,
            "title": feed.title,
            "siteUrl": feed.siteUrl,
            "feedUrl": feed.feedUrl,
            "categories": [cat.id for cat in feed.categories]
        }

    def archive_subscriptions(self):
        """Fetch the subscription list (once) and archive it."""
        if self.subs is None:
            s = self.reader.buildSubscriptionList()
            assert s
            self.subs = self.reader.getSubscriptionList()
        archive = self.store.open('subscriptions')
        for feed in self.subs:
            ex_feed = self._extract_feed(feed)
            archive.add(ex_feed)
        archive.write()

    def archive_feed(self, feed, chunk_size=20, start_time=None):
        """Page through a feed's items via continuations, archiving each page."""
        # archive file names cannot contain '/' or ':'
        archive_name = feed.id.replace('/', '-').replace(':', '-')
        archive = self.store.open(archive_name)
        continuation = None
        while True:
            c = self.reader.getFeedContent(
                feed, continuation=continuation, loadLimit=chunk_size,
                since=start_time)
            for item in c['items']:
                archive.add(item)
            # write per page so progress survives interruption
            archive.write()
            continuation = c.get('continuation')
            if continuation is None:
                break

    def archive_account(self):
        """Archive the subscription list, then every feed's items."""
        self.archive_subscriptions()
        for feed in self.subs:
            print feed.title
            self.archive_feed(feed, chunk_size=100)
|
deactivated/google-jawa
|
google_jawa/__init__.py
|
Python
|
mit
| 3,236
|
import copy
from nylas.client.restful_model_collection import RestfulModelCollection
from nylas.client.restful_models import Scheduler
from nylas.client.scheduler_models import (
SchedulerTimeSlot,
SchedulerBookingConfirmation,
)
class SchedulerRestfulModelCollection(RestfulModelCollection):
    """Collection for Scheduler pages, routed to the Scheduler API host."""

    def __init__(self, api):
        # Scheduler calls go to a dedicated host, so clone the API client
        # and repoint the copy instead of mutating the caller's object.
        scheduler_api = copy.copy(api)
        scheduler_api.api_server = "https://api.schedule.nylas.com"
        RestfulModelCollection.__init__(self, Scheduler, scheduler_api)

    def get_google_availability(self):
        """Return availability for the Google provider."""
        return self._execute_provider_availability("google")

    def get_office_365_availability(self):
        """Return availability for the Office 365 provider."""
        return self._execute_provider_availability("o365")

    def get_page_slug(self, slug):
        """Fetch the public info for a scheduler page by slug."""
        raw = self.api._get_resource_raw(
            self.model_class, slug, extra="info", path="schedule"
        )
        return Scheduler.create(self.api, **raw.json())

    def get_available_time_slots(self, slug):
        """Return the open SchedulerTimeSlot objects for the page."""
        raw = self.api._get_resource_raw(
            self.model_class, slug, extra="timeslots", path="schedule"
        )
        return [
            SchedulerTimeSlot.create(self.api, **slot)
            for slot in raw.json()
            if slot is not None
        ]

    def book_time_slot(self, slug, timeslot):
        """Book a time slot; returns a SchedulerBookingConfirmation."""
        payload = timeslot.as_json()
        booked = self.api._post_resource(
            self.model_class, slug, "timeslots", payload, path="schedule"
        )
        return SchedulerBookingConfirmation.create(self.api, **booked)

    def cancel_booking(self, slug, edit_hash, reason):
        """Cancel a booking identified by its edit hash, with a reason."""
        return self.api._post_resource(
            self.model_class,
            slug,
            "{}/cancel".format(edit_hash),
            {"reason": reason},
            path="schedule",
        )

    def confirm_booking(self, slug, edit_hash):
        """Confirm a pending booking identified by its edit hash."""
        confirmed = self.api._post_resource(
            self.model_class, slug, "{}/confirm".format(edit_hash), {}, path="schedule"
        )
        return SchedulerBookingConfirmation.create(self.api, **confirmed)

    def _execute_provider_availability(self, provider):
        """GET availability/<provider> for this collection."""
        endpoint = "availability/{}".format(provider)
        return self.api._get_resource_raw(
            self.model_class,
            None,
            extra=endpoint,
            path="schedule",
        ).json()
|
nylas/nylas-python
|
nylas/client/scheduler_restful_model_collection.py
|
Python
|
mit
| 2,374
|
from datetime import datetime, date, timedelta
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.generic import View
from app.models import Task
def get_day_of_week(date):
    """Return the weekday name (e.g. 'Friday') for the date object passed."""
    return format(date, "%A")
def get_full_date(date):
    """Return a string like 'November 27, 2009' for the given date."""
    return format(date, "%B %d, %Y")
def get_human(indate):
    """Describe indate relative to today: 'Today', 'Tomorrow',
    'Yesterday', 'The Day before Yesterday', 'The Day after Tomorrow',
    or 'Some Day' for anything further away.

    (Fixes: removed the unused `today` local, corrected the 'efore'
    docstring typo, and replaced try/except KeyError with dict.get.)
    """
    delta = indate - date.today()
    humans = {
        -2: 'The Day before Yesterday',
        -1: 'Yesterday',
        0: 'Today',
        1: 'Tomorrow',
        2: 'The Day after Tomorrow'
    }
    return humans.get(delta.days, "Some Day")
def date_from_string(indate):
    """Return a datetime.date parsed from a 'yyyy-mm-dd' string
    (e.g. '2012-11-21')."""
    parsed = datetime.strptime(indate, "%Y-%m-%d")
    return parsed.date()
def json_from_task(tasks, singleton=False):
    """ Returns a JSON string of the tasks, without any date markers.
    Useful if you are returning to an *aware* client, which already knows what
    date it is requesting the object for. E.g. in returning data to a client
    waiting on a POST request, etc..
    If you want the object to be treated as a single element (the JSON will have
    a single object), pass the singleton argument as True. On the other hand, if
    you want the tasks to be treated as a list, (the JSON will be an array even
    if there is only one element), leave singleton as False. """
    # rendering goes through Django templates rather than json.dumps
    if singleton:
        return render_to_string('task-single.json', {'task': tasks})
    else:
        return render_to_string('task-list.json', {'tasks': tasks})
""" Defines a set of exceptions with proper error codes according to my
interpretation of the HTTP standard.
http://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
class HttpResponseBadRequest(HttpResponse):
    """HTTP 400: the client sent an invalid or incomplete request."""
    def __init__(self, *args, **kwargs):
        super(HttpResponseBadRequest, self).__init__(*args, **kwargs)
        self.status_code = 400
class HttpResponseCreated(HttpResponse):
    """HTTP 201: a new resource was created successfully."""
    def __init__(self, *args, **kwargs):
        super(HttpResponseCreated, self).__init__(*args, **kwargs)
        self.status_code = 201
class Home(View):
    """ Defines the get/post/update/delete actions for the only view in the app.
    Note that even the delete, update methods are routed through a POST request
    on part of the client. This isn't very RESTful, but works on older browsers
    and can degrade gracefully. """

    def get(self, request, *args, **kwargs):
        """ Returns a fully formatted application or alternatively a JSON of tasks
        if so requested. """
        context = {}
        all_tasks = Task.objects.all().order_by('-pk')
        # requested date defaults to today
        if 'date' in request.GET:
            req_date = date_from_string(request.GET['date'])
        else:
            req_date = date.today()
        context['date'] = {'date': req_date.strftime("%Y-%m-%d"),
                           'full': get_full_date(req_date),
                           'day': get_day_of_week(req_date),
                           'human': get_human(req_date)}
        if 'done' in request.GET:
            # "unfinished" view: every not-done task regardless of date
            tasks = all_tasks.filter(done=False)
            context['date']['human'] = "Unfinished"
            context['date']['date'] = "unfinished"
        else:
            tasks = all_tasks.order_by('-pk').filter(date=req_date)\
                .filter(active=True)
        context['tasks'] = tasks
        # JSON representation when explicitly requested
        if 'type' in request.GET and request.GET['type'] == 'JSON':
            return render_to_response('task-list.json', context,
                                      context_instance=RequestContext(request))
        return render_to_response('planner.html', context,
                                  context_instance=RequestContext(request))

    def post(self, request, *args, **kwargs):
        """ Creates, Updates or Deletes tasks as requested by the client. """
        # Create an entirely new object
        if 'pk' not in request.POST:
            # Not enough data to do anything
            if 'task' not in request.POST:
                return HttpResponseBadRequest()
            if 'date' in request.POST:
                tdate = date_from_string(request.POST['date'])
            else:
                tdate = date.today()
            task = Task.objects.create(date=tdate, task=request.POST['task'],
                                       done=False)
            # NOTE(review): create() already saves; this save() is redundant.
            task.save()
            return HttpResponseCreated(json_from_task(task, True))
        # This is a request for an update
        if 'pk' in request.POST:
            # NOTE(review): exists() + get() issues two queries; a single
            # try/except Task.DoesNotExist would be enough.
            task = Task.objects.filter(pk=request.POST['pk'])
            if not task.exists():
                return HttpResponseBadRequest()
            task = Task.objects.get(pk=request.POST['pk'])
            if 'done' in request.POST:
                task.done = True if request.POST['done'] == '1' else False
            if 'task' in request.POST:
                task.task = request.POST['task']
            if 'date' in request.POST:
                task.date = date_from_string(request.POST['date'])
            if 'defer' in request.POST:
                # defer '1' pushes the task to tomorrow; anything else pulls
                # it back to today
                task.date = date.today() + timedelta(days=1) if request.POST['defer'] == '1' else date.today()
            if 'active' in request.POST:
                task.active = False if request.POST['active'] == '0' else True
            task.save()
            return HttpResponse(json_from_task(task, True))
|
schatten/planner
|
app/views.py
|
Python
|
mit
| 5,291
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-18 15:37
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.7: switches User.username to the
    # 150-char field with the UnicodeUsernameValidator.

    dependencies = [
        ('users', '0002_auto_20170718_1517'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
        ),
    ]
|
OleaGulea/fin_manager
|
fin_manager/users/migrations/0003_auto_20170718_1837.py
|
Python
|
mit
| 754
|
from django.shortcuts import render, get_object_or_404, redirect
from optboard.settings import MEDIA_ROOT
from collections import OrderedDict
from queue import Queue
import subprocess
import base64
import io
import os
import re
import ast
from .models import Solver, Result, Project
from .forms import SolverForm, ProjectForm
def project_select(request):
    """Render the project selection page listing all projects."""
    projects = Project.objects.all()
    return render(request, 'dashboard/project_select.html', {'projects': projects})
def project_edit(request, edit_project_id=None):
    """Create or edit a project.

    With edit_project_id, edits the existing project (404 when missing);
    otherwise creates a new one. A valid POST saves and returns to the
    selection page; otherwise the (possibly bound) form is re-rendered.
    """
    if edit_project_id:
        edit_project = get_object_or_404(Project, pk=edit_project_id)
    else:
        edit_project = Project()
    if request.method == 'POST':
        form = ProjectForm(request.POST, instance=edit_project)
        if form.is_valid():
            edit_project = form.save(commit=False)
            edit_project.save()
            return redirect('dashboard:project_select')
    else:
        form = ProjectForm(instance=edit_project)
    return render(request, 'dashboard/project_edit.html', dict(form=form, project=None, edit_project=edit_project))
def project_del(request, del_project_id):
    """Delete the given Project and return to the selection page."""
    Project.objects.get(pk=del_project_id).delete()
    return redirect('dashboard:project_select')
def main_page(request, project_id):
    """Main dashboard page for one project.

    GET parameters:
        solver_id: preselects the solver whose parameter form is shown.
        sort-type: field name used to order the result list (default 'id').

    A POST triggers a synchronous solver run (see ``run``) with parameter
    values taken from the submitted form.
    """
    select_solver = None
    sort_type = 'id'
    for key, val in request.GET.items():
        if key == 'solver_id':
            select_solver_id = request.GET['solver_id']
            select_solver = Solver.objects.get(pk=select_solver_id)
        elif key == 'sort-type':
            sort_type = request.GET['sort-type']
    project = Project.objects.get(pk=project_id)
    solvers = Solver.objects.all().filter(project_id=project_id).order_by('id')
    # NOTE(review): sort_type comes straight from the query string; an
    # unknown field name makes order_by raise — confirm the UI only submits
    # valid field names.
    results = Result.objects.all().filter(project_id=project_id).order_by(sort_type)
    if request.method == 'POST':
        solver = Solver.objects.get(name=request.POST['solver'])
        params = OrderedDict()  # preserve the solver's key order for the command line
        for key in solver.get_keys():
            params[key] = request.POST[key]
        run(project, solver, params, request.POST['comment'])
    return render(request, 'dashboard/main_page.html', {'project': project, 'solvers': solvers, 'results': results, 'select_solver': select_solver})
def result_del_all(request, project_id):
    """Remove every Result of the project, then return to its main page."""
    project = Project.objects.get(pk=project_id)
    for result in Result.objects.all().filter(project_id=project_id).order_by('id'):
        result.delete()
    return redirect('dashboard:main', project_id=project.id)
def loopstr2obj(v):
    """Convert a loop-describing string (float, list, or range literal) to
    the corresponding Python object.

    "range(a[, b[, c]])" with integer arguments becomes a list; anything
    else is parsed with ``ast.literal_eval`` (so only Python literals are
    accepted).

    Fix: the original used an unanchored ``re.match`` and then ``eval`` on
    the whole string, so trailing garbage after a valid range prefix was
    passed to eval. We now require a full match and build the range from
    the captured integers — no eval at all.
    """
    m = re.fullmatch(r'range\((\d+)(?:, *(\d+))?(?:, *(\d+))?\)', v)
    if m is not None:
        args = [int(g) for g in m.groups() if g is not None]
        return list(range(*args))
    return ast.literal_eval(v)
def run(project, solver, runlist, comment):
    """Run the solver once per combination of the looped parameter values.

    Builds the cartesian product of all parameter loops as command lines,
    executes each, parses the evaluation value from the command's output
    and stores one Result row per run.
    """
    cmds = Queue()
    # Choose the interpreter by the solver file's extension.
    root, ext = os.path.splitext(str(solver.solver_file))
    if ext == '.py':
        cmds.put("python {}/{} ".format(MEDIA_ROOT, solver.solver_file))
    else:
        cmds.put("./{}/{} ".format(MEDIA_ROOT, solver.solver_file))
    # Expand each parameter's loop: every existing command is duplicated
    # once per value, producing the full cartesian product.
    for k, v in runlist.items():
        v = loopstr2obj(v)
        ncmds = Queue()
        while not cmds.empty():
            cmd = cmds.get()
            if isinstance(v, list) or isinstance(v, range):
                v = list(v)
                for t in v:
                    ncmds.put(cmd + "{} ".format(t))
            else:
                cmd += "{} ".format(v)
                ncmds.put(cmd)
        cmds = ncmds
    while not cmds.empty():
        cmd = cmds.get()
        try:
            # NOTE(review): shell=True with values originating from the web
            # form — shell-injection risk; confirm inputs are trusted.
            res = subprocess.check_output(cmd, shell=True)
            res = res.split(b'\n')
            res = res[-2].strip() # keep only the last (non-empty) output line's value
        except subprocess.CalledProcessError as e:
            print("Error:{}".format(e))
            res = "-1.0"
        values = cmd.split()
        params = []
        # NOTE(review): i + 2 skips "python <script>"; for the non-.py
        # branch the command has only one leading token, so this offset
        # looks off by one — confirm.
        for i, k in enumerate(runlist.keys()):
            params.append((k, values[i + 2]))
        result = Result(name="no name", project=project, params=str(params), eval_val=res, solver=solver, elapsed_time=-1.0, comment=comment)
        result.save() # save once so the row gets a primary key
        result.name = "result_{0:05d}".format(result.pk)
        result.save()
def result_del(request, project_id, del_result_id):
    """Delete one Result and return to the project's main page."""
    project = Project.objects.get(pk=project_id)
    Result.objects.get(pk=del_result_id).delete()
    return redirect('dashboard:main', project_id=project.id)
def solver_list(request, project_id):
    """List every solver registered for the project."""
    project = Project.objects.get(pk=project_id)
    solver_qs = Solver.objects.filter(project_id=project_id).order_by('id')
    return render(request, 'dashboard/solver_list.html',
                  {'project': project, 'solvers': solver_qs})
def solver_edit(request, project_id, solver_id=None):
    """Create (no id) or edit (existing id) a Solver for the project."""
    project = Project.objects.get(pk=project_id)
    solver = get_object_or_404(Solver, pk=solver_id) if solver_id else Solver()
    if request.method != 'POST':
        form = SolverForm(instance=solver)
    else:
        form = SolverForm(request.POST, request.FILES, instance=solver)
        if form.is_valid():
            solver = form.save(commit=False)
            solver.save()
            return redirect('dashboard:solver_list', project_id=project.id)
    return render(request, 'dashboard/solver_edit.html',
                  dict(project=project, form=form, solver_id=solver_id))
def solver_del(request, project_id, solver_id):
    """Delete one Solver and return to the project's solver list.

    BUG FIX: the original ignored ``solver_id`` entirely and just
    re-rendered the main page, so the delete action never deleted anything.
    This now mirrors ``result_del``/``project_del``.
    """
    project = Project.objects.get(pk=project_id)
    Solver.objects.get(pk=solver_id).delete()
    return redirect('dashboard:solver_list', project_id=project.id)
def analysis1D(request, project_id):
    """1-D analysis: scatter-plot the evaluation value against one parameter.

    GET parameters:
        solver_id: solver whose parameter names populate the selector.
        select_param: name of the parameter plotted on the x axis.
    """
    project = Project.objects.get(pk=project_id)
    solvers = Solver.objects.all().filter(project_id=project_id).order_by('id')
    results = Result.objects.all().filter(project_id=project_id).order_by('id')
    select_solver = None
    select_param = None
    for key, val in request.GET.items():
        if key == 'solver_id':
            select_solver_id = request.GET['solver_id']
            select_solver = Solver.objects.get(pk=select_solver_id)
        elif key == 'select_param':
            select_param = request.GET['select_param']
    # Collect (selected-parameter value, evaluation value) pairs; both lists
    # stay empty (and the plot blank) while no parameter is selected.
    x = []
    y = []
    for r in results:
        params = ast.literal_eval(r.params)
        for p in params:
            if p[0] == select_param:
                x.append(float(p[1]))
                y.append(float(r.eval_val))
    graphic = make_plot(x, y, select_param)
    return render(request, 'dashboard/1Dplot.html', {'project': project, 'graphic': graphic, 'solvers': solvers, 'select_solver': select_solver})
def make_plot(x, y, xlabel):
    """Render an x/y scatter plot and return it as base64-encoded PNG bytes.

    Matplotlib is imported lazily (Agg backend, no display needed).
    """
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    figure = Figure()
    axes = figure.add_subplot(111)
    axes.plot(x, y, 'o')
    axes.set_xlabel(xlabel)
    axes.set_ylabel('eval_val')
    png_buffer = io.BytesIO()
    FigureCanvas(figure).print_png(png_buffer)
    encoded = base64.b64encode(png_buffer.getvalue())
    png_buffer.close()
    return encoded
def analysis2D(request, project_id):
    """Heat-map view: evaluation value over two selected solver parameters.

    GET parameters:
        solver_id: solver whose results are analysed.
        select_param1 / select_param2: parameter names for the x / y axes.

    Fixes over the original implementation:
      * min/max trackers are seeded with +/-inf; ``sys.float_info.min`` is
        the smallest *positive* float, so all-negative parameter values
        produced a wrong maximum.
      * the heat-map cell is written once per result, after both
        coordinates are known, instead of once per parameter (which could
        read an unbound or stale x/y).
    """
    import numpy as np
    project = Project.objects.get(pk=project_id)
    solvers = Solver.objects.all().filter(project_id=project_id).order_by('id')
    graphic = None
    select_solver = None
    select_param1 = None
    select_param2 = None
    IS_PARAM = False
    for key, val in request.GET.items():
        if key == 'solver_id':
            select_solver_id = request.GET['solver_id']
            select_solver = Solver.objects.get(pk=select_solver_id)
        elif key == 'select_param1':
            select_param1 = request.GET['select_param1']
            IS_PARAM = True
        elif key == 'select_param2':
            select_param2 = request.GET['select_param2']
    if IS_PARAM:
        results = Result.objects.all().filter(project_id=project_id, solver=select_solver).order_by('id')
        # Range of the two selected parameters over all results.
        xmin = ymin = float('inf')
        xmax = ymax = float('-inf')
        for r in results:
            params = ast.literal_eval(r.params)
            for p in params:
                if p[0] == select_param1:
                    xmin = min(xmin, float(p[1]))
                    xmax = max(xmax, float(p[1]))
                elif p[0] == select_param2:
                    ymin = min(ymin, float(p[1]))
                    ymax = max(ymax, float(p[1]))
        # Grid sized from the observed ranges (delta = cells per unit step).
        delta = 1.0
        xn = int((xmax - xmin) * delta)
        yn = int((ymax - ymin) * delta)
        data = np.zeros((xn + 1, yn + 1))
        # Fill in the evaluation values, one cell per result.
        for r in results:
            params = ast.literal_eval(r.params)
            x = y = None
            for p in params:
                if p[0] == select_param1:
                    x = int((float(p[1]) - xmin) * delta)
                elif p[0] == select_param2:
                    y = int((float(p[1]) - ymin) * delta)
            if x is not None and y is not None:
                data[x][y] = r.eval_val
        graphic = make_heatmap(data, select_param1, select_param2)
    return render(request, 'dashboard/2Dplot.html', {'project': project, 'graphic': graphic, 'solvers': solvers, 'select_solver': select_solver})
def make_heatmap(data, xlabel, ylabel):
    """Render a 2-D array as a colour-mapped heat map (base64 PNG bytes)."""
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    import matplotlib.pyplot as plt
    import numpy as np
    figure = Figure()
    axes = figure.add_subplot(111)
    cells = axes.pcolor(data, cmap=plt.cm.Blues)
    figure.colorbar(cells)
    # Centre the tick marks on the cells.
    axes.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
    axes.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
    axes.invert_yaxis()
    axes.xaxis.tick_top()
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    png_buffer = io.BytesIO()
    FigureCanvas(figure).print_png(png_buffer)
    encoded = base64.b64encode(png_buffer.getvalue())
    png_buffer.close()
    return encoded
|
tanutarou/OptBoard
|
dashboard/views.py
|
Python
|
mit
| 10,474
|
import random
from transitions import *
class Markov(object):
    """Drives an agent's state machine one Markov step at a time.

    The agent is expected to expose ``markov_matrix``, ``state``,
    ``machine.states`` (ordered mapping of state name -> state object) and
    ``triggers`` (state name -> trigger expression).
    """
    def __init__(self, agent_aux):
        self.agent = agent_aux
        self.markov = TransitionMatrix(self.agent.markov_matrix)
    def runStep(self, markov_matrix):
        """Sample the next state from *markov_matrix* and fire its trigger."""
        self.markov.matrix = markov_matrix
        currentState = self.agent.state
        numberCurrentState = False
        # Position of the current state within the machine's state order.
        self.n = 0
        for state in self.agent.machine.states:
            if state == currentState:
                break
            self.n = self.n + 1
        numberCurrentState = self.n
        numberNextState = self.markov.getNextState(numberCurrentState)
        # getNextState returns False when no transition was sampled.
        if numberNextState == False:
            return
        listKeyStates = list(self.agent.machine.states.keys())
        nextState = self.agent.machine.states[listKeyStates[numberNextState]]
        for n in self.agent.triggers.keys():
            if n == nextState.name:
                trigger = self.agent.triggers[n]
                # NOTE(review): eval of an attribute expression built from
                # agent.triggers — presumably the trigger string is a method
                # call / attribute on the agent; confirm triggers are trusted
                # (getattr would be safer for plain names).
                cast = 'self.agent.'+ trigger
                eval(cast)
class TransitionMatrix(object):
    """Row-stochastic transition matrix whose rows hold percentage weights.

    ``getNextState`` draws a random threshold in [0, 100] and walks the
    current state's row, accumulating weights until the running sum exceeds
    the threshold; that position is the next state.  Returns ``False`` when
    no prefix sum ever exceeds the threshold (e.g. an all-zero row).
    """

    def __init__(self, matrix):
        self.matrix = matrix

    def getNextState(self, NumberCurrentState):
        row = self.matrix[NumberCurrentState]
        threshold = random.randrange(0, 101)
        cumulative = 0
        self.n = -1  # mirrors the original's exposed counter attribute
        for position, weight in enumerate(row):
            self.n = position
            cumulative += weight
            if cumulative > threshold:
                return position
        return False
|
gsi-upm/soba
|
projects/oldProyects/EWESim/behaviourMarkov.py
|
Python
|
mit
| 1,237
|
"""
The DNSLookupCheckCollector does a DNS lookup and returns a check
##### Dependencies
* socket
Example config file DNSLookupCheckCollector.conf
```
enabled = True
ttl = 150
dnsAddressList = www.google.com, www.yahoo.com
```
"""
from collections import defaultdict
import diamond.collector
try:
    import socket
except ImportError:
    # BUG FIX: the original called self.log.error here, but there is no
    # `self` at module scope — that line itself raised NameError. The
    # collector can log/handle a missing module when it runs.
    socket = None
try:
    import netuitive
except ImportError:
    netuitive = None
class DNSLookupCheckCollector(diamond.collector.Collector):
    """Diamond collector that posts a Netuitive/Metricly "check" for each
    host in ``dnsAddressList`` that currently resolves via DNS."""
    def __init__(self, *args, **kwargs):
        super(DNSLookupCheckCollector, self).__init__(*args, **kwargs)
        self.hostname = self.get_hostname()
        self.ttl = self.config['ttl']
        # Without the netuitive client library there is nothing to post to,
        # so the collector disables itself.
        if not netuitive:
            self.log.error('netuitive import failed. dnslookupcheck disabled')
            self.enabled = False
            return
        try:
            self.version = self._get_version()
            self.api = netuitive.Client(self.config['netuitive_url'],
                                        self.config['netuitive_api_key'], self.version)
        except Exception as e:
            # NOTE(review): a failed client construction leaves self.api
            # unset, so collect() would raise AttributeError — confirm this
            # is handled upstream.
            self.log.debug(e)
    def get_default_config_help(self):
        """Describe the collector-specific configuration keys."""
        config_help = super(
            DNSLookupCheckCollector, self).get_default_config_help()
        config_help.update({
            'ttl': 'number of seconds until Metricly should expire the check',
            'dnsAddressList':
                'array of domains to lookup (ex: www.google.com, www.yahoo.com)',
        })
        return config_help
    def get_default_config(self):
        """Default TTL and lookup list used when the config file omits them."""
        default_config = super(
            DNSLookupCheckCollector, self).get_default_config()
        default_config['ttl'] = 150
        default_config['dnsAddressList'] = ['google.com']
        return default_config
    def collect(self):
        """
        Overrides the Collector.collect method
        """
        #check to see if the dns name returns an IP address
        for dnsAddress in self.config['dnsAddressList']:
            try:
                # The resolved address itself is unused; resolution success
                # is the signal, and a check is posted only on success.
                addr = socket.gethostbyname(dnsAddress)
                check = netuitive.Check(dnsAddress, self.hostname, self.ttl)
                self.api.post_check(check)
            except socket.gaierror:
                self.log.error ('cannot resolve hostname')
|
Netuitive/netuitive-diamond
|
src/collectors/dnslookupcheck/dnslookupcheck.py
|
Python
|
mit
| 2,280
|
from __future__ import absolute_import
import logging
from math import ceil
import numpy as np
import subprocess
from . import liblinear_utils
import local_pyutils
# Index base of the frame labels: 0 here (zero-based); 1 for the legacy
# MATLAB pipeline. Functions below pass `zero_based=not ONE_BASED` through.
ONE_BASED = 0 # until we don't support the MATLAB version
# if not ONE_BASED:
#     raise NotImplementedError('ZERO_BASED not supported.')
def create_all_shuffled_files(infile, outfiles_train, outfiles_permutation, num_shuffles,
                              shuffle_size, ignore_y_indices=True):
    """ Take a .train file and permute it according to the shuffling parameters.

    infile: path of the .train file to shuffle.
    outfiles_train / outfiles_permutation: per-shuffle output paths
        (each of length num_shuffles).

    Note if ignore_y_indices=True, this will simply shuffle the lines of the files. Otherwise,
    we have to do some more work to shuffle lines with the same number together, and to shuffle
    them in groups.
    """
    logging.info('Generating shuffles')
    if ignore_y_indices:
        assert shuffle_size == 1, NotImplementedError
        for shuffle_index in range(num_shuffles):
            logging.info('Shuffling file lines {}/{}'.format(shuffle_index+1, num_shuffles))
            subprocess.check_call(' '.join(['shuf', infile, '>', outfiles_train[shuffle_index]]),
                                  shell=True)
            subprocess.check_call(' '.join(['awk \'{print $1}\'', outfiles_train[
                shuffle_index], '>', outfiles_permutation[shuffle_index]]), shell=True)
    else:
        # BUG FIX: logging.WARNING is the integer level constant, not a
        # function — calling it raised TypeError. logging.warning() is the
        # module-level logging call.
        logging.warning('shuffle_size > 1 ({}). Will take longer to shuffle.'.format(shuffle_size))
        # Get first (non-)shuffle
        X, y = liblinear_utils.read(infile, zero_based=not ONE_BASED)
        randomized_indices = [idx + ONE_BASED for idx in range(int(max(y)))]
        # Save original file
        subprocess.check_call(['cp', infile, outfiles_train[0]])
        local_pyutils.save_array(randomized_indices, outfiles_permutation[0])
        # TODO(allie): make train files zero-based; generate them with the python library rather than
        # the MATLAB library.
        for shuffle_index in [idx for idx in range(num_shuffles)]:
            # shuffle the frames
            logging.info('Shuffling file lines {}/{}'.format(shuffle_index+1, num_shuffles))
            create_shuffle(X, y,
                           outfiles_train[shuffle_index],
                           outfiles_permutation[shuffle_index],
                           shuffle_size)
def create_shuffle(X, y, outfile_train, outfile_permutation, shuffle_size):
    """Write one block-shuffled copy of (X, y) plus its permutation file.

    X, y: feature matrix and frame labels as produced by liblinear_utils.read.
    """
    # shuffle the frames
    randomized_indices, _ = block_shuffle(y, shuffle_size)
    liblinear_utils.write(X[randomized_indices,:], y[randomized_indices],
                          outfile_train, zero_based=not ONE_BASED)
    # Save indices for debugging
    local_pyutils.save_array(randomized_indices, outfile_permutation)
def block_shuffle(indices, block_size, one_based=ONE_BASED):
    # TODO(allie): Handle zero-indexing here too (currently assuming first frame index = 1)
    """
    Shuffles indices according to 'blocks' of size block_size. All 'blocks' start with index 1.
    inputs:
       indices (array-like, shape (N,)): integers to shuffle
       block_size (int): 'chunk' size: how many consecutive-valued indices to shuffle together
       one_based: If False, assumes 0 must be the starting index.  Else, assumes 1 must be.
    returns: (shuffled_indices, block_matrix):
        permutation (list, shape (N,)): permutation for the indices array.
            Contains all values in (0,N).  permutation[indices] gives a permuted array.
        indices_to_blocks (list, (block_size,M)) where ceil(block_size*M)=N:
        indices_to_blocks_matrix[:,2] is the list of indices assigned to (shuffled) block #3.
    Example:
        shuffle.block_shuffle([1,1,1,2,2,2,3,3,4,5,6,1,1], 2)
            shuffled_indices = [9, 10, 6, 7, 8, 0, 1, 2, 11, 12, 3, 4, 5]
            indices_to_blocks = [[1,3,5],
                                 [2,4,6]]
    """
    one_based = int(bool(one_based)) # Handles booleans / non-0/1's
    # Figure out how many blocks we need
    max_index = max(indices)
    num_blocks = int(ceil(float(max_index + 1 - one_based) / block_size))
    # Assign each index to a block
    unique_indices = np.concatenate([
        np.arange(one_based, max_index + 1),
        # NOTE(review): local_pyutils.nans presumably returns a NaN-filled
        # array of the given shape, padding the final partial block — confirm.
        local_pyutils.nans((int(block_size * num_blocks - (max_index + 1 - one_based)),))
    ])
    indices_to_blocks = np.reshape(unique_indices, (block_size, num_blocks), order="F")
    # Make list of shuffled index values
    shuffled_block_indices = np.random.permutation(num_blocks)
    shuffled_unique_indices = np.reshape(indices_to_blocks[:, shuffled_block_indices],
                                         (1, -1), order="F")
    # Boolean masking drops the NaN padding and flattens to 1-D.
    shuffled_unique_indices = shuffled_unique_indices[~np.isnan(shuffled_unique_indices)]
    # Find locations of index values in indices.
    permutation = list([index for unique_index in shuffled_unique_indices for index in
                        np.where(indices == unique_index)[0]])
    return permutation, indices_to_blocks
|
alliedel/anomalyframework_python
|
anomalyframework/shuffle.py
|
Python
|
mit
| 4,986
|
import settings as s
import decimal
from csv import DictReader, DictWriter
from sys import exit
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
DEBUG = s.DEBUG
def get_name_list(poi_dataset):
    """
    Returns list of names of POIs in order of appearance in dataset
    """
    assert not isinstance(poi_dataset, basestring), 'POI dataset is not list or tuple'
    return [poi[s.NAME_KEY] for poi in poi_dataset]
def half_even(num_val, n_places=s.DEFAULT_ROUNDING):
    """
    ROUND_HALF_EVEN a point to n_places decimal places
    """
    # Clamp unsupported precision back to the project default.
    if not 0 < n_places <= 8:
        print "Can only round to 1-8 decimal places. Rounding to default"
        n_places = s.DEFAULT_ROUNDING
    try:
        # e.g. n_places=4 -> "0.0001": the quantization step for Decimal.
        rounding = str(10**int(-1 * n_places))
        x = float(decimal.Decimal("%s" % num_val).quantize(decimal.Decimal(rounding),
                  rounding=decimal.ROUND_HALF_EVEN))
    except ValueError as e:
        # Re-raise after reporting which value failed to parse.
        e = "Could not round %r" % num_val
        print e
        raise
    return x
def import_poi_csv(source_csv, lat_col=s.LAT_KEY,
                   lng_col=s.LNG_KEY, rounding=s.DEFAULT_ROUNDING):
    """
    Reads in CSV, converting each row to a POI dictionary and attempting
    to half-even round lat and lng to rounding level.
    Appends to a list and returns the list for iterable in-memory processing
    TODO make this more generic on column names with a lambda function
    """
    if str(source_csv)[-4:] != '.csv':
        print "import_poi_csv: %s is not a csv file" % source_csv
        exit(1)
    poi_dataset = list()
    poi_count = 0
    with open(source_csv) as source:
        data = DictReader(source)
        for row in data:
            try:
                row[lat_col] = half_even(row[lat_col], rounding)
                row[lng_col] = half_even(row[lng_col], rounding)
            # NOTE(review): bare except hides whether the failure was a
            # missing column (KeyError) or a bad value (ValueError) — the
            # printed message only covers the missing-column case.
            except:
                print "No %s, %s entries in data file %s" % (lat_col, lng_col,
                    source_csv)
                exit(1)
            poi_dataset.append(row)
            poi_count += 1
    if DEBUG:
        print "Imported %d POIs successfully from %s" % (poi_count, source_csv)
    return poi_dataset
def print_dbscan_metrics(X, n_clusters_, labels_true, labels):
    """
    Print sklearn metrics on DBSCAN to screen.

    X: feature matrix used for clustering (needed by the silhouette score).
    labels_true / labels: ground-truth and DBSCAN-assigned cluster labels.
    """
    print "\nModel Performance and Metrics"
    print "="*80
    print('Estimated number of clusters: %d' % n_clusters_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, labels))
def add_zoas_to_poi_dataset(dbscan_labels, poi_dataset):
    """
    Modifies a list of POI dictionaries to add ZOA values obtained via DBSCAN.
    Returns a single unified dictionary for easy iteration
    """
    updated = []
    # zip truncates to the shorter sequence, matching labels to POIs in order.
    for cluster_label, poi in zip(dbscan_labels, poi_dataset):
        poi[s.ZOA_KEY] = cluster_label
        updated.append(poi)
    return updated
def plot_results(labels, X, core_samples_mask):
    """
    Generates a matplotlib window of cluster.
    POIs that make up clusters have large, colored circles
    Color is driven by Spectral distribution of colors across number of clusters
    POIs that are noise (i.e, outside clusters) are small black dots.
    """
    print "\nPlotting Results with MatPlotLib"
    print "="*80
    # Black removed and is used for noise instead.
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
            col = 'k'
        class_member_mask = (labels == k)
        # Core samples of this cluster: large markers.
        xy = X[class_member_mask & core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=14)
        # Non-core members: small markers.
        xy = X[class_member_mask & ~core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=6)
    # NOTE(review): when -1 (noise) is present the title counts it as a
    # cluster — confirm this is intended.
    plt.title('Estimated number of clusters: %d' % len(unique_labels))
    plt.show()
def output_results(poi_result_set, screen=True, outfile=None):
"""
Outputs unified DBSCAN results to screen or csv file.
The screen only shows major data elements. The CSV file has the
complete dictionary (i.e., base dictionay plus ZOA attributes for each POI)
"""
assert not isinstance(poi_result_set, basestring), 'POI result set is not list or tuple'
if screen:
print "\nZOAs by POI"
print "="*80,
for poi in poi_result_set:
print "\nLocation:\t%s" % poi[s.NAME_KEY]
print "Address:\t%s" % poi[s.ADDR_KEY]
print "Neighborhood:\t%s" % poi[s.NBHD_KEY]
print "Coordinates:\t%.4f, %.4f" % (poi[s.LAT_KEY], poi[s.LNG_KEY])
print "ZOA ID:\t\t%d" % poi[s.ZOA_KEY]
if outfile:
assert isinstance (outfile, str), "Outfile name is not a string: %r" % name
if outfile[-4:] != '.csv': outfile += '.csv'
with open(outfile, 'wb') as f:
target = DictWriter(f, poi_result_set[0].keys())
target.writeheader()
target.writerows(poi_result_set)
print "\nWrote output to %s.\n" % outfile
|
JimHaughwout/GADM_DBSCAN
|
utils.py
|
Python
|
mit
| 5,820
|
########################################################################
# Helper functions
########################################################################
import os
import json
def save_wardata(wardata):
    """Persist one war's raw data under ``warlog/<clantag><preptime>``.

    Does nothing while ``wardata['state']`` is ``'notInWar'``.  The war id
    combines the clan tag (without the leading '#') and the preparation
    start time.
    """
    if wardata['state'] != 'notInWar':
        war_id = "{0}{1}".format(wardata['clan']['tag'][1:],
                                 wardata['preparationStartTime'])
        if not os.path.exists('warlog'):
            os.mkdir('warlog')
        path = os.path.join('warlog', war_id)
        # Context manager closes (and flushes) the file deterministically;
        # the original passed an open() straight to json.dump and leaked it.
        with open(path, 'w', encoding='utf-8') as fp:
            json.dump(wardata, fp, ensure_ascii=False)
def save_latest_data(wardata, monitor):
    """Save *wardata* to the war log and to the
    ``latest_downloaded_wardata.json`` snapshot.  No-op when falsy.

    ``monitor`` is accepted for interface compatibility but unused here.
    """
    if wardata:
        save_wardata(wardata)
        # with-block closes the snapshot file; the original leaked the handle.
        with open('latest_downloaded_wardata.json', 'w',
                  encoding='utf-8') as fp:
            json.dump(wardata, fp, ensure_ascii=False)
########################################################################
# DB helper classes (mainly to faciliate serveress)
########################################################################
class SimpleKVDB(object):
    """Minimal mapping facade over a syncing key-value backend.

    Every write is followed by ``sync()`` on the backend so data hits disk
    immediately (shelve-style stores).
    """

    def __init__(self, db):
        self._db = db

    def __contains__(self, k):
        return k in self._db

    def __getitem__(self, k):
        return self._db[k]

    def __setitem__(self, k, v):
        self._db[k] = v
        self._db.sync()  # flush eagerly after every write
|
mehdisadeghi/clashogram
|
clashogram/utils.py
|
Python
|
mit
| 1,378
|
from .base import (rpartial, BaseAllocateCase, BaseSplitCase, BasePackCase,
BaseGetBandCase, BaseMergeCase, BaseUnpackCase)
from .pillow import Image, PillowTestCase
class AllocateCase(BaseAllocateCase):
    """Benchmark: allocate a fresh image of the configured mode/size."""
    def runner(self):
        Image.new(self.mode, self.size)
class UnpackCase(BaseUnpackCase, PillowTestCase):
    """Benchmark: load raw bytes into an existing image (frombytes)."""
    def create_test_data(self):
        # Pre-serialize the base image so the timed runner measures only
        # frombytes(), not the byte export.
        im = super().create_test_data()[0]
        return [im, im.tobytes()]
    def runner(self, im, data):
        im.frombytes(data)
class PackCase(BasePackCase, PillowTestCase):
    """Benchmark: serialize an image to raw bytes (tobytes)."""
    def runner(self, im):
        im.tobytes()
class SplitCase(BaseSplitCase, PillowTestCase):
    """Benchmark: split an image into its per-band images."""
    def runner(self, im):
        im.split()
class GetBandCase(BaseGetBandCase, PillowTestCase):
    """Benchmark: extract a single band (``self.band``) from an image."""
    def runner(self, im):
        self.getchannel(im, self.band)
class MergeCase(BaseMergeCase, PillowTestCase):
    """Benchmark: merge separate band images into one multi-band image."""
    def runner(self, bands):
        Image.merge(self.mode, bands)
# Benchmark matrix: every (operation, mode[, band]) combination this suite
# runs. rpartial pre-binds the constructor arguments for deferred creation.
cases = [
    rpartial(AllocateCase, 'L'),
    rpartial(AllocateCase, 'LA'),
    rpartial(AllocateCase, 'RGB'),
    rpartial(AllocateCase, 'RGBA'),
    rpartial(UnpackCase, 'L'),
    rpartial(UnpackCase, 'LA'),
    rpartial(UnpackCase, 'RGB'),
    rpartial(UnpackCase, 'RGBA'),
    rpartial(PackCase, 'L'),
    rpartial(PackCase, 'LA'),
    rpartial(PackCase, 'RGB'),
    rpartial(PackCase, 'RGBA'),
    rpartial(SplitCase, 'LA'),
    rpartial(SplitCase, 'RGB'),
    rpartial(SplitCase, 'RGBA'),
    rpartial(GetBandCase, 'RGB', 0),
    rpartial(GetBandCase, 'RGBA', 3),
    rpartial(MergeCase, 'LA'),
    rpartial(MergeCase, 'RGB'),
    rpartial(MergeCase, 'RGBA'),
]
|
python-pillow/pillow-perf
|
testsuite/cases/pillow_allocate.py
|
Python
|
mit
| 1,605
|
"""Build helper: compile every .d source under the current tree with dmd.

Arguments beginning with '-' are forwarded to dmd; the output file is named
after the current working directory and moved into ./bin.
"""
import os, sys
cmdstring = "dmd "
currentDirName = os.path.basename( os.getcwd() )
# Platform-specific path separator and default output suffix.
if sys.platform.startswith( "win" ):
    slash = "\\"
    outfileSuffix = ".exe"
else:
    slash = "/"
    outfileSuffix = ".bin"
# Recursively add all files in "./" and below for compilation.
for dirpath, dirlist, filelist in os.walk( "." ):
    for file in filelist:
        if file.endswith( ".d" ):
            cmdstring += dirpath + slash + file + " "
# Append any dmd options.
for option in sys.argv[ 1: ]:
    if option.startswith( "-" ):
        cmdstring += option + " "
    # Building a library changes the output suffix.
    if option == "-lib":
        outfileSuffix = ".lib"
# Name output file as the name of our current working directory,
outfileName = currentDirName + outfileSuffix
cmdstring += "-of" + outfileName
# Run the compilation, and remove temp files.
print( cmdstring )
os.system( cmdstring )
tempfile = currentDirName + ".obj"
if os.path.exists( tempfile ):
    os.remove( tempfile )
# Move the output file to "/bin".
outfileLocation = "." + slash + "bin" + slash
if not os.path.exists( outfileLocation ):
    os.mkdir( outfileLocation )
os.replace( outfileName, outfileLocation + outfileName )
|
jackoblades/D_MarkovChain
|
make.py
|
Python
|
mit
| 1,113
|
# views.py - Django views for notification management web interface
#
# Copyright (c) 2014, 2015, 2017 Jim Fenton
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from django import forms
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.core.exceptions import SuspiciousOperation
from django.db import models
from django.forms import ModelForm, Textarea
from django.forms.models import modelformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.template import loader
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from mgmt.models import Authorization, Priority, Notification, Userext, Method, Rule
import uuid
# TODO: Need a much better place to specify this!
NOTIF_HOST = "altmode.net:5342"
class SettingsForm(ModelForm):
    """Per-user notification settings (email relay + Twilio credentials).

    The Twilio token renders as a password field but keeps its value so the
    form round-trips without wiping the stored token.
    """
    class Meta:
        model = Userext
        widgets = {
            'twilio_token': forms.PasswordInput(render_value=True),
        }
        fields = ['email_username', 'email_server', 'email_port', 'email_authentication', 'email_security', 'twilio_sid', 'twilio_token', 'twilio_from']
class MethodForm(ModelForm):
    """Edit form for a single notification delivery Method."""
    class Meta:
        model = Method
        fields = ['active', 'name', 'type', 'address', 'preamble',]
@login_required
def auth(request):
    """List the user's non-deleted authorizations, ordered by description."""
    authorizations = Authorization.objects.filter(
        user=request.user, deleted=False).order_by('description')
    template = loader.get_template('mgmt/auth.html')
    context = {
        'page': 'auth',
        'authorization_list': authorizations,
        'priority_choices': Priority.PRIORITY_CHOICES,
    }
    return HttpResponse(template.render(context, request))
@login_required
def authdetail(request, address):
    """Show one authorization; on POST, apply the edit via ``authupdate``.

    Redirects to the authorization list when the POST deleted the record,
    or back to this detail page after an update.
    """
    try:
        authorization = Authorization.objects.get(address=address)
    except Authorization.DoesNotExist:
        raise Http404
    else:
        if (request.method == "POST"):
            authorization = authupdate(request, address)
            # authupdate returns None when the record was (soft-)deleted.
            if authorization is None:
                return HttpResponseRedirect("/auth")
            return HttpResponseRedirect(authorization.address)
        return render(request, 'mgmt/authdetail.html', {
            'authorization': authorization,
            'priority_choices': Priority.PRIORITY_CHOICES })
@login_required
def authupdate(request, address):
    """Apply a POSTed edit to one Authorization.

    Returns the updated Authorization, or None when the POST carried the
    'Delete' flag (the row is soft-deleted, not removed).

    NOTE(review): on the non-delete path 'description' and 'maxpri' are
    read unconditionally and raise KeyError if absent — confirm the form
    always submits them.
    """
    a = get_object_or_404(Authorization, address=address)
    # Checkbox semantics: key present means active.
    if 'active' in request.POST:
        a.active = True
    else:
        a.active = False
    if 'Delete' in request.POST:
        a.deleted = True
    else:
        a.description = request.POST['description']
        a.maxpri = request.POST['maxpri']
    a.save()
    if a.deleted:
        return None
    else:
        return a
@login_required
@csrf_exempt
def authorize(request):
    """Render the confirmation page for a third-party authorization request.

    'domain' and 'redirect' are mandatory POST fields (SuspiciousOperation
    when absent); 'name' defaults to "" and 'maxpri' to 3.
    """
    name = ""
    domain = ""
    maxpri = 3
    redirect = ""
    if request.method == "POST":
        name = request.POST.get('name', name)
        maxpri = request.POST.get('maxpri', maxpri)
        for required in ('domain', 'redirect'):
            if required not in request.POST:
                raise SuspiciousOperation
        domain = request.POST['domain']
        redirect = request.POST['redirect']
    return render(request, 'mgmt/authnew.html', {
        'page': 'auth',
        'name': name,
        'domain': domain,
        'maxpri': int(maxpri),
        'redirect': redirect,
        'priority_choices': Priority.PRIORITY_CHOICES })
@login_required
def authcreate(request):
    """Create a new Authorization from the confirmation form.

    The notification address is a fresh UUID4. With a 'redirect' parameter
    the browser is sent back to the requesting site with the full address
    and maximum priority appended to the query string.
    """
    try:
        name = request.POST['description']
        domain = request.POST['domain']
        maxpri = request.POST['maxpri']
    except (KeyError):
        raise SuspiciousOperation("Missing POST parameter")
    else:
        if name=="":
            name="[unnamed]"
        a = Authorization(user=request.user,
                          address=str(uuid.uuid4()),
                          domain=domain,
                          description=name,
                          maxpri=maxpri,
                          active=True)
        a.save()
        redirect = ""
        if "redirect" in request.POST:
            redirect = request.POST['redirect']
        if redirect == "":
            return HttpResponseRedirect(a.address)
        # NOTE(review): redirect URL built by naive concatenation; values
        # are not URL-encoded — confirm upstream validation.
        return HttpResponseRedirect(request.POST['redirect']+"?addr="+a.address+"@"+NOTIF_HOST+"&maxpri="+maxpri)
def home(request):
    """Render the landing page.

    BUG FIX: ``Template.render(context, request)`` takes the context mapping
    first; the original passed ``request`` as the context, so the request
    never reached the template engine as a request.
    """
    template = loader.get_template('mgmt/home.html')
    return HttpResponse(template.render({}, request))
def dologout(request):
    """Terminate the session, then render the (now anonymous) home page."""
    logout(request)
    return home(request)
@login_required
def notif(request):
    """List unread notifications, most urgent first."""
    # above will add .filter(username=request.user.username)
    unread = Notification.objects.exclude(read=True).order_by('priority')
    template = loader.get_template('mgmt/notif.html')
    context = {
        'page': 'notif',
        'notification_list': unread,
        'priority_choices': Priority.PRIORITY_CHOICES,
    }
    return HttpResponse(template.render(context, request))
@login_required
def notifall(request):
    """List every notification (read or not), most urgent first."""
    # above will add .filter(username=request.user.username)
    everything = Notification.objects.order_by('priority')
    template = loader.get_template('mgmt/notifall.html')
    context = {
        'page': 'notif',
        'notification_list': everything,
        'priority_choices': Priority.PRIORITY_CHOICES,
    }
    return HttpResponse(template.render(context, request))
@login_required
def notifdetail(request, notID):
    """Show one notification and handle its Unread/Delete buttons.

    GET marks the notification read and renders the detail page. POST with
    'Delete' soft-deletes and returns to the list; POST with 'Unread'
    clears the read flag and returns to the list.

    BUG FIX: the original fell through after handling 'Unread' and
    immediately re-marked the notification read, so the button had no
    effect. It now redirects like 'Delete' does.
    """
    try:
        notification = Notification.objects.get(notid=notID)
    except Notification.DoesNotExist:
        raise Http404
    else:
        if (request.method == "POST"):
            if 'Unread' in request.POST:
                notification.read = False
                notification.save()
                return HttpResponseRedirect("/notif")
            if 'Delete' in request.POST:
                notification.deleted = True
                notification.save()
                return HttpResponseRedirect("/notif")
        # Viewing the detail page marks the notification as read.
        notification.read = True
        notification.save()
        return render(request, 'mgmt/notifdetail.html', {
            'page': '',
            'notification': notification,
            'priority_choices': Priority.PRIORITY_CHOICES })
@login_required
def settings(request):
    """Show and update the per-user notification settings (Userext row).

    NOTE(review): because POST handling lives in the try/except `else`
    branch, a POST from a user whose Userext row was just created on this
    request is ignored — confirm this is intended.
    """
    try:
        settings = Userext.objects.get(user=request.user)
    except Userext.DoesNotExist:
        # Create a user settings record with default values
        settings = Userext(user=request.user)
        settings.save()
    else:
        if (request.method == "POST"):
            # Bound form without `instance`; the fields are copied over
            # manually below rather than via form.save().
            form = SettingsForm(request.POST)
            if form.is_valid():
                settings.email_username = form.cleaned_data['email_username']
                settings.email_server = form.cleaned_data['email_server']
                settings.email_port = form.cleaned_data['email_port']
                settings.email_authentication = form.cleaned_data['email_authentication']
                settings.email_security = form.cleaned_data['email_security']
                settings.twilio_sid = form.cleaned_data['twilio_sid']
                settings.twilio_token = form.cleaned_data['twilio_token']
                settings.twilio_from = form.cleaned_data['twilio_from']
                settings.save()
                return HttpResponseRedirect("/settings")
    form = SettingsForm(instance=settings)
    return render(request, 'mgmt/settings.html', { 'page': 'settings', 'form': form, 'settings': settings })
@login_required
def methods(request):
    """Edit the user's notification delivery methods as an inline formset.

    NOTE(review): the formset is built with can_delete=True but the DELETE
    flag is never consulted, so ticking "delete" appears to have no effect
    — confirm.
    """
    MethodFormSet = modelformset_factory(Method, extra=1, exclude=('user',), can_delete = True)
    if (request.method == "POST"):
        formset = MethodFormSet(request.POST)
        if formset.is_valid():
            for form in formset:
                if 'id' in form.cleaned_data:
                    m = form.cleaned_data['id']
                    # A None id is the blank "extra" form: create a new
                    # Method owned by the current user.
                    if (m==None):
                        m = Method(user=request.user)
                    m.active = form.cleaned_data['active']
                    m.name = form.cleaned_data['name']
                    m.type = form.cleaned_data['type']
                    m.address = form.cleaned_data['address']
                    if 'preamble' in form.cleaned_data:
                        m.preamble = form.cleaned_data['preamble']
                    else:
                        m.preamble = ""
                    m.save()
            return HttpResponseRedirect("methods")
    else:
        formset = MethodFormSet(queryset=Method.objects.filter(user=request.user))
    return render(request, 'mgmt/methods.html', { 'page': 'methods', 'formset': formset })
@login_required
def rules(request):
    """Edit the user's notification routing rules as an inline formset.

    NOTE(review): as in ``methods``, can_delete's DELETE flag is never
    consulted; also the `initial` kwarg on a bound formset has no effect —
    confirm both.
    """
    RuleFormSet = modelformset_factory(Rule, extra=1, exclude=('user',), can_delete = True)
    if (request.method == "POST"):
        formset = RuleFormSet(request.POST, initial=[
            {'user': request.user,}])
        if formset.is_valid():
            for form in formset:
                if 'id' in form.cleaned_data:
                    r = form.cleaned_data['id']
                    # A None id is the blank "extra" form: create a new
                    # Rule owned by the current user.
                    if (r==None):
                        r = Rule(user=request.user)
                    r.active = form.cleaned_data['active']
                    r.priority = form.cleaned_data['priority']
                    # Blank priority falls back to 0 (lowest).
                    if r.priority == '':
                        r.priority = 0
                    r.domain = form.cleaned_data['domain']
                    r.method = form.cleaned_data['method']
                    r.save()
            return HttpResponseRedirect("rules")
    else:
        formset = RuleFormSet(queryset=Rule.objects.filter(user=request.user))
    return render(request, 'mgmt/rules.html', { 'page': 'rules', 'formset': formset })
|
jimfenton/notif-mgmt
|
mgmt/views.py
|
Python
|
mit
| 10,845
|