repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
bgroveben/python3_machine_learning_projects | oreilly_GANs_for_beginners/oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_knn_regression.py | Python | mit | 1,285 | 0.000778 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import euclidean_distances
from .datasets import make_wave
from .plot_helpers import cm3
def plot_knn_regression(n_neighbors=1):
X, y = make_wave(n_samples=40)
X_test = np.array([[-1.5], [0.9], [1.5]])
dist = euclidean_distances(X, X_test)
closest = np.argsort(dist, axis=0)
plt.figure(figsize=(10, 6))
reg = KNeighborsRegressor(n_neighbors=n_neighbors).fit(X, y)
y_pred = reg.predict(X_test)
for x, y_, neighbors in zip(X_test, y_pred, closest.T):
for neighbor in neighbors[:n_neighbors]:
plt.arrow(x[0], y_, X[ne | ighbor, 0] - x[0], y[neighbor] - y_,
head_width=0, fc='k', ec='k')
train, = plt.plot(X, y, 'o', c=cm3(0))
test, = plt.plot(X_test, -3 * np.ones(len(X_test)), '*', c=cm3(2),
markersize=20)
pred, = plt.plot(X_test, y_pred, '*', c=cm3(0), markersize=20)
plt.vlines(X_test, -3.1, 3.1, linestyle="--")
plt.legend([train, test, pred],
["training data/target", "test data", "test prediction"],
ncol=3, loc=(.1, 1.02 | 5))
plt.ylim(-3.1, 3.1)
plt.xlabel("Feature")
plt.ylabel("Target")
|
songmonit/CTTMSONLINE | addons/account/report/account_aged_partner_balance.py | Python | agpl-3.0 | 21,185 | 0.005806 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
super(aged_trial_report, self).__init__(cr, uid, name, context=context)
self.total_account = [ | ]
self.localcontext.update({
'time': time,
'get_lines_with_out_partner': self._get_lines_with_out_partner,
'get_lines': self._get_lines,
'get_total': self._get_total,
'get_direction': self._get_direction,
'get_for_period': self._get_for_period,
'get_company': self._get_company,
'get_currency': self._get_currency,
| 'get_partners':self._get_partners,
'get_account': self._get_account,
'get_fiscalyear': self._get_fiscalyear,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
ctx = data['form'].get('used_context', {})
ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
self.direction_selection = data['form'].get('direction_selection', 'past')
self.target_move = data['form'].get('target_move', 'all')
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if (data['form']['result_selection'] == 'customer' ):
self.ACCOUNT_TYPE = ['receivable']
elif (data['form']['result_selection'] == 'supplier'):
self.ACCOUNT_TYPE = ['payable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
res_partner.name AS name \
FROM res_partner,account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) \
AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND account_account.active\
AND ((reconcile_id IS NULL)\
OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND (l.partner_id=res_partner.id)\
AND (l.date <= %s)\
AND ' + self.query + ' \
ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
partners = self.cr.dictfetchall()
## mise a 0 du total
for i in range(7):
self.total_account.append(0)
#
# Build a string like (1,2,3) for easy use in SQL query
partner_ids = [x['id'] for x in partners]
if not partner_ids:
return []
# This dictionary will store the debit-credit for all partners, using partner_id as key.
totals = {}
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND ' + self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals[i[0]] = i[1]
# This dictionary will store the future or past of all partners
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query + |
stanford-mast/nn_dataflow | nn_dataflow/nns/lstm_phoneme.py | Python | bsd-3-clause | 1,086 | 0.000921 | """ $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This pro | gram is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Op | en Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer
from nn_dataflow.nns import add_lstm_cell
'''
LSTM for phoneme classification.
Graves and Schmidhuber, 2005
'''
NN = Network('PHONEME')
NN.set_input_layer(InputLayer(26, 1))
# Input.
NN.add('We', FCLayer(26, 140), prevs=(NN.INPUT_LAYER_KEY,))
# LSTM.
C, H = add_lstm_cell(NN, 'cell', 140, 'We')
# Output.
NN.add('Wd', FCLayer(140, 61), prevs=(H,))
|
UXE/local-edx | lms/djangoapps/instructor_task/tests/test_api.py | Python | agpl-3.0 | 9,831 | 0.003764 | """
Test for LMS instructor background task queue management
"""
from bulk_email.models import CourseEmail, SEND_TO_ALL
from courseware.tests.factories import UserFactory
from xmodule.modulestore.exceptions import ItemNotFoundError
from instructor_task.api import (
get_running_instructor_tasks,
get_instructor_task_history,
submit_rescore_problem_for_all_students,
submit_rescore_problem_for_student,
submit_reset_problem_attempts_for_all_students,
submit_delete_problem_state_for_all_students,
submit_bulk_course_email,
submit_calculate_students_features_csv,
submit_cohort_students,
)
from instructor_task.api_helper import AlreadyRunningError
from instructor_task.models import InstructorTask, PROGRESS
from instructor_task.tests.test_base import (InstructorTaskTestCase,
InstructorTaskCourseTestCase,
InstructorTaskModuleTestCase,
TestReportMixin,
TEST_COURSE_KEY)
class InstructorTaskReportTest(InstructorTaskTestCase):
"""
Tests API methods that involve the reporting of status for background tasks.
"""
def test_get_running_instructor_tasks(self):
# when fetching running tasks, we get all running tasks, and only running tasks
for _ in range(1, 5):
self._create_failure_entry()
self._create_success_entry()
progress_task_ids = [self._create_progress_entry().task_id for _ in range(1, 5)]
task_ids = [instructor_task.task_id for instructor_task in get_running_instructor_tasks(TEST_COURSE_KEY)]
self.assertEquals(set(task_ids), set(progress_task_ids))
def test_get_instructor_task_history(self):
# when fetching historical tasks, we get all tasks, including running tasks
expected_ids = []
for _ in range(1, 5):
expected_ids.append(self._create_failure_entry().task_id)
expected_ids.append(self._create_success_entry().task_id)
expected_ids.append(self._create_progress_entry().task_id)
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(TEST_COURSE_KEY, usage_key=self.problem_url)]
self.assertEquals(set(task_ids), set(expected_ids))
# make the same call using explicit task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_KEY,
usage_key=self.problem_url,
task_type='rescore_problem'
)]
self.assertEquals(set(task_ids), set(expected_ids))
# make the same call using a non-existent task_type:
task_ids = [instructor_task.task_id for instructor_task
in get_instructor_task_history(
TEST_COURSE_KEY,
usage_key=self.problem_url,
task_type='dummy_type'
)]
self.assertEquals(set(task_ids), set())
class InstructorTaskModuleSubmitTest(InstructorTaskModuleTestCase):
"""Tests API methods that involve the submission of module-based background tasks."""
def setUp(self):
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
def test_submit_nonexistent_modules(self):
# confirm that a rescore of a non-existent module returns an exception
problem_url = InstructorTaskModuleTestCase.problem_location("NonexistentProblem")
course_id = self.course.id
request = None
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_student(request, problem_url, self.student)
with self.assertRaises(ItemNotFoundError):
submit_rescore_problem_for_all_students(request, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_reset_problem_attempts_for_all_students(request, problem_url)
with self.assertRaises(ItemNotFoundError):
submit_delete_problem_state_for_all_students(request, problem_url)
def test_submit_nonrescorable_modules(self):
# confirm that a rescore of an existent but unscorable module returns an exception
# (Note that it is easier to test a scoreable but non-rescorable module in test_tasks,
# where we are creating real modules.)
problem_url = self.problem_section.location
course_id = self.course.id
request = None
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_student(request, problem_url, self.student)
with self.assertRaises(NotImplementedError):
submit_rescore_problem_for_all_students(request, problem_url)
def _test_submit_with_long_url(self, task_function, student=None):
problem_url_name = 'x' * 255
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
with self.assertRaises(ValueError):
if student is not None:
task_function(self.create_task_request(self.instructor), location, student)
else:
task_function(self.create_task_request(self.instructor), location)
def test_submit_rescore_all_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_all_students)
def test_submit_rescore_student_with_long_url(self):
self._test_submit_with_long_url(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all_with_long_url(self):
self._test_submit_with_long_url(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all_with_long_url(self):
self._test_submit_with_long_url(submit_delete_problem_state_for_all_students)
def _test_submit_task(self, task_function, student=None):
# tests submit, and then tests a second identical submission.
problem_url_name = 'H1P1'
self.define_option_problem(problem_url_name)
location = InstructorTaskModuleTestCase.problem_location(problem_url_name)
if student is not None:
instructor_task = task_function(self.create_task_request(self.instructor), location, student)
else:
instructor_task = task_function(self.create_task_request(self.instructor), location)
# test resubmitting, by updating the existing record:
instructor_task = InstructorTask.objects.get(id=instructor_task.id)
instructor_task.task_state = PROGRESS
instructor_task.save()
with self.assertRaises(AlreadyRunningError):
if student is not None:
task_function(self.create_task_request(self.instructor), location, student)
else:
| task_function(self.create_task_request(self.instructor), location)
def test_submit_rescore_all(self):
self._test_submit_task(submit_rescore_problem_for_all_students)
def test_submit_rescore_student(self):
self._test_ | submit_task(submit_rescore_problem_for_student, self.student)
def test_submit_reset_all(self):
self._test_submit_task(submit_reset_problem_attempts_for_all_students)
def test_submit_delete_all(self):
self._test_submit_task(submit_delete_problem_state_for_all_students)
class InstructorTaskCourseSubmitTest(TestReportMixin, InstructorTaskCourseTestCase):
"""Tests API methods that involve the submission of course-based background tasks."""
def setUp(self):
self.initialize_course()
self.student = UserFactory.create(username="student", email="student@edx.org")
self.instructor = UserFactory.create(username="instructor", email="instructor@edx.org")
def _define_course_email(self):
"""Create CourseEmail object for testing."""
course_email = CourseEmail.create(self.course.i |
junwoo091400/MyCODES | Projects/FootPad_Logger/logged_data_analyzer_LSTM/Train.py | Python | gpl-3.0 | 4,440 | 0.04009 | from Base import *
from RNN_LSTM import *
from Data_manipulation import *
import ipdb
def log_2_Batch_Y32(log):
# Encodes Y as 0~31.
print('log_2_Batch. log Data raw count:',len(log.Data)*200,str(log))
X = []#Must be 5 seconds/ each.
Y = []
idx = 0
for datum in log.Data:#Each server uploads -> 20 seconds.
for ts in datum[2]:
X.append(Encrpted_to_List5(ts))#int -> list.
Y.append(ts)#RAW int value.
idx += 1
X = X[:-5]
Y = Y[1:]
print('log_2_Batch. x,y len : ',len(X),len(Y))
return (X,Y)
def isIgnored(log):
if(log.State == DateHandler.NOT_VALID):
return True
if(log.State != DateHandler.LUNCH):
return True
if(Datestr_to_Int(log.Date) < 171107):
return True
if(Datestr_to_Int(log.Date) > 171111):
return True
return False
def plot(loss_list, predictions_series, batchX, batchY, backprop_length, num_classes):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:,batch_series_idx, :]#Becomes (50,32)
single_output_series = np.array([find_Max | Value_Idx(out) for out in one_hot_output_series])
#ipdb.set_trace()
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, backprop_length, 0, num_classes])
left_offset = range(backprop_length)
#plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :], width=1, color="red")
plt.bar(left_offset, | single_output_series * 0.5, width=1, color="green")
#ipdb.set_trace()#REMOVE me.
plt.draw()
plt.pause(0.0001)
def main():
logs = get_Logs('Mega.csv')
X_bat, Y_bat = [], []
Log_bat = []
for flg in logs:
if( not isIgnored(flg)):
x,y = log_2_Batch_Y32(flg)
X_bat.append(x)
Y_bat.append(y)
Log_bat.append(flg)
print('Total X_bat count : ', len(X_bat) )
#
backprop_len = 50
batch_size = len(X_bat)
total_len = min([len(x_batch) for x_batch in X_bat]) # In 'unit' of 1 TIMESTAMPs!
#total_len = total_len - total_len % backprop_len # Make it 'multiple' of backprop_len. It is already multiple of 5.
X_bat = [x_bat[:total_len] for x_bat in X_bat]
Y_bat = [y_bat[:total_len] for y_bat in Y_bat]#There should exist one y value for each timestamp in X.
state_size = 10
num_class = 32
print('*'*10)
for log in Log_bat:
print(str(log))
print('*'*10)
print('batch_size ',batch_size)
print('total_len ',total_len)
print('backprop_len ',backprop_len)
print('state_size ',state_size)
print('num_class ',num_class)
print('*'*10)
input("These are the details. Proceed?")
#
pad_length = 5
num_epochs = 501 # SET ME!
#
batchX_placeholder, batchY_placeholder, cell_state, hidden_state, current_state, predictions_series, W2, b2, cell, train_step, total_loss = RNN_LSTM(batch_size, total_len, pad_length, backprop_len, state_size, num_class)
loss_printer = open('v4LossPrint.csv','w',1)#Line buffered.
loss_printer.write('v4epoch_idx,batch_idx,_total_loss\n')
saver = tf.train.Saver(max_to_keep=None)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
plt.ion()
plt.figure()
plt.show()
loss_list = []
x = np.array(X_bat).reshape(batch_size,-1,pad_length)
y = np.array(Y_bat).reshape(batch_size,-1)
for epoch_idx in range(num_epochs):
_current_cell_state = np.zeros((batch_size, state_size))
_current_hidden_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
for batch_idx in range(total_len // backprop_len):
start_idx = batch_idx * backprop_len
end_idx = start_idx + backprop_len
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder: batchX,
batchY_placeholder: batchY,
cell_state: _current_cell_state,
hidden_state: _current_hidden_state
})
_current_cell_state, _current_hidden_state = _current_state
loss_printer.write('{},{},{}\n'.format(epoch_idx,batch_idx,_total_loss))
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Batch loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY,backprop_len,num_class)
if(epoch_idx % 10 == 0):
saver.save(sess, "saver/v4epoch_{}.ckpt".format(epoch_idx))
plt.ioff()
plt.show()
if __name__ == '__main__':
main() |
mitdbg/modeldb | client/verta/verta/configuration/__init__.py | Python | mit | 293 | 0 | # -*- coding: utf-8 -*-
"""Utilities for configuration | versioning."""
from verta._internal_utils import documentation
from ._configuration import _Configuration
from ._hyperparameters import Hyperparameters
documentation.reassign_module(
[Hyperpa | rameters],
module_name=__name__,
)
|
jeffbuttars/upkg | upkg/cmds/status.py | Python | gpl-2.0 | 1,988 | 0.003018 | import logging
logger = logging.getLogger('upkg')
from blessings import Terminal
from cmds.base import BaseCmd
from upkg.lib import Repo
class Cmd(BaseCmd):
"""Docstring for Search """
name = 'status'
help_text = ("Get the status of what's installed")
aliases = ['st', 'stat']
def build(self):
"""todo: Docstring for build
:return:
:rtype:
"""
self._cmd_parser.add_argument(
'status',
type=str,
default=None,
nargs="*",
help=(""),
)
return super(Cmd, self).build()
#build()
def exec(self, args):
"""todo: Docstring for exec
:param args: arg description
:type args: type description
:return:
:rtype:
"""
logger.debug("status %s", args.status)
self.status(args.status)
#exec()
def status(self, repos):
"""
:param repos: arg description
:type repos: type description
:return:
:rtype:
"""
| logger.debug("repos: %s", repos)
if repos:
rlist = [Repo(name=x) for x in repos]
else:
# Update them all!
rlist = Repo.installed_list()
logger.debug("repo list: %s", rlist)
t = Terminal()
for r in rlist:
logger.debug("calling status on: %s", r)
rp = r.repo_dir
rpw = len(rp)
w = t.width
bw = int(((w - rpw) / 2) - 1)
nw = int(((w - len(r.na | me)) / 2) - 1)
print(t.magenta("*" * w))
print(t.magenta("{} {} {}".format(" " * nw, r.name, " " * nw)))
if (rpw + 2) < w:
print(t.magenta("{} {} {}".format(" " * bw, rp, " " * bw)))
else:
print(t.magenta(rpw))
print(t.magenta("*" * w))
r.status()
print("\n")
# end for r in rlist
#status()
# Cmd
|
nipunreddevil/bayespy | bayespy/utils/tests/test_linalg.py | Python | gpl-3.0 | 6,989 | 0.011447 | ######################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for bayespy.utils.utils module.
"""
import numpy as np
from ..utils import TestCase
from .. import utils
from .. import linalg
class TestDot(TestCase):
def test_dot(self):
"""
Test dot product multiple multi-dimensional arrays.
"""
# If no arrays, return 0
self.assertAllClose(linalg.dot(),
0)
# If only one array, return itself
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]]),
[[1,2,3],
[4,5,6]])
# Basic test of two arrays: (2,3) * (3,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]]),
[[31,19],
[85,55]])
# Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]],
[[4],
[5]],
[[6,7]]),
[[1314,1533],
[3690,4305]])
# Test broadcasting: (2,2,2) * (2,2,2,2)
self.assertAllClose(linalg.dot([[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[9,1],
[2,3]],
[[4,5],
[6,7]]]]),
[[[[ 7, 10],
[ 15, 22]],
[[ 67, 78],
[ 91, 106]]],
[[[ 13, 7],
[ 35, 15]],
[[ 56, 67],
[ 76, 91]]]])
# Inconsistent shapes: (2,3) * (2,3)
self.assertRaises(ValueError,
linalg.dot,
[[1,2,3],
[4,5,6]],
[[1,2,3],
[4,5,6]])
# Other axes do not broadcast: (2,2,2) * (3,2,2)
self.assertRaises(ValueError,
linalg.dot,
[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[1,2],
[3,4]],
[[5,6],
[7,8]],
[[9,1],
[2,3]]])
# Do not broadcast matrix axes: (2,1) * (3,2)
self.assertRaises(ValueError,
linalg.dot,
| [[1],
[2]],
[[1,2,3],
[4,5,6]])
# Do not accept less than 2-D arrays: (2) * (2,2)
self.assertRaises(ValueError,
| linalg.dot,
[1,2],
[[1,2,3],
[4,5,6]])
class TestBandedSolve(TestCase):
def test_block_banded_solve(self):
"""
Test the Gaussian elimination algorithm for block-banded matrices.
"""
#
# Create a block-banded matrix
#
# Number of blocks
N = 40
# Random sizes of the blocks
#D = np.random.randint(5, 10, size=N)
# Fixed sizes of the blocks
D = 5*np.ones(N)
# Some helpful variables to create the covariances
W = [np.random.randn(D[i], 2*D[i])
for i in range(N)]
# The diagonal blocks (covariances)
A = [np.dot(W[i], W[i].T) for i in range(N)]
# The superdiagonal blocks (cross-covariances)
B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)]
C = utils.block_banded(A, B)
# Create the system to be solved: y=C*x
x_true = np.random.randn(np.sum(D))
y = np.dot(C, x_true)
x_true = np.reshape(x_true, (N, -1))
y = np.reshape(y, (N, -1))
#
# Run tests
#
# The correct inverse
invC = np.linalg.inv(C)
# Inverse from the function that is tested
(invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A),
np.asarray(B),
np.asarray(y))
# Check that you get the correct number of blocks
self.assertEqual(len(invA), N)
self.assertEqual(len(invB), N-1)
# Check each block
i0 = 0
for i in range(N-1):
i1 = i0 + D[i]
i2 = i1 + D[i+1]
# Check diagonal block
self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1]))
# Check super-diagonal block
self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2]))
i0 = i1
# Check last block
self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:]))
# Check the solution of the system
self.assertTrue(np.allclose(x_true, x))
# Check the log determinant
self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
|
toomore/grs | test_unittest.py | Python | mit | 5,685 | 0.000177 | # -*- coding: utf-8 -*-
''' Unittest '''
import grs
import unittest
from datetime import datetime
from types import BooleanType
from types import NoneType
class TestGrs(unittest.TestCase):
def get_data(self):
self.stock_no = '2618'
self.data = grs.Stock(self.stock_no)
def test_stock(self):
self.get_data()
assert self.data.info[0] == self.stock_no
def test_best_buy_or_sell(self):
self.get_data()
assert isinstance(grs.BestFourPoint(self.data).best_four_point(),
(tuple, NoneType))
def test_m | oving_average(self):
self.get_data()
result = self.data.moving_average(3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
assert result == self.data.MA(3)
def test_moving_average_value(self):
self.get_data()
result = self | .data.moving_average_value(3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
assert result == self.data.MAV(3)
def test_moving_average_bias_ratio(self):
self.get_data()
result = self.data.moving_average_bias_ratio(6, 3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
assert result == self.data.MAO(6, 3)
def test_check_moving_average_bias_ratio(self):
self.get_data()
param = (self.data.moving_average_bias_ratio(6, 3)[0], True)
result = self.data.check_moving_average_bias_ratio(*param)[0]
assert isinstance(result, BooleanType)
assert result == self.data.CKMAO(*param)[0]
def test_CKMAO_classmethod(self):
self.get_data()
result = grs.fetch_data.SimpleAnalytics.CKMAO(self.data.MAO(3, 6)[0])
assert isinstance(result, tuple)
assert len(result) == 3
def test_stock_value(self):
self.get_data()
assert isinstance(self.data.price, list)
assert isinstance(self.data.openprice, list)
assert isinstance(self.data.value, list)
@staticmethod
def test_twse_no():
twse_no = grs.TWSENo()
assert isinstance(twse_no.all_stock, dict)
result = twse_no.search(u'中')
# 1701 中化
assert '1701' in result
result = twse_no.searchbyno(17)
assert '1701' in result
@staticmethod
def test_twse_code_comps():
twseno = grs.TWSENo()
industry_code = twseno.industry_code
industry_comps = twseno.industry_comps
for i in industry_comps:
assert i in industry_code
@staticmethod
def test_twse_open():
is_open = grs.TWSEOpen()
result = is_open.d_day(datetime(2014, 1, 1))
assert result is False
@staticmethod
@unittest.skip('Known issues.')
def test_realtime():
real_time = grs.RealtimeStock('2618')
assert real_time.real['no'] == '2618'
real_time = grs.RealtimeWeight()
assert real_time.real['no'] == '1'
real_time = grs.RealtimeStock('0050')
assert real_time.real['no'] == '0050'
try:
real_time = grs.RealtimeStock(0050)
except AssertionError:
pass
@staticmethod
def test_countdown():
result = grs.Countdown().countdown
assert isinstance(result, int)
@staticmethod
def test_taiwan_50():
stock = grs.Stock('0050')
assert u'元大台灣50' == stock.info[1]
try:
stock = grs.Stock(0050)
except AssertionError:
pass
class TestGrsOTC(unittest.TestCase):
def get_data(self):
self.stock_no = '8446'
self.data = grs.Stock(self.stock_no)
def test_stock(self):
self.get_data()
assert self.data.info[0] == self.stock_no
def test_best_buy_or_sell(self):
self.get_data()
assert isinstance(grs.BestFourPoint(self.data).best_four_point(),
(tuple, NoneType))
def test_moving_average(self):
self.get_data()
result = self.data.moving_average(3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
def test_moving_average_value(self):
self.get_data()
result = self.data.moving_average_value(3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
def test_moving_average_bias_ratio(self):
self.get_data()
result = self.data.moving_average_bias_ratio(6, 3)
assert isinstance(result[0], list)
assert isinstance(result[1], int)
def test_check_moving_average_bias_ratio(self):
self.get_data()
result = self.data.check_moving_average_bias_ratio(
self.data.moving_average_bias_ratio(3, 6)[0],
positive_or_negative=True)[0]
assert isinstance(result, BooleanType)
def test_stock_value(self):
self.get_data()
assert isinstance(self.data.price, list)
assert isinstance(self.data.openprice, list)
assert isinstance(self.data.value, list)
@staticmethod
def test_otc_no():
otc_no = grs.OTCNo()
assert isinstance(otc_no.all_stock, dict)
result = otc_no.search(u'華')
# 8446 華研
assert '8446' in result
result = otc_no.searchbyno(46)
assert '8446' in result
@staticmethod
def test_otc_code_comps():
twseno = grs.OTCNo()
industry_code = twseno.industry_code
industry_comps = twseno.industry_comps
for i in industry_comps:
assert i in industry_code
if __name__ == '__main__':
unittest.main()
|
accuen/rqt | lib/rqt/cli_parser.py | Python | apache-2.0 | 6,256 | 0.006394 | # Copyright 2014 Accuen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import logging
from textwrap import dedent
from .version import __version__
from . import actions
from .help import mk_help_text
logger = logging.getLogger(__name__)
COMMANDS = [
"help",
"create-config",
"run-query",
"show-query",
"show-plan",
"run-psql",
]
def do_help(*pargs, **kwargs):
    """Print the extended help text covering all known subcommands."""
    print mk_help_text(COMMANDS)
def add_help_subparser(subparsers):
    """Register the 'help' subcommand (extended help; no -h of its own)."""
    parser = subparsers.add_parser("help",
                                   help="Display extended help.",
                                   add_help=False  # no -h for help command
                                   )
    parser.set_defaults(func=do_help)
    # FINISH: Make "pgm help command" work as an alias for "pgm command -h".
    return parser
def add_create_config_subparser(subparsers):
    """Register the 'create-config' subcommand (writes ~/.rqt-config)."""
    description = dedent("""\
        Creates an uninitialized configuration file if none exists.

        The file is written to ~/.rqt-config.
        """)
    parser = subparsers.add_parser("create-config",
                                   description=description,
                                   help="Creates a configuration file if none exists.")
    parser.set_defaults(func=actions.do_create_config)
    return parser
def add_show_query_subparser(subparsers):
    """Register the 'show-query' subcommand (template-expansion preview)."""
    description = dedent("""\
        Shows the query resulting from template expansion.
        """)
    parser = subparsers.add_parser("show-query",
                                   description=description,
                                   help="Shows the query resulting from template expansion.")
    parser.set_defaults(func=actions.do_show_query)
    parser.add_argument("qt_filename",
                        metavar="QUERY_FILE",
                        help="the query template file")
    # To do: New feature; command line defines
    #parser.add_argument("--define", "-d",
    #                    action="append",
    #                    metavar="K=V",
    #                    help="add var. defn. to namespace")
    parser.add_argument("--json_params", "-p",
                        metavar="JSON_FILE",
                        help="JSON file containing variables to add to the template namespace")
    return parser
def add_show_plan_subparser(subparsers):
    """Register the 'show-plan' subcommand (engine execution plan)."""
    description = dedent("""\
        Shows the engine's query execution plan.
        """)
    parser = subparsers.add_parser("show-plan",
                                   description=description,
                                   help="Shows the engine's query execution plan.")
    parser.set_defaults(func=actions.do_show_plan)
    parser.add_argument("qt_filename", metavar="QUERY_FILE", help="the query template file")
    parser.add_argument("--json_params", "-p", metavar="JSON_FILE",
                        help="JSON file containing variables to add to the template namespace")
    return parser
def add_run_query_subparser(subparsers):
    """Register the 'run-query' subcommand (execute query, save CSV result)."""
    # The description previously contained stray '")' characters left over
    # from an editing mistake; it is user-visible --help text, so fix it.
    description = dedent("""\
        Runs a query and downloads result to a file.
        """)
    parser = subparsers.add_parser("run-query",
                                   description=description,
                                   help="Runs a query and downloads result to a file.")
    parser.set_defaults(func=actions.do_run_query)
    parser.add_argument("qt_filename", metavar="QUERY_FILE", help="the query template file")
    parser.add_argument("out_filename", metavar="OUT_FILE", help="the output file (must have .csv extension)")
    # Add the "-p" short option for consistency with show-query/show-plan;
    # the long form is unchanged, so existing invocations keep working.
    parser.add_argument("--json_params", "-p", metavar="JSON_FILE",
                        help="JSON file containing variables to add to the template namespace")
    return parser
def add_run_psql_subparser(subparsers):
    """Register the 'run-psql' subcommand (interactive psql session)."""
    description = dedent("""\
        Starts up a psql session with Redshift.
        """)
    parser = subparsers.add_parser("run-psql",
                                   description=description,
                                   help="Starts up a psql session with Redshift.")
    parser.set_defaults(func=actions.do_run_psql)
    return parser
def mk_argparser():
    """Build the top-level argument parser with all rqt subcommands.

    Global options (--config, --debug, --verbose, --query_group,
    --connection) apply to every subcommand.
    """
    desc = "Utility for running Redshift queries."
    epi = dedent("""\
        Try "rqt SUBCOMMAND -h" for help on a specific subcommand.
        Try "rqt help" for extended help.
        """)
    parser = argparse.ArgumentParser(description=desc,
                                     epilog=epi,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--config", metavar="JSON_FILE", default=None,
                        help="JSON file containing main configuration")
    parser.add_argument("--debug", action="store_true", default=False,
                        help="display debug level log messages")
    # Fixed: this help string was a verbatim copy of --debug's, which made
    # "rqt -h" describe the two options identically.
    parser.add_argument("--verbose", action="store_true", default=False,
                        help="display verbose level log messages")
    parser.add_argument("--query_group",
                        metavar="GROUP",
                        help="the Redshift query_group to use (default is taken from config)")
    parser.add_argument("--connection", metavar="CONNECTION",
                        help="the connection parameters to use from the config (default is taken from config)",
                        default="default")
    metavar = "SUBCOMMAND"
    subparsers = parser.add_subparsers(description="Use 'rqt SUBCOMMAND ...' to run rqt.",
                                       dest="mode", metavar=metavar)
    add_help_subparser(subparsers)
    add_create_config_subparser(subparsers)
    add_run_query_subparser(subparsers)
    add_show_query_subparser(subparsers)
    add_show_plan_subparser(subparsers)
    add_run_psql_subparser(subparsers)
    return parser
|
opennorth/inventory | inventory/migrations/0006_auto_20150217_2002.py | Python | mit | 1,132 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename the distribution validation_* fields to errors/http_* and
    add an http_content_length column."""

    dependencies = [
        ('inventory', '0005_remove_distribution_validation_extension'),
    ]

    operations = [
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_errors',
            new_name='errors',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_encoding',
            new_name='http_charset',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_content_type',
            new_name='http_content_type',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_headers',
            new_name='http_headers',
        ),
        # BigIntegerField because Content-Length can exceed 2**31 for
        # large distributions; nullable since existing rows have no value.
        migrations.AddField(
            model_name='distribution',
            name='http_content_length',
            field=models.BigIntegerField(null=True),
            preserve_default=True,
        ),
    ]
|
MartinSeeler/euler-python | problem-2.py | Python | mit | 252 | 0.007937 | sum_of_even = 0
prev | = 0
current = 1
next = current
while (prev+current) <= 4000000:
next = prev + current
prev = current
current = next
if next % 2 == 0:
sum_of_even += next
print "The sum of all even is %s" % s | um_of_even
|
51reboot/actual_09_homework | 09/liubaobao/cmdb_V4/manage.py | Python | mit | 121 | 0.033058 | #encoding:utf- | 8
from user import app
if __name__ == '__main__':
    # Listen on all interfaces on port 9002; debug mode is for development
    # only and must not be enabled in production.
    app.run(host='0.0.0.0',port=9002,debug=True)
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/openstack/tests/unit/network/v2/test_quota.py | Python | mit | 1,695 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed | on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either | express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.network.v2 import quota
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'floatingip': 1,
'network': 2,
'port': 3,
'tenant_id': '4',
'router': 5,
'subnet': 6,
}
class TestQuota(testtools.TestCase):
    """Unit tests for the network v2 Quota resource description."""

    def test_basic(self):
        # Static resource metadata: keys, path, owning service, and the
        # fact that quotas may only be listed (no create/get/update/delete).
        sot = quota.Quota()
        self.assertEqual('quota', sot.resource_key)
        self.assertEqual('quotas', sot.resources_key)
        self.assertEqual('/quotas', sot.base_path)
        self.assertEqual('network', sot.service.service_type)
        self.assertFalse(sot.allow_create)
        self.assertFalse(sot.allow_retrieve)
        self.assertFalse(sot.allow_update)
        self.assertFalse(sot.allow_delete)
        self.assertTrue(sot.allow_list)

    def test_make_it(self):
        # Each wire-format key in EXAMPLE must land on the matching
        # (sometimes renamed) Python attribute.
        sot = quota.Quota(EXAMPLE)
        self.assertEqual(EXAMPLE['floatingip'], sot.floating_ip)
        self.assertEqual(EXAMPLE['network'], sot.network)
        self.assertEqual(EXAMPLE['port'], sot.port)
        self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
        self.assertEqual(EXAMPLE['router'], sot.router)
        self.assertEqual(EXAMPLE['subnet'], sot.subnet)
|
zenodo/invenio | invenio/legacy/elmsubmit/scripts/elmsubmit.py | Python | gpl-2.0 | 2,040 | 0.00049 | #!@PYTHON@
# -*- mode: python; coding: utf-8; | -*-
#
# This file is part of In | venio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import sys
import getopt
from invenio.base.factory import with_app_context
def usage(exitcode=1, msg=""):
    """Print usage information on stderr, then exit with *exitcode*.

    When *msg* is non-empty it is reported first as an error line.
    """
    lines = []
    if msg:
        lines.append("Error: %s.\n" % msg)
    lines.append("Usage: %s [options]\n" % sys.argv[0])
    lines.append("General options:\n")
    lines.append("  -h, --help \t\t Print this help.\n")
    lines.append("  -V, --version \t\t Print version information.\n")
    lines.append("Description: read specially-formatted email message from stdin\n"
                 "and upload the records it contains to the system.\n")
    sys.stderr.write("".join(lines))
    sys.exit(exitcode)
@with_app_context()
def main():
    """Read a specially-formatted email message from stdin and process it.

    Handles -h/--help and -V/--version itself, then defers to
    ``elmsubmit.process_email`` for the actual record upload.
    """
    import invenio.legacy.elmsubmit.api as elmsubmit
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hV", ["help", "version"])
    except getopt.GetoptError as err:
        usage(1, err)
    try:
        for opt in opts:
            if opt[0] in ["-h", "--help"]:
                usage(0)
            elif opt[0] in ["-V", "--version"]:
                print(elmsubmit.__revision__)
                sys.exit(0)
    except StandardError as e:
        # Fixed: previously this called usage(e), passing the exception as
        # the *exit code*; pass it as the message like the handler above.
        usage(1, e)
    return elmsubmit.process_email(sys.stdin.read())
|
Evzdrop/celery-2619-repro | celery_repro/repro/tasks/incoming.py | Python | mit | 524 | 0.005725 | import json
import requests
from celery import shared_task
from repro.monitoring.custom_metrics import custom_metric_timed_nod | e
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task()
@custom_metric_timed_node('Custom/incoming/twitter/2')
def incoming(tweet, *args, **kwargs):
    """Celery task: parse an incoming tweet (JSON string) into a dict."""
    parsed = json.loads(tweet)
    return parsed
@custom_metric_timed_node('Custom/incoming/infer_location/3')
def infer_location(post):
    """Simulate location inference with a blocking network round-trip."""
    requests.get('https://yahoo.com') # Fake IO request
    return
|
95subodh/Leetcode | 020. Valid Parentheses.py | Python | mit | 474 | 0.048523 | #Given a string containing just the characters '(', ')', '{', '}', ' | [' and ']', determine if the input string is valid.
#
#The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
class Solution(object):
    def isValid(self, s):
        """Return True iff every bracket in *s* is closed in the right order.

        Assumes *s* contains only the characters '()[]{}'.

        The previous implementation matched brackets with the arithmetic
        test ``ord(i) < ord(top) + 3``, which only bounds the closer from
        above and therefore wrongly accepted mismatches such as "[)" and
        "{)".  An explicit closer->opener map fixes that.

        :type s: str
        :rtype: bool
        """
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in '([{':
                stack.append(ch)
            elif stack and stack[-1] == pairs[ch]:
                stack.pop()
            else:
                # Closing bracket with no matching opener on top of the stack.
                return False
        return not stack
JioCloud/horizon | openstack_dashboard/dashboards/admin/domains/tables.py | Python | apache-2.0 | 6,160 | 0.000325 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from keystoneclient import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.domains import constants
LOG = logging.getLogger(__name__)
class ViewGroupsLink(tables.LinkAction):
    """Row action opening the domain-update workflow at the groups step."""
    name = "groups"
    verbose_name = _("Modify Groups")
    url = "horizon:admin:domains:update"
    classes = ("ajax-modal",)
    icon = "pencil"

    def get_link_url(self, domain):
        # Jump straight to the group-membership step of the workflow.
        step = 'update_group_members'
        base_url = reverse(self.url, args=[domain.id])
        param = urlencode({"step": step})
        return "?".join([base_url, param])
class CreateDomainLink(tables.LinkAction):
    """Table action opening the create-domain modal."""
    name = "create"
    verbose_name = _("Create Domain")
    url = constants.DOMAINS_CREATE_URL
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (('identity', 'identity:create_domain'),)

    def allowed(self, request, domain):
        # Only shown when the keystone backend permits domain edits.
        return api.keystone.keystone_can_edit_domain()
class EditDomainLink(tables.LinkAction):
    """Row action opening the edit-domain modal."""
    name = "edit"
    verbose_name = _("Edit")
    url = constants.DOMAINS_UPDATE_URL
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (('identity', 'identity:update_domain'),)

    def allowed(self, request, domain):
        # Only shown when the keystone backend permits domain edits.
        return api.keystone.keystone_can_edit_domain()
class DeleteDomainsAction(tables.DeleteAction):
    """Delete selected domains; a domain must be disabled first."""
    name = "delete"
    data_type_singular = _("Domain")
    data_type_plural = _("Domains")
    policy_rules = (('identity', 'identity:delete_domain'),)

    def allowed(self, request, datum):
        return api.keystone.keystone_can_edit_domain()

    def delete(self, request, obj_id):
        domain = self.table.get_object_by_id(obj_id)
        if domain.enabled:
            # Keystone refuses to delete enabled domains; raise a 409 so
            # the table machinery reports the failure for this row.
            msg = _('Domain "%s" must be disabled before it can be deleted.') \
                % domain.name
            messages.error(request, msg)
            raise exceptions.ClientException(409, msg)
        else:
            LOG.info('Deleting domain "%s".' % obj_id)
            api.keystone.domain_delete(request, obj_id)
class DomainFilterAction(tables.FilterAction):
    """Name filter for the domains table, shown only with multidomain support."""

    def allowed(self, request, datum):
        multidomain_support = getattr(settings,
                                      'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                                      False)
        return multidomain_support

    def filter(self, table, domains, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()

        def comp(domain):
            if q in domain.name.lower():
                return True
            return False

        return filter(comp, domains)
class SetDomainContext(tables.Action):
    """Scope the dashboard session to the selected domain."""
    name = "set_domain_context"
    verbose_name = _("Set Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    policy_rules = (('identity', 'admin_required'),)

    def allowed(self, request, datum):
        # Offered only with multidomain support enabled, and only for
        # domains other than the one already set as the context.
        multidomain_support = getattr(settings,
                                      'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                                      False)
        if not multidomain_support:
            return False
        ctx = request.session.get("domain_context", None)
        if ctx and datum.id == ctx:
            return False
        return True

    def single(self, table, request, obj_id):
        # Store both the id and the display name in the session so other
        # views can show which domain is currently scoped.
        if ('domain_context' not in request.session or
                request.session['domain_context'] != obj_id):
            try:
                domain = api.keystone.domain_get(request, obj_id)
                request.session['domain_context'] = obj_id
                request.session['domain_context_name'] = domain.name
                messages.success(request,
                                 _('Domain Context updated to Domain %s.') %
                                 domain.name)
            except Exception:
                messages.error(request,
                               _('Unable to set Domain Context.'))
class UnsetDomainContext(tables.Action):
    """Clear the per-session domain context set by SetDomainContext."""
    name = "clear_domain_context"
    verbose_name = _("Clear Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    requires_input = False
    policy_rules = (('identity', 'admin_required'),)

    def allowed(self, request, datum):
        # Only meaningful while a context is actually set.
        ctx = request.session.get("domain_context", None)
        return ctx is not None

    def single(self, table, request, obj_id):
        if 'domain_context' in request.session:
            request.session.pop("domain_context")
            request.session.pop("domain_context_name")
            messages.success(request, _('Domain Context cleared.'))
class DomainsTable(tables.DataTable):
    """Admin table of keystone domains with context/edit/delete actions."""
    name = tables.Column('name', verbose_name=_('Name'))
    # description may be absent on a domain object, hence the getattr.
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'))
    id = tables.Column('id', verbose_name=_('Domain ID'))
    enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True)

    class Meta:
        name = "domains"
        verbose_name = _("Domains")
        row_actions = (SetDomainContext, ViewGroupsLink, EditDomainLink,
                       DeleteDomainsAction)
        table_actions = (DomainFilterAction, CreateDomainLink,
                         DeleteDomainsAction, UnsetDomainContext)
|
staticfloat/pybladeRF | bladeRF/tools/waterfall.py | Python | gpl-3.0 | 919 | 0.005441 | import numpy
import matplotlib.pyplot
import matplotlib.animation
HEIGHT = 500
WIDTH | = 1024
fig = matplotlib.pyplot.figure()
im_data = numpy.zeros((HEIGHT, WIDTH), dtype=numpy.float32)
im = matplotlib.pyplot.imshow(im_data, cmap=matplotlib.pyplot.get_cmap('gray'))
im.set_clim(0.0, 1.0)
data = numpy.zeros(FFTPOINTS, dtype=numpy.float32)
def init_image():
im.set_array(numpy.zeros((HEIGHT, WIDTH), dtype=numpy.float32))
return (im,)
def update_image(i):
dat | a[:] = numpy.fromstring(
sys.stdin.read(8192),
dtype=numpy.complex64,
)
fft = numpy.fft.rfft(data)
line = numpy.sqrt(numpy.real(fft)**2+numpy.imag(fft)**2)
im_data[:,:WIDTH-1] = im_data[:,1:]
im_data[:,WIDTH-1] = line
im.set_array(im_data)
return (im,)
ani = matplotlib.animation.FuncAnimation(
fig, update_image, init_func=init_image,
interval=0, blit=True)
matplotlib.pyplot.show()
|
agx/libvirt-sandbox-debian | libvirt-sandbox/image/template.py | Python | lgpl-2.1 | 4,199 | 0.001191 | #
# -*- coding: utf-8 -*-
# Authors: Daniel P. Berrange <berrange@redhat.com>
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import urlparse
import importlib
import re
class Template(object):
    """A parsed sandbox template URI: source, transport, credentials, path."""

    def __init__(self,
                 source, protocol,
                 hostname, port,
                 username, password,
                 path, params):
        """
        :param source: template source name
        :param protocol: network transport protocol or None
        :param hostname: registry hostname or None
        :param port: registry port or None
        :param username: username or None
        :param password: password or None
        :param path: template path identifier
        :param params: template parameters

        docker:///ubuntu

        docker+https://index.docker.io/ubuntu?tag=latest

        virt-builder:///fedora-20
        """
        self.source = source
        self.protocol = protocol
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.path = path
        self.params = params
        if self.params is None:
            self.params = {}

    @classmethod
    def _get_source_impl(klass, source):
        # Map a source name such as "virt-builder" to the class
        # "VirtBuilderSource" in module ...sources.virtbuilder: non-word
        # characters are stripped for the module name and used as word
        # boundaries for capitalization of the class name.
        try:
            p = re.compile("\W")
            sourcemod = "".join(p.split(source))
            sourcename = "".join([i.capitalize() for i in p.split(source)])

            mod = importlib.import_module(
                "libvirt_sandbox.image.sources." + sourcemod)
            classname = sourcename + "Source"
            classimpl = getattr(mod, classname)
            return classimpl()
        except Exception as e:
            print e
            raise Exception("Invalid source: '%s'" % source)

    def get_source_impl(self):
        """Instantiate the download backend for this template's source."""
        if self.source == "":
            raise Exception("Missing scheme in image URI")
        return self._get_source_impl(self.source)

    def __repr__(self):
        # Rebuild the canonical URI form (inverse of from_uri).
        if self.protocol is not None:
            scheme = self.source + "+" + self.protocol
        else:
            scheme = self.source
        if self.hostname:
            if self.port:
                netloc = "%s:%d" % (self.hostname, self.port)
            else:
                netloc = self.hostname
            if self.username:
                if self.password:
                    auth = self.username + ":" + self.password
                else:
                    auth = self.username
                netloc = auth + "@" + netloc
        else:
            netloc = None
        query = "&".join([key + "=" + self.params[key] for key in self.params.keys()])
        ret = urlparse.urlunparse((scheme, netloc, self.path, None, query, None))
        return ret

    @classmethod
    def from_uri(klass, uri):
        """Parse *uri* (e.g. docker+https://host/path?tag=latest) into a Template."""
        o = urlparse.urlparse(uri)
        # The scheme may be "source" or "source+protocol".
        idx = o.scheme.find("+")
        if idx == -1:
            source = o.scheme
            protocol = None
        else:
            source = o.scheme[0:idx]
            protocol = o.scheme[idx + 1:]
        query = {}
        if o.query is not None and o.query != "":
            for param in o.query.split("&"):
                (key, val) = param.split("=")
                query[key] = val
        return klass(source, protocol,
                     o.hostname, o.port,
                     o.username, o.password,
                     o.path, query)

    @classmethod
    def get_all(klass, source, templatedir):
        """List all templates available from *source* under *templatedir*."""
        impl = klass._get_source_impl(source)
        return impl.list_templates(templatedir)
|
nburn42/tensorflow | tensorflow/python/keras/utils/generic_utils_test.py | Python | apache-2.0 | 2,330 | 0.006438 | # Copyright 2016 The Ten | sorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras generic Python utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.platform import test
class HasArgTest(test.TestCase):

  def test_has_arg(self):
    # has_arg must find explicitly named parameters, ignore *args, and
    # treat **kwargs as accepting anything only when accept_all=True.
    def f_x(x):
      return x

    def f_x_args(x, *args):
      _ = args
      return x

    def f_x_kwargs(x, **kwargs):
      _ = kwargs
      return x

    self.assertTrue(keras.utils.generic_utils.has_arg(
        f_x, 'x', accept_all=False))
    self.assertFalse(keras.utils.generic_utils.has_arg(
        f_x, 'y', accept_all=False))
    self.assertTrue(keras.utils.generic_utils.has_arg(
        f_x_args, 'x', accept_all=False))
    self.assertFalse(keras.utils.generic_utils.has_arg(
        f_x_args, 'y', accept_all=False))
    self.assertTrue(keras.utils.generic_utils.has_arg(
        f_x_kwargs, 'x', accept_all=False))
    self.assertFalse(keras.utils.generic_utils.has_arg(
        f_x_kwargs, 'y', accept_all=False))
    self.assertTrue(keras.utils.generic_utils.has_arg(
        f_x_kwargs, 'y', accept_all=True))
class TestCustomObjectScope(test.TestCase):

  def test_custom_object_scope(self):
    # Objects registered in a custom_object_scope must be resolvable by
    # the keras get() lookups while the scope is active.
    def custom_fn():
      pass

    class CustomClass(object):
      pass

    with keras.utils.generic_utils.custom_object_scope(
        {'CustomClass': CustomClass, 'custom_fn': custom_fn}):
      act = keras.activations.get('custom_fn')
      self.assertEqual(act, custom_fn)
      cl = keras.regularizers.get('CustomClass')
      self.assertEqual(cl.__class__, CustomClass)
if __name__ == '__main__':
  # Allow running this test module directly.
  test.main()
|
PrairieLearn/PrairieLearn | testCourse/questions/brokenGrading/server.py | Python | agpl-3.0 | 156 | 0.019231 | import random, copy
def generate(data):
    """Populate the question data with the fixed correct answer x = 3."""
    answers = data['correct_answers']
    answers['x'] = 3
def grade(data):
    # Intentionally raises: this fixture question (directory name
    # "brokenGrading") exists to exercise the platform's handling of a
    # failing grading function.  Do NOT "fix" this.
    raise Exception('deliberately broken grading function')
|
miminus/youtube-dl | youtube_dl/extractor/videomega.py | Python | unlicense | 1,920 | 0.002083 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_request
class VideoMegaIE(InfoExtractor):
    """Extractor for videomega.tv (also accepts the "videomega:<id>" form)."""
    _VALID_URL = r'(?:videomega:|https?://(?:www\.)?videomega\.tv/(?:(?:view|iframe|cdn)\.php)?\?ref=)(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA',
        'md5': 'cc1920a58add3f05c6a93285b84fb3aa',
        'info_dict': {
            'id': 'AOSQBJYKIDDIKYJBQSOA',
            'ext': 'mp4',
            'title': '1254207',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA&width=1070&height=600',
        'only_matching': True,
    }, {
        'url': 'http://videomega.tv/view.php?ref=090051111052065112106089103052052103089106112065052111051090',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the cdn iframe and pull out title, thumbnail and stream URL."""
        video_id = self._match_id(url)
        iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id
        req = compat_urllib_request.Request(iframe_url)
        # The iframe is only served with a Referer and the ad-skip cookie set;
        # the same Referer must accompany the eventual media download.
        req.add_header('Referer', url)
        req.add_header('Cookie', 'noadvtday=0')
        webpage = self._download_webpage(req, video_id)
        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')
        # Strip the "VideoMega.tv -" branding around the real title.
        title = re.sub(
            r'(?:^[Vv]ideo[Mm]ega\.tv\s-\s*|\s*-\svideomega\.tv$)', '', title)
        thumbnail = self._search_regex(
            r'<video[^>]+?poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
        video_url = self._search_regex(
            r'<source[^>]+?src="([^"]+)"', webpage, 'video URL')
        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'http_headers': {
                'Referer': iframe_url,
            },
        }
|
sergiopasra/megaradrp | megaradrp/processing/sky.py | Python | gpl-3.0 | 2,585 | 0.002321 | #
# Copyright 2019-2020 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
import logging
import numpy
import megaradrp.instrument.focalplane as fp
from numina.frame.utils import copy_img
def subtract_sky(img, ignored_sky_bundles=None, logger=None):
    """Subtract the average sky-fiber spectrum from every valid fiber.

    :param img: RSS image (HDUList-like) with a 'WLMAP' extension
    :param ignored_sky_bundles: sky bundle ids excluded from the average
    :param logger: optional logger; module logger is used when None
    :return: tuple (sky-subtracted image, original image, sky-only image)
    """
    # Sky subtraction
    if logger is None:
        logger = logging.getLogger(__name__)
    logger.info('obtain fiber information')
    sky_img = copy_img(img)
    final_img = copy_img(img)
    fp_conf = fp.FocalPlaneConf.from_img(sky_img)
    # Sky fibers
    skyfibs = fp_conf.sky_fibers(valid_only=True,
                                 ignored_bundles=ignored_sky_bundles)
    logger.debug('sky fibers are: %s', skyfibs)
    # Create empty sky_data
    target_data = img[0].data
    target_map = img['WLMAP'].data
    sky_data = numpy.zeros_like(img[0].data)
    sky_map = numpy.zeros_like(img['WLMAP'].data)
    sky_img[0].data = sky_data
    # Copy each sky fiber's spectrum (and wavelength coverage) into the
    # sky image; fiber ids are 1-based, rows 0-based.
    for fibid in skyfibs:
        rowid = fibid - 1
        sky_data[rowid] = target_data[rowid]
        sky_map[rowid] = target_map[rowid]
    # Sum
    coldata = sky_data.sum(axis=0)
    colsum = sky_map.sum(axis=0)
    # Divide only where map is > 0
    mask = colsum > 0
    avg_sky = numpy.zeros_like(coldata)
    avg_sky[mask] = coldata[mask] / colsum[mask]
    # This should be done only on valid fibers
    logger.info('ignoring invalid fibers: %s', fp_conf.invalid_fibers())
    for fibid in fp_conf.valid_fibers():
        rowid = fibid - 1
        final_img[0].data[rowid, mask] = img[0].data[rowid, mask] - avg_sky[mask]
    # Update headers
    #
    return final_img, img, sky_img
def subtract_sky_rss(img, sky_img, ignored_sky_bundles=None, logger=None):
    """Subtract a sky image from an image.

    Only pixels covered by the wavelength map (WLMAP > 0) are subtracted;
    uncovered pixels are set to zero.

    :return: tuple (sky-subtracted image, original image, sky image)
    """
    # Sky subtraction
    if logger is None:
        logger = logging.getLogger(__name__)
    #logger.info('obtain fiber information')
    final_img = copy_img(img)
    # fiberconf_sky = dm.get_fiberconf(sky_img)
    # fiberconf_target = dm.get_fiberconf(img)
    logger.debug('using WLMAP extension to compute valid regions')
    v_map = img['WLMAP'].data > 0
    sky_map = numpy.zeros_like(img['WLMAP'].data)
    sky_data = sky_img[0].data
    sky_map[:] = v_map[:]
    # This should be done only on valid fibers
    #logger.info('ignoring invalid fibers: %s', fiberconf_target.invalid_fibers())
    final_img[0].data[v_map] = img[0].data[v_map] - sky_data[v_map]
    final_img[0].data[~v_map] = 0.0
    # Update headers
    #
    return final_img, img, sky_img
texastribune/txlege84 | txlege84/bills/views.py | Python | mit | 1,812 | 0 | from django.http import JsonResponse
from django.views.generic import DetailView, ListView, TemplateView
from bills.mixins import AllSubjectsMixin
from core.mixins import ConveneTimeMixin
from committees.mixins import AllCommitteesMixin
from legislators.mixins import AllLegislatorsMixin, ChambersMixin
from bills.models import Bill, Subject
class BillDetail(AllSubjectsMixin, AllLegislatorsMixin,
                 ConveneTimeMixin, DetailView):
    """Detail page for a single bill."""
    model = Bill
    template_name = 'pages/bill.html'
class NewLawsListDetail(AllSubjectsMixin, AllLegislatorsMixin,
                        ConveneTimeMixin, ListView):
    """List of bills that became law (became_law timestamp set)."""
    queryset = Bill.objects.filter(became_law__isnull=False)
    template_name = 'pages/new-laws.html'
class VetoedListDetail(AllSubjectsMixin, AllLegislatorsMixin,
                       ConveneTimeMixin, ListView):
    """List of bills that were vetoed (vetoed timestamp set)."""
    queryset = Bill.objects.filter(vetoed__isnull=False)
    template_name = 'pages/vetoed-bills.html'
class SubjectDetail(AllSubjectsMixin, AllLegislatorsMixin,
                    ConveneTimeMixin, DetailView):
    """Detail page for a single bill subject."""
    model = Subject
    template_name = 'pages/subject.html'
class SubjectListDetail(ConveneTimeMixin, ListView):
    """List of all bill subjects."""
    model = Subject
    template_name = 'pages/subject-list.html'
class BillSearchView(AllLegislatorsMixin, AllSubjectsMixin,
                     AllCommitteesMixin, ConveneTimeMixin, TemplateView):
    """Bill search page; data is loaded client-side via BillSearchJson."""
    template_name = 'pages/find-bills.html'
class BillSearchJson(ListView):
    """JSON list of every bill's name/slug, used by the search autocomplete."""
    queryset = Bill.objects.all().values('name', 'slug')

    def render_to_response(self, context, **kwargs):
        # safe=False because the top-level JSON value is a list, not a dict.
        return JsonResponse(list(context['object_list']), safe=False)
class LegeStreamDetail(AllSubjectsMixin, AllLegislatorsMixin,
                       ChambersMixin, ConveneTimeMixin, TemplateView):
    """The LegeStream live-coverage page."""
    template_name = 'pages/legestream.html'
|
bradhowes/pyslimp3 | server/ClientPersistence.py | Python | gpl-3.0 | 3,504 | 0.015126 | #
# Copyright (C) 2009 Brad Howes.
#
# This file is part of Pyslimp3.
#
# Pyslimp3 is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3, or (at your option) any later version.
#
# Pyslimp3 is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyslimp3; see the file COPYING. If not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
import cPickle
from datetime import datetime
import Client
import Settings
#
# Maintainer of client settings. Uses Python's cPickle module to save/restore
# setting dictionaries for Client objects. Writes to disk when a setting
# changes.
#
class ClientPersistence( object ):
    """Registry of active Client objects and their pickled per-host Settings."""

    def __init__( self, path = 'pyslimp3.pkl' ):
        self.path = path        # Location of the pickle file to use
        self.clients = {}       # Mapping of active Client objects
        self.settings = {}      # Mapping of available Settings objects
        self.restore()

    #
    # If any Client settings have changed, write the entire collection to disk.
    # Also, detect any Client objects that have not received any heartbeats
    # since some amount of time and cull them.
    #
    def save( self ):
        now = datetime.now()
        changed = False
        stale = []

        #
        # Visit all of the clients looking for changed state or staleness.
        # Stale keys are collected first so the dict is not mutated while
        # it is being iterated.
        #
        for key, client in self.clients.items():
            if client.settingsAreDirty():
                changed = True
            elif client.isStale( now ):
                print( '*** detected stale client', key )
                stale.append( key )
        for key in stale:
            client = self.clients[ key ]
            client.close()
            del self.clients[ key ]
        if changed:
            print( '... saving updated Client settings' )
            cPickle.dump( self.settings, open( self.path, 'wb' ) )

    #
    # Read the last saved collection of Settings objects.
    #
    def restore( self ):
        try:
            self.settings = cPickle.load( open( self.path ) )
            print( '... restored', len( self.settings ), 'settings:',
                   self.settings.keys() )
        except IOError:
            # No pickle file yet -- start with an empty settings map.
            pass

    #
    # For a given host/port IP address pair, locate a Client object. If none
    # found, create a new one.
    #
    def getClient( self, server, addr ):
        # Clients (and their settings) are keyed by host IP only.
        key = addr[ 0 ]
        client = self.clients.get( key, None )
        if client is None:

            #
            # Get the settings object last seen for this address. If not found,
            # create a new one with default values.
            #
            settings = self.settings.get( key, None )
            if settings is None:
                settings = Settings.Settings()
                self.settings[ key ] = settings

            #
            # Create new Client object for the address.
            #
            print( 'getClient:', addr )
            client = Client.Client( server, addr, settings )
            self.clients[ key ] = client
        else:
            client.setHardwareAddress( addr )
        return client
|
openwns/wrowser | openwns/wrowser/probeselector/DirectoryReaders.py | Python | gpl-2.0 | 2,528 | 0.001978 | ###############################################################################
# This file is part of openWNS (open Wireless Network Simulator)
# _____________________________________________________________________________
#
# Copyright (C) 2004-2007
# Chair of Communication Networks (ComNets)
# Kopernikusstr. 16, D-52074 Aachen, Germany
# phone: ++49-241-80-27910,
# fax: ++49-241-80-22242
# email: info@openwns.org
# www: http://www.openwns.org
# _____________________________________________________________________________
#
# openWNS is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License version 2 as published by the
# Free Software Foundation;
#
# openWNS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from FileReaders import ProbeReader, ScenarioReader
import Repr | esentations
import os
class CampaignReader:
def __init__(self,
directory,
progressNotify = None):
self.path | = os.path.abspath(directory)
self.progressNotify = progressNotify
self.stopped = False
def stop(self):
self.stopped = True
def read(self):
directories = [self.path]
for base, subDirs, files in os.walk(self.path):
if callable(self.progressNotify):
self.progressNotify(1, 1000, "scanning\n" + base)
for subDir in subDirs:
directories += [os.path.join(base, subDir)]
scenarios = []
maxIndex = len(directories)
for index, directory in enumerate(directories):
if self.stopped :
break
Scenario = Representations.Scenario
scenario = Scenario(ScenarioReader(os.path.abspath(directory)),
{"directory": directory},
True)
if len(scenario.probes) > 0:
scenarios.append(scenario)
if callable(self.progressNotify):
self.progressNotify(index + 1, maxIndex, "reading\n" + directory)
return scenarios, ["directory"]
|
LodewijkSikkel/paparazzi | sw/tools/parrot/parrot_utils.py | Python | gpl-2.0 | 2,820 | 0.00461 | #
# Copyright (C) 2012-2014 T | he Paparazzi Team
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILIT | Y or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import socket
import telnetlib
import sys
from ftplib import FTP
import ftplib
# Check if IP is valid
def is_ip(address):
try:
socket.inet_aton(address)
ip = True
except socket.error:
ip = False
return ip
# Helper function
def split_into_path_and_file(name):
if name.count('/') <= 0:
return ["./", name]
return name.rsplit('/', 1)
# Execute a command
def execute_command(tn, command):
tn.write(command + '\n')
return tn.read_until('# ')[len(command) + 2:-4]
# Check the version
def check_version(tn, directory):
return execute_command(tn, 'cat ' + directory + '/version.txt')
# Check what currently is running on the drone
def check_running(tn):
ps_aux = execute_command(tn, 'ps')
running = ""
if 'program.elf' in ps_aux:
running += ' Native (program.elf),'
if 'dragon-prog' in ps_aux:
running += ' Native (dragon-prog),'
if 'ap.elf' in ps_aux:
running += ' Paparazzi (ap.elf),'
if 'gst-launch' in ps_aux:
running += ' GStreamer (gst-launch)'
return running[1:]
# Check the filesystem
def check_filesystem(tn):
return execute_command(tn, 'df -h')
# Reboot the drone
def reboot(tn):
execute_command(tn, 'reboot')
# Upload ftp and catch memory-full error
def uploadfile(ftp, filename, content):
try:
ftp.storbinary("STOR " + filename, content)
except ftplib.error_temp:
print("FTP UPLOAD ERROR: Uploading FAILED: Probably your ARDrone memory is full.")
sys.exit()
except:
print("FTP UPLOAD ERROR: Maybe your ARDrone memory is full?", sys.exc_info()[0])
sys.exit()
# Connect with telnet and ftp, wait until login
def connect(host):
try:
tn = telnetlib.Telnet(host, timeout=3)
ftp = FTP(host)
ftp.login()
tn.read_until('# ')
return tn, ftp
except:
print('Could not connect to Parrot UAV (host: ' + host + ')')
exit(2)
# Close the telnet and ftp
def disconnect(tn, ftp):
tn.close()
ftp.close()
|
AtsushiSakai/PyAdvancedControl | inverted_pendulum_mpc_control/inverted_pendulum_mpc_control.py | Python | mit | 4,100 | 0 | """
Inverted Pendulum MPC control
author: Atsushi Sakai
"""
import matplotlib.pyplot as plt
i | mport numpy as np
import math
import time
import cvxpy
print("cvxpy version:", cvxpy.__version__)
l_bar = 2.0 # length of bar
M = 1.0 # [kg]
m = 0.3 # [kg]
g = 9.8 # [m/s^2]
Q = np.diag([0.0, 1.0, 1.0, 0.0])
R = np.diag([0.01])
nx = 4 # number of state
nu = 1 | # number of input
T = 30 # Horizon length
delta_t = 0.1 # time tick
animation = True
def main():
x0 = np.array([
[0.0],
[0.0],
[0.3],
[0.0]
])
x = np.copy(x0)
for i in range(50):
ox, dx, otheta, dtheta, ou = mpc_control(x)
u = ou[0]
x = simulation(x, u)
if animation:
plt.clf()
px = float(x[0])
theta = float(x[2])
show_cart(px, theta)
plt.xlim([-5.0, 2.0])
plt.pause(0.001)
def simulation(x, u):
A, B = get_model_matrix()
x = np.dot(A, x) + np.dot(B, u)
return x
def mpc_control(x0):
x = cvxpy.Variable((nx, T + 1))
u = cvxpy.Variable((nu, T))
A, B = get_model_matrix()
cost = 0.0
constr = []
for t in range(T):
cost += cvxpy.quad_form(x[:, t + 1], Q)
cost += cvxpy.quad_form(u[:, t], R)
constr += [x[:, t + 1] == A * x[:, t] + B * u[:, t]]
# print(x0)
constr += [x[:, 0] == x0[:, 0]]
prob = cvxpy.Problem(cvxpy.Minimize(cost), constr)
start = time.time()
prob.solve(verbose=False)
elapsed_time = time.time() - start
print("calc time:{0} [sec]".format(elapsed_time))
if prob.status == cvxpy.OPTIMAL:
ox = get_nparray_from_matrix(x.value[0, :])
dx = get_nparray_from_matrix(x.value[1, :])
theta = get_nparray_from_matrix(x.value[2, :])
dtheta = get_nparray_from_matrix(x.value[3, :])
ou = get_nparray_from_matrix(u.value[0, :])
return ox, dx, theta, dtheta, ou
def get_nparray_from_matrix(x):
"""
get build-in list from matrix
"""
return np.array(x).flatten()
def get_model_matrix():
# Model Parameter
A = np.array([
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, m * g / M, 0.0],
[0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, g * (M + m) / (l_bar * M), 0.0]
])
A = np.eye(nx) + delta_t * A
B = np.array([
[0.0],
[1.0 / M],
[0.0],
[1.0 / (l_bar * M)]
])
B = delta_t * B
return A, B
def flatten(a):
return np.array(a).flatten()
def show_cart(xt, theta):
cart_w = 1.0
cart_h = 0.5
radius = 0.1
cx = np.matrix([-cart_w / 2.0, cart_w / 2.0, cart_w /
2.0, -cart_w / 2.0, -cart_w / 2.0])
cy = np.matrix([0.0, 0.0, cart_h, cart_h, 0.0])
cy += radius * 2.0
cx = cx + xt
bx = np.matrix([0.0, l_bar * math.sin(-theta)])
bx += xt
by = np.matrix([cart_h, l_bar * math.cos(-theta) + cart_h])
by += radius * 2.0
angles = np.arange(0.0, math.pi * 2.0, math.radians(3.0))
ox = [radius * math.cos(a) for a in angles]
oy = [radius * math.sin(a) for a in angles]
rwx = np.copy(ox) + cart_w / 4.0 + xt
rwy = np.copy(oy) + radius
lwx = np.copy(ox) - cart_w / 4.0 + xt
lwy = np.copy(oy) + radius
wx = np.copy(ox) + float(bx[0, -1])
wy = np.copy(oy) + float(by[0, -1])
plt.plot(flatten(cx), flatten(cy), "-b")
plt.plot(flatten(bx), flatten(by), "-k")
plt.plot(flatten(rwx), flatten(rwy), "-k")
plt.plot(flatten(lwx), flatten(lwy), "-k")
plt.plot(flatten(wx), flatten(wy), "-k")
plt.title("x:" + str(round(xt, 2)) + ",theta:" +
str(round(math.degrees(theta), 2)))
plt.axis("equal")
def visualize_test():
# x = 1.0
# theta = math.radians(10.0)
# show_cart(x, theta)
# plt.show()
angles = np.arange(-math.pi / 2.0, math.pi / 2.0, math.radians(1.0))
xl = [2.0 * math.cos(i) for i in angles]
for x, theta in zip(xl, angles):
plt.clf()
show_cart(x, theta)
plt.pause(0.001)
if __name__ == '__main__':
main()
# visualize_test()
|
GirlsCodePy/girlscode-coursebuilder | modules/assessment_tags/questions.py | Python | gpl-3.0 | 14,631 | 0.000478 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing question tags."""
__author__ = 'sll@google.com (Sean Lip)'
import base64
import logging
import os
import jinja2
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from models import custom_modules
from models import models as m_models
from models import resources_display
from models import transforms
RESOURCES_PATH = '/modules/assessment_tags/resources'
@appengine_config.timeandlog('render_question', duration_only=True)
def render_question(
quid, instanceid, embedded=False, weight=None, progress=None):
"""Generates the HTML for a question.
Args:
quid: String. The question id.
instanceid: String. The unique reference id for the question instance
(different instances of the same question in a page will have
different instanceids).
embedded: Boolean. Whether this question is embedded within a container
object.
weight: number. The weight to be used when grading the question in a
scored lesson. This value is cast to a float and, if this cast
fails, defaults to 1.0.
progress: None, 0 or 1. If None, no progress marker should be shown. If
0, a 'not-started' progress marker should be shown. If 1, a
'complete' progress marker should be shown.
Returns:
a Jinja markup string that represents the HTML for the question.
"""
try:
question_dto = m_models.QuestionDAO.load(quid)
except Exception: # pylint: disable=broad-except
logging.exception('Invalid question: %s', quid)
return '[Invalid question]'
| if not question_dto:
return '[Question deleted]'
if weight is Non | e:
weight = 1.0
else:
try:
weight = float(weight)
except ValueError:
weight = 1.0
template_values = question_dto.dict
template_values['embedded'] = embedded
template_values['instanceid'] = instanceid
template_values['resources_path'] = RESOURCES_PATH
if progress is not None:
template_values['progress'] = progress
template_file = None
js_data = {
'quid': quid
}
if question_dto.type == question_dto.MULTIPLE_CHOICE:
template_file = 'templates/mc_question.html'
multi = template_values['multiple_selections']
template_values['button_type'] = 'checkbox' if multi else 'radio'
choices = [{
'text': choice['text'], 'score': choice['score'],
'feedback': choice.get('feedback')
} for choice in template_values['choices']]
js_data['choices'] = choices
js_data['defaultFeedback'] = template_values.get('defaultFeedback')
js_data['permuteChoices'] = (
template_values.get('permute_choices', False))
js_data['showAnswerWhenIncorrect'] = (
template_values.get('show_answer_when_incorrect', False))
js_data['allOrNothingGrading'] = (
template_values.get('all_or_nothing_grading', False))
elif question_dto.type == question_dto.SHORT_ANSWER:
template_file = 'templates/sa_question.html'
js_data['graders'] = template_values['graders']
js_data['hint'] = template_values.get('hint')
js_data['defaultFeedback'] = template_values.get('defaultFeedback')
# The following two lines are included for backwards compatibility with
# v1.5 questions that do not have the row and column properties set.
template_values['rows'] = template_values.get(
'rows',
resources_display.SaQuestionConstants.DEFAULT_HEIGHT_ROWS)
template_values['columns'] = template_values.get(
'columns',
resources_display.SaQuestionConstants.DEFAULT_WIDTH_COLUMNS)
else:
return '[Unsupported question type]'
# Display the weight as an integer if it is sufficiently close to an
# integer. Otherwise, round it to 2 decimal places. This ensures that the
# weights displayed to the student are exactly the same as the weights that
# are used for grading.
weight = (int(round(weight)) if abs(weight - round(weight)) < 1e-6
else round(weight, 2))
template_values['displayed_weight'] = weight
if not embedded:
js_data['weight'] = float(weight)
template_values['js_data'] = base64.b64encode(transforms.dumps(js_data))
template = jinja_utils.get_template(
template_file, [os.path.dirname(__file__)])
return jinja2.utils.Markup(template.render(template_values))
class QuestionTag(tags.BaseTag):
"""A tag for rendering questions."""
binding_name = 'question'
def get_icon_url(self):
return '/modules/assessment_tags/resources/question.png'
@classmethod
def name(cls):
return 'Question'
@classmethod
def vendor(cls):
return 'gcb'
def render(self, node, handler):
"""Renders a question."""
quid = node.attrib.get('quid')
weight = node.attrib.get('weight')
instanceid = node.attrib.get('instanceid')
progress = None
if (hasattr(handler, 'student') and not handler.student.is_transient
and not handler.lesson_is_scored):
progress = handler.get_course().get_progress_tracker(
).get_component_progress(
handler.student, handler.unit_id, handler.lesson_id,
instanceid)
html_string = render_question(
quid, instanceid, embedded=False, weight=weight,
progress=progress)
return tags.html_string_to_element_tree(html_string)
def get_schema(self, handler):
"""Get the schema for specifying the question."""
reg = schema_fields.FieldRegistry('Question')
if handler is None:
reg.add_property(schema_fields.SchemaField(
'quid', 'Question', 'string', optional=True, i18n=False))
reg.add_property(schema_fields.SchemaField(
'weight', 'Weight', 'number', optional=True, i18n=False))
return reg
reg.add_property(schema_fields.SchemaField(
'quid', None, 'string', hidden=True, optional=True, i18n=False))
reg.add_property(schema_fields.SchemaField(
'qu_type', None, 'string', hidden=True, optional=True, i18n=False))
reg.add_property(schema_fields.SchemaField(
'weight', None, 'number', hidden=True, optional=True, i18n=False))
select_schema = schema_fields.FieldRegistry(
'Select',
extra_schema_dict_values={'className': 'select-container'})
question_list = [(
unicode(q.id), # q.id is an int; schema requires a string
q.description) for q in m_models.QuestionDAO.get_all()]
if question_list:
select_schema.add_property(
schema_fields.SchemaField(
'quid', 'Question', 'string', optional=True, i18n=False,
select_data=[
('', '-- Select Existing Question --')] + question_list
))
else:
select_schema.add_property(
schema_fields.SchemaField(
'unused_id', '', 'string', optional=True,
editable=False, extra_schema_dict_values={
'value': 'No questions available'}))
course = handler.get_course()
mc_schema = resources_display.ResourceMCQuestion.get_schema(
course, None, f |
sbarbett/ssp-sdk-python | src/blacklists.py | Python | apache-2.0 | 1,822 | 0.017014 | # Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jso | n
from .blacklist_id import BlacklistId
from .sponsor import Sponsor
from .account import Account
class Blacklists:
def __init__(self, connection, base_uri):
self.connection = connection
self.base_uri = base_uri+"/blacklists"
def get(self):
"""Get a list of blacklists."""
return self.connection.get(self.base_uri)
def post(self, account_id, name, **kwargs):
"""Create a new Blacklist.
Arguments:
account_id -- The account ID associated with the blacklist.
name -- Th | e name of the blacklist.
Keyword Arguments:
sponsorId -- The sponsor ID associated with the blacklist (only required for NeustarAdmins)
description -- A description of the blacklist.
"""
properties = {"accountId": account_id, "name": name}
if kwargs is not None:
properties.update(kwargs)
return self.connection.post(self.base_uri, json.dumps(properties))
def blacklist_id(self, blacklist_id):
"""Create a Blacklist Id object."""
return BlacklistId(self.connection, self.base_uri, blacklist_id)
def sponsor(self):
"""Create a Sponsor object."""
return Sponsor(self.connection, self.base_uri)
def account(self):
"""Create an Account object."""
return Account(self.connection, self.base_uri) |
TieWei/openstack-kit | openstackkit/ping_working_public.py | Python | mit | 5,255 | 0.000761 | #! /usr/bin/python
# @author: wtie
import subprocess
import sys
import time
import argparse
DIFF = False
FIRST = []
def get_floating_ips():
sql = """SELECT fip.floating_ip_address
FROM neutron.floatingips | AS fip
JOIN neutron.ports AS p
JOIN neutron.securitygroupportbindings AS sgb
JOIN neutron.securitygrouprules AS sgr
JOIN
(
SELECT ins.uuid ,
Count(p.id) AS count
FROM nova.instances AS ins
JOIN neutron.ports AS p
where ins.uuid=p.device_id
AND ins.deleted=0
AND ins.vm_state='active'
AND | ins.task_state IS NULL
GROUP BY ins.uuid ) AS i
WHERE fip.fixed_port_id=p.id
AND p.admin_state_up=1
AND sgb.port_id=p.id
AND sgb.security_group_id=sgr.security_group_id
AND sgr.direction='ingress'
AND sgr.protocol='icmp'
AND sgr.remote_ip_prefix='0.0.0.0/0'
AND p.device_id=i.uuid
AND i.count=1;"""
floating_ips = [ip for ip in subprocess.Popen(
["mysql", "-sNe", sql],
stdout=subprocess.PIPE).communicate()[0].split("\n") if ip]
return floating_ips
def get_public_ips(net_uuid):
if not net_uuid:
return None
sql = """SELECT ipa.ip_address
FROM neutron.ports AS p
JOIN neutron.ipallocations AS ipa
JOIN neutron.securitygroupportbindings AS sgb
JOIN neutron.securitygrouprules AS sgr
JOIN
(
SELECT ins.uuid ,
Count(p.id) AS count
FROM nova.instances AS ins
JOIN neutron.ports AS p
where ins.uuid=p.device_id
AND ins.deleted=0
AND ins.vm_state='active'
AND ins.task_state IS NULL
GROUP BY ins.uuid ) AS i
WHERE ipa.network_id='""" + net_uuid + """'
AND ipa.port_id=p.id
AND p.admin_state_up=1
AND p.device_owner LIKE "compute:%"
AND sgb.port_id=p.id
AND sgb.security_group_id=sgr.security_group_id
AND sgr.direction='ingress'
AND sgr.protocol='icmp'
AND sgr.remote_ip_prefix='0.0.0.0/0'
AND p.device_id=i.uuid
AND i.count=1;"""
public_ips = [ip for ip in subprocess.Popen(
["mysql", "-sNe", sql],
stdout=subprocess.PIPE).communicate()[0].split("\n") if ip]
return public_ips
def ping(ip):
return subprocess.call(["ping", "-c", "1", "-w", "1", ip],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def ping_loop(net_uuid=None):
pingable_ips = get_public_ips(net_uuid) if net_uuid else []
pingable_ips += get_floating_ips()
total = len(pingable_ips)
fail_list = []
global DIFF
global FIRST
for ip in pingable_ips:
if DIFF and FIRST and ip in FIRST:
result = "?"
else:
result = ping(ip)
sys.stdout.write(str(result))
sys.stdout.flush()
if result == 1:
fail_list.append(ip)
#simple way to remove duplicate ips, need to improve
fail_list = list(set(fail_list))
if DIFF:
if FIRST:
diff_list = [ip for ip in fail_list if ip not in FIRST]
print "\n@DIFF: [%s] %s/%s: %s" % (total, len(diff_list),
len(fail_list), diff_list)
else:
FIRST = fail_list
print "\nFIRST: [%s] %s/%s: %s" % (total, len(fail_list),
len(fail_list), fail_list)
else:
print "\n[%s] %s: %s" % (total, len(fail_list), fail_list)
return fail_list
def print_report(failed_map, least_interval):
report = {}
for ip in failed_map:
if failed_map[ip] == 1:
pass
if failed_map[ip] in report:
report[failed_map[ip]].append(ip)
else:
report[failed_map[ip]] = [ip]
print "REPORT:\n"
for count in report:
outage = least_interval * (count - 1)
print("~%s :\n %s\n" % (outage, report[count]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--net_id", help="Include netwrok <net-id>")
parser.add_argument("--diff", action="store_true",
help="Only print diff ips compare with first round",
default=False)
args = parser.parse_args()
public_network_uuid = args.net_id if args.net_id else None
least_interval = 10
if args.diff:
DIFF = True
while True:
try:
start = time.time()
print time.strftime("%x %X")
failed_map = {}
fail_list = ping_loop(public_network_uuid)
for ip in fail_list:
if ip in failed_map:
failed_map[ip] += 1
else:
failed_map[ip] = 1
end = time.time()
if (end-start) < least_interval:
time.sleep(least_interval - (end-start))
except KeyboardInterrupt:
print_report(failed_map,least_interval)
sys.exit(0)
|
MakerLabPC/makerlinks | makerlinks.py | Python | gpl-3.0 | 1,532 | 0.035901 | import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
# create makerlinks application
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE = os.path.join(app.root_path, 'makerlinks-dev.db'),
DEBUG = True,
APP_NAME = "MakerLinks",
SECRET_KEY = 'development key', |
USERNAME = 'admin',
PASSWORD = 'default'
))
app.config.from_envvar('MAKERLINKS_SETTINGS', silent = True)
def connect_db():
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
with app.app_context():
db = get_db | ()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def show_links():
db = get_db()
cur = db.execute('select link, submitter from links order by id desc')
links = cur.fetchall()
return render_template('show_links.html', links=links)
@app.route('/add', methods=['POST'])
def add_link():
db = get_db()
db.execute('insert into links (link, submitter) values (?, ?)',
[request.form['link'], request.form['submitter']])
db.commit()
flash('New link was successfully posted')
return redirect(url_for('show_links'))
if __name__ == "__main__":
if not os.path.exists(app.config['DATABASE']):
init_db()
app.run()
|
GHubgenius/Beebeeto-framework | demo/baseframe.py | Python | gpl-2.0 | 4,824 | 0.002695 | #!/usr/bin/env python
# coding=utf-8
# Create: 2014-07-15
# Author: www.beebeeto.com
# Team: n0tr00t security team
import os
import sys
import json
import traceback
from pprint import pprint
from optparse import OptionParser, OptionGroup
from utils import http
BEEBEETO_STATEMENT = \
"This POC is created for security research. "\
"It cannot be used in illegal ways, the user should be responsible for the usage of it."\
"All Rights Reserved by BeeBeeTo.com."
class BaseFrame(object):
poc_info = {
# id/name to be edit by BeeBeeto
'poc': {
'id': None,
'name': None,
'author': 'Beebeeto',
'create_date': '2014-07-15',
},
# to be edit by you
'protocol': {
'name': None, # 'openssl' e.g.
'port': None, # must be int type, 443 e.g.
'layer4_protocol': ['tcp'],
},
# to be edit by you
'vul': {
'app_name': None,
'vul_version': None,
'type': None,
'tag': [],
'desc': None,
'references': [],
},
}
def __init__(self, run_in_shell=True):
if run_in_shell:
self._init_parser()
self.run_in_shell = run_in_shell
def _init_parser(self, do_parse=True):
usage = 'usage: %prog [options] arg1 arg2'
self.base_parser = OptionParser(usage=usage, description=BEEBEETO_STATEMENT)
self.user_parser = OptionGroup(self.base_parser,
title='POC Specified Options',
description='These options are specified by the author'
' of this poc, so they are available'
' only in this poc.')
self.base_parser.add_option_group(self.user_parser)
self.__init_base_parser()
self._init_user_parser()
if do_parse:
(self.options, self.args) = self.base_parser.parse_args()
| if not self.options.target:
print '\n[*] No target input!\n'
self.base_parser.print_help()
sys.exit()
def __init_base_parser(self):
self.base_parser.add_option('-t', '--target', | action='store', dest='target',
default=None, help='the target to be checked by this poc.')
self.base_parser.add_option('-v', '--verify',
action='store_true', dest='verify', default=True,
help='run poc in verify mode.')
self.base_parser.add_option('-e', '--exploit',
action='store_false', dest='verify',
help='run poc in exploit mode.')
self.base_parser.add_option('--verbose', action='store_true', dest='verbose',
default=False, help='print verbose debug information.')
self.base_parser.add_option('--info', action='callback', callback=self.__cb_print_poc_info,
help='print poc information.')
def _init_user_parser(self):
#self.user_parser.add_option('-x', help='example')
pass
def __cb_print_poc_info(self, option, opt, value, parser):
print(json.dumps(self.poc_info, ensure_ascii=False, indent=2))
sys.exit()
@classmethod
def normalize_target(cls, target):
if cls.poc_info['protocol']['name'] == 'http':
return http.normalize_url(target)
elif cls.poc_info['protocol']['name'] == 'https':
return http.normalize_url(target, https=True)
else:
return target
def run(self, options=None, debug=False):
options = self.options.__dict__ if self.run_in_shell else options
options['target'] = self.normalize_target(options['target'])
args = {
'options': options,
'success': False,
'poc_ret': {},
}
result = {}
try:
if options['verify']:
args = self.verify(args)
else:
args = self.exploit(args)
result.update(args)
except Exception, err:
if debug:
traceback.print_exc()
sys.exit()
result.update(args)
result['exception'] = str(err)
return result
@classmethod
def verify(cls, args):
'''
main code here.
'''
return args
@classmethod
def exploit(cls, args):
'''
main code here.
'''
return args
if __name__ == '__main__':
from pprint import pprint
bf = BaseFrame()
pprint(bf.run()) |
alanmcruickshank/superset-dev | superset/migrations/versions/ad82a75afd82_add_query_model.py | Python | apache-2.0 | 2,354 | 0.002974 | """Update models to support storing the queries.
Revision ID: ad82a75afd82
Revises: f162a1dea4c4
Create Date: 2016-07-25 17:48:12.771103
"""
# revision identifiers, used by Alembic.
revision = 'ad82a75afd82'
down_revision = 'f162a1dea4c4'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('query',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=11), nullable=False),
sa.Column('database_id', sa.Integer(), nullable=False),
sa.Column('tmp_table_name', sa.String(length=256), nullable=True),
sa.Column('tab_name', sa.String(length=256),nullable=True),
sa.Column('sql_editor_id', sa.String(length=256), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('status', sa.String(length=16), nullable=True),
sa.Column('name', sa.String(length=256), nullable=True),
sa.Column('schema', sa.String(length=256), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('select_sql', sa.Text(), nullable=True),
sa.Column('executed_sql', sa.Text(), nullable=True),
sa.Column('limit', sa.Integer(), nullable=True),
sa.Column('limit_used', sa.Boolean(), nullable=True),
sa.Column('select_as_cta', sa.Boolean(), nullable=True),
sa.Column('select_as_cta_used', sa.Boolean(), nullable=True),
sa.Column('progress', sa.Integer(), nullable=True),
sa.Column('rows', sa.Integer(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('start_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.Column('changed_on', sa.DateTime(), nullable=True),
sa.Colum | n('end_time', sa.Numeric(precision=20, scale=6), nullable=True),
sa.ForeignKeyConstraint(['database_id'], [u'dbs.id'], ),
sa.ForeignKeyConstraint(['user_id'], [u'ab_user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('dbs', sa.Column('select_as_create_table_as', sa.Boolean(),
nullable=True))
| op.create_index(
op.f('ti_user_id_changed_on'),
'query', ['user_id', 'changed_on'], unique=False)
def downgrade():
op.drop_table('query')
op.drop_column('dbs', 'select_as_create_table_as')
|
jwparker1797/USAR_Add_In | Install/Data/GP_tools/HQIIS_report.py | Python | gpl-3.0 | 9,879 | 0.014576 | ##############################################################
##HQIIS Updater Version 2.0
##
##*Changes for v2.0
## -Upgraded tool to work with SDSFIE 3.1 Army Adaptation
##
##This script takes data from an HQIIS excel file and can run a report from a USAR SDSFIE 3.1 geodatabase.
##
##This script only works on GIS records with the correct "rpuid".
##
##
##Created 27 July 2015.
##
## Created and tested on:
## Windows 7 (64 bit) operating system
## ESRI ArcGIS 10.2
## Python 2.7.3
##
##Author:
##Jesse Parker
##USAR ARIMD
##650.526.9418
##############################################################
def HQIIS_Report(params):
import arcpy, sys, os
from helper_functions.get_data_file import get_data_file
database = params[0].valueAsText
report_location = params[1].valueAsText
report_name = params[2].valueAsText
siteuid = params[3].value
open_report = params[4].value
# ##Split excel path
# path = os.path.split(HQIIS_excel_raw)
# HQIIS_excel = path[1]
# HQIIS_excel_location = path[0]
#
# ##Set the workspace to the folder containing the HQIIS excel file
# arcpy.env.workspace = HQIIS_excel_location
# arcpy.env.overwriteOutput = True
#
# try:
# ##Convert the HQIIS excel to .dbf
# arcpy.AddMessage("Converting excel...")
# HQIIS = arcpy.ExcelToTable_conversion(HQIIS_excel,"HQIIS_table.dbf")
# arcpy.AddMessage("Finished Converting...")
# except Exception as e:
# arcpy.AddMessage("Failed to convert HQIIS excel file.")
# sys.exit(arcpy.AddMessage(e.message))
HQIIS = get_data_file("Data.gdb\\HQIIS")
#HQIIS = os.path.split(__file__)[0] + r"\data\HQIIS.dbf"
work_folder = os.path.split(database)[0]
try:
##Create HQIIS report
arcpy.AddMessage("Creating report file...")
arcpy.env.workspace = report_location
arcpy.env.overwriteOutput = True
HQIIS_report = arcpy.TableToTable_conversion(HQIIS, work_folder, report_name + ".dbf")
fields = [f.name for f in arcpy.ListFields(HQIIS_report)]
arcpy.DeleteField_management(HQIIS_report, [fields[1],fields[2],fields[3],fields[4],fields[6],fields[10],fields[11],fields[13],fields[14],fields[15],fields[16],fields[17],fields[18],fields[19],fields[20],fields[21],fields[22],fields[23],fields[24],fields[25],fields[26],fields[29],fields[30],fields[33],fields[34],fields[35],fields[36],fields[37],fields[38],fields[39],fields[40],fields[41]])
arcpy.Ad | dField_management(HQIIS_report, "GIS_QTY","FLOAT")
arcpy.AddField_management(HQIIS_report, "GIS_UOM","TEXT")
if siteuid != 0:
site_uid_delim = arcpy.AddFieldDelimiters (HQIIS_report, "SITE_UID")
arcpy.TableSelect_analysis(HQIIS_report,report_name + "_" + "site" + ".dbf", site_uid_delim + " = " + str(siteuid))
arcpy.arcpy.Delete_management(HQIIS_report)
HQIIS_repor | t = report_location + os.sep + report_name + "_" + "site" + ".dbf"
arcpy.env.workspace = database
arcpy.env.overwriteOutput = True
##Generate Report
arcpy.AddMessage("Generating report...")
datasetlist = arcpy.ListDatasets("*", "Feature")
for dataset in datasetlist:
FC_list = arcpy.ListFeatureClasses("*","",dataset)
for FC in FC_list:
try:
#Skip centerline featureclasses for report
fc_list_wo_current = [fc for fc in FC_list if fc != FC]
if "Centerline" in FC or (FC[:-2] in [s[:-2] for s in fc_list_wo_current] and FC[-1] == 'L'):
continue
desc = arcpy.Describe(FC)
shape_type = desc.shapeType
fields = [f.name for f in arcpy.ListFields(FC)]
##Check for feature class shape type
if shape_type == "Polygon":
##Summarize the stats of the feature class
arcpy.Statistics_analysis (FC, work_folder +"\sum_stat.dbf", [["featureArea","SUM"], ["featureAreaUOM","FIRST"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","SUM_featur","FIRST_feat"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
uom = row[2]
if uom in ["YD2","SYD","squareYard"]:
uom = "SY"
if uom in ["FT2","SFT","squareFoot",]:
uom = "SF"
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
for row in cursor:
if str(row[0]) == rpuid:
row[1] += qty
row[2] += uom
cursor.updateRow(row)
elif shape_type == "Polyline":
arcpy.Statistics_analysis (FC, work_folder +"\sum_stat.dbf", [["featureLength","SUM"], ["featureLengthUOM","FIRST"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","SUM_featur","FIRST_feat"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
uom = row[2]
if uom in ["FT","foot"]:
uom = "LF"
if uom in ["YD","yard"]:
uom = "YD"
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
for row in cursor:
if str(row[0]) == rpuid:
row[1] += qty
row[2] += uom
cursor.updateRow(row)
elif shape_type == "Point":
arcpy.Statistics_analysis(FC, work_folder + "\sum_stat.dbf", [["rpuid","COUNT"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","COUNT_rpui"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
|
alcides/rdflib | test/test_sparql/test_not_equals.py | Python | bsd-3-clause | 1,072 | 0.007463 | from rdflib.term import URIRef, Literal
from rdflib.namespace import Namespace, RDF, RDFS
from rdflib import plugin
from rdflib.parser import StringInputSource
from rdflib.graph import Graph, ReadOnlyGraphAggregate, ConjunctiveGraph
import sys
from pprint import pprint
def testSPARQLNotEquals():
    """Check that a SPARQL ``FILTER (?val != 1)`` excludes the matching binding.

    Two subjects carry ``rdf:value`` literals 1 and 2; the filter removes
    the subject with value 1, so only ``:bar`` may be returned.
    """
    graph = ConjunctiveGraph()
    graph.parse(StringInputSource("""
       @prefix    : <http://example.org/> .
       @prefix rdf: <%s> .
       :foo rdf:value 1.
       :bar rdf:value 2.""" % RDF.uri), format="n3")
    rt = graph.query("""SELECT ?node
                        WHERE {
                            ?node rdf:value ?val.
                            FILTER (?val != 1)
                        }""",
                     initNs={'rdf': RDF.uri},
                     DEBUG=False)
    for row in rt:
        item = row
        assert item == URIRef("http://example.org/bar"), \
            "unexpected item of '%s'" % repr(item)
if __name__ == '__main__':
testSPARQLNotEquals()
|
east825/green-type | test/test_data/resolve/local/import_chain/main.py | Python | mit | 64 | 0.015625 | import sibling as alias
|
class MyClass(alias.A.Inner):
    # Fixture for the import-chain resolution test: inherits from
    # sibling.A.Inner through the ``alias`` module alias.
    pass
|
rackerlabs/django-DefectDojo | dojo/tools/meterian/parser.py | Python | bsd-3-clause | 5,223 | 0.001723 | import json
from datetime import datetime
from dojo.models import Finding
class MeterianParser(object):
    """DefectDojo importer for Meterian JSON security reports.

    Walks the per-language security reports in the JSON payload and
    produces one ``Finding`` per (dependency, advisory) pair.
    """

    def get_scan_types(self):
        # Identifier under which DefectDojo registers this parser.
        return ["Meterian Scan"]

    def get_label_for_scan_types(self, scan_type):
        return scan_type

    def get_description_for_scan_types(self, scan_type):
        return "Meterian JSON report output file can be imported."

    def get_findings(self, report, test):
        """Parse *report* (a file-like object with the JSON payload) and
        return a list of Finding objects attached to *test*."""
        findings = []
        report_json = json.load(report)
        security_reports = self.get_security_reports(report_json)
        # The report's ISO-8601 timestamp becomes the scan date of every finding.
        scan_date = str(datetime.fromisoformat(report_json["timestamp"]).date())
        for single_security_report in security_reports:
            findings += self.do_get_findings(single_security_report, scan_date, test)

        return findings

    def get_security_reports(self, report_json):
        """Return the list of per-language security reports.

        Raises:
            ValueError: when the expected nesting
                ``reports -> security -> reports`` is missing.
        """
        if "reports" in report_json:
            if "security" in report_json["reports"]:
                if "reports" in report_json["reports"]["security"]:
                    return report_json["reports"]["security"]["reports"]

        raise ValueError("Malformed report: the security reports are missing.")

    def do_get_findings(self, single_security_report, scan_date, test):
        """Build one Finding per advisory of every dependency in a single
        per-language security report."""
        findings = []
        language = single_security_report["language"]

        for dependency_report in single_security_report["reports"]:

            lib_name = dependency_report["dependency"]["name"]
            lib_ver = dependency_report["dependency"]["version"]
            finding_title = lib_name + ":" + lib_ver
            for advisory in dependency_report["advices"]:
                severity = self.get_severity(advisory)
                finding = Finding(
                    title=finding_title,
                    date=scan_date,
                    test=test,
                    severity=severity,
                    severity_justification="Issue severity of: **" + severity + "** from a base " +
                    "CVSS score of: **" + str(advisory.get('cvss')) + "**",
                    description=advisory['description'],
                    component_name=lib_name,
                    component_version=lib_ver,
                    false_p=False,
                    duplicate=False,
                    out_of_scope=False,
                    impact=severity,
                    static_finding=True,
                    dynamic_finding=False,
                    file_path="Manifest file",
                    unique_id_from_tool=advisory['id'],
                    tags=[language]
                )

                # "N/A" is Meterian's placeholder for "no CVE assigned".
                if 'cve' in advisory:
                    if "N/A" != advisory["cve"]:
                        finding.cve = advisory["cve"]
                if "cwe" in advisory:
                    finding.cwe = int(advisory["cwe"].replace("CWE-", ""))

                # Prefer the smallest safe upgrade: patch, then minor, then major.
                mitigation_msg = "## Remediation\n"
                safe_versions = dependency_report["safeVersions"]
                if "latestPatch" in safe_versions:
                    mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestPatch"] + " or higher."
                elif "latestMinor" in safe_versions:
                    mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMinor"] + " or higher."
                elif "latestMajor" in safe_versions:
                    mitigation_msg += "Upgrade " + lib_name + " to version " + safe_versions["latestMajor"] + "."
                else:
                    mitigation_msg = "We were not able to provide a safe version for this library.\nYou should consider replacing this component as it could be an issue for the safety of your application."
                finding.mitigation = mitigation_msg

                references = ""
                for link in advisory["links"]:
                    ref_link = self.get_reference_url(link)
                    if ref_link is not None:
                        references += "- " + ref_link + "\n"
                if references != "":
                    finding.references = references

                findings.append(finding)

        return findings

    def get_severity(self, advisory):
        """Map an advisory to a DefectDojo severity label.

        Uses the CVSS base-score bands when a score is present, otherwise
        falls back on Meterian's own severity keyword.
        """
        # Following the CVSS Scoring per https://nvd.nist.gov/vuln-metrics/cvss
        if 'cvss' in advisory:
            if advisory['cvss'] <= 3.9:
                severity = "Low"
            elif advisory['cvss'] >= 4.0 and advisory['cvss'] <= 6.9:
                severity = "Medium"
            elif advisory['cvss'] >= 7.0 and advisory['cvss'] <= 8.9:
                severity = "High"
            else:
                severity = "Critical"
        else:
            if advisory["severity"] == "SUGGEST" or advisory["severity"] == "NA" or advisory["severity"] == "NONE":
                severity = "Info"
            else:
                severity = advisory["severity"].title()

        return severity

    def get_reference_url(self, link_obj):
        """Expand CVE/NVD identifiers into full advisory URLs; other link
        types are returned unchanged."""
        url = link_obj["url"]
        if link_obj["type"] == "CVE":
            url = "https://cve.mitre.org/cgi-bin/cvename.cgi?name=" + link_obj["url"]
        elif link_obj["type"] == "NVD":
            url = "https://nvd.nist.gov/vuln/detail/" + link_obj["url"]

        return url
|
justinlulejian/fah-gae | lib/google/cloud/storage/__init__.py | Python | mit | 1,604 | 0 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shortcut methods for getting set up with Google Cloud S | torage.
You'll typically use these to get started with the API:
>>> from google.cloud import storage
>>> client = storage.Client()
>>> bucket = client.get_bucket('bucket-id-here')
>>> # Then do other things...
>>> blob = bucket.get_blob('/remote/path/to/file.txt')
>>> print(blob.download_as_string())
>>> blob.upload_from_string('New contents!')
>>> blob2 = bucket.blob('/remote/path/storage.txt')
>>> blob2.upload_from_filename(filename='/local/path.txt')
The main concepts with this API are:
- :class:`~google.cloud.storage.bucket.Bucket` which represents a particular
bucket (akin to a mounted disk on a computer).
- :class:`~google.cloud.storage.blob.Blob` which represents a pointer to a
particular entity in Cloud Storage (akin to a file path on a remote
machine).
"""
from google.cloud.storage.batch import Batch
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket
from google.cloud.storage.client import Client
|
jseppi/load-cms-data | load_cms_data.py | Python | mit | 7,893 | 0.003801 | import sys, getopt
import sqlite3
import csv
import os
from create_cms_db import create_cms_db
#TODO: In blog, mention had to save this as CSV because only Excel provided
region_crosswalk_file = 'data/ZipHsaHrr11.csv'
inpatient_file = 'data/Medicare_Provider_Charge_Inpatient_DRG100_FY2011.csv'
outpatient_file = 'data/Medicare_Provider_Charge_Outpatient_APC30_CY2011.csv'
db_name = ''
def print_usage():
print "Usage: $ python load_cms_data.py -d <sqlitedb>"
sys.exit(2)
try:
opts, args = getopt.getopt(sys.argv[1:], 'd:')
except getopt.GetoptError:
print_usage()
for opt, arg in opts:
if opt == '-d':
db_name = arg
if db_name == '':
print_usage()
if not os.path.isfile(db_name):
print "Database <" + db_name + "> not found, creating new with same name."
create_cms_db(db_name)
# ------------------------------------------------------
# First load ZipCode-to-ReferralRegion data from the region crosswalk file
zip_regions = {}
ref_regions = {}
service_areas = {}
# columns = zipcode11 hsanum hsacity hsastate hrrnum hrrcity hrrstate
with open(region_crosswalk_file, 'rb') as csvfile:
r = csv.DictReader(csvfile)
for row in r:
zip_code = str(row['zipcode11']).zfill(5)
hsa_id = int(row['hsanum'])
hrr_id = int(row['hrrnum'])
zip_regions[zip_code] = {
'hsa_id': hsa_id,
'hsa_city': row['hsacity'],
'hsa_state': row['hsastate'],
'hrr_id': hrr_id,
'hrr_city': row['hrrcity'],
'hrr_state': row['hrrstate']
}
service_areas[hsa_id] = {
'city': row['hsacity'],
'state': row['hsastate']
}
ref_regions[hrr_id] = {
'city': row['hrrcity'],
'state': row['hrrstate']
}
# ------------------------------------------------------
# Next, load the drgs and inpatient payment info from the inpatient_file
drgs = {}
providers = {}
inpatient_payments = {}
# inpatient_columns = DRG Definition,Provider Id,Provider Name,Provider Street Address,
# Provider City,Provider State,Provider Zip Code,
# Hospital Referral Region Description,
# Total Discharges , Average Covered Charges , Average Total Payments
# Parse the inpatient CSV: each row yields a DRG definition, the provider
# that billed it, and the discharge/charge/payment aggregates.
with open(inpatient_file, 'rb') as csvfile:
    r = csv.reader(csvfile)
    r.next()  # Skip the header row
    curr_inpatient_payment_id = 0
    for row in r:
        # DRG column is "NNN - Description"
        drg_defn = row[0]
        drg_id = int(drg_defn.split(' - ')[0])
        drg_name = drg_defn.split(' - ')[1]
        drgs[drg_id] = {
            'name': drg_name
        }
        provider_id = int(row[1])
        provider_name = row[2]
        provider_street = row[3]
        provider_city = row[4]
        provider_state = row[5]
        provider_zip = str(row[6]).zfill(5)
        # unused: ref_region_name = row[7]
        providers[provider_id] = {
            'name': provider_name,
            'street': provider_street,
            'city': provider_city,
            'state': provider_state,
            'zip': provider_zip
        }
        num_discharged = int(row[8])
        avg_charge = float(row[9])
        avg_payment = float(row[10])
        inpatient_payments[curr_inpatient_payment_id] = {
            'drg_id': drg_id,
            'provider_id': provider_id,
            'num_discharged': num_discharged,
            'avg_charge': avg_charge,
            'avg_payment': avg_payment
        }
        curr_inpatient_payment_id += 1
# ------------------------------------------------------
# Then load the apcs and outpatient payment info from the outpatient_file
apcs = {}
outpatient_payments = {}
# outpatient_columns = APC,Provider Id,Provider Name,Provider Street Address,
# Provider City,Provider State,Provider Zip Code,Hospital Referral Region Description,
# Outpatient Services,Average Estimated Submitted Charges,Average Total Payments
with open(outpatient_file, 'rb') as csvfile:
r = csv.reader(csvfile)
r.next() # Skip the header row
curr_outpatient_payment_id = 0
for row in r:
apc_defn = row[0]
apc_id = int(apc_defn.split(' - ')[0])
apc_name = apc_defn.split(' - ')[1]
apcs[apc_id] = {
'name': apc_name
}
provider_id = int(row[1])
provider_name = row[2]
provider_street = row[3]
provider_city = row[4]
provider_state = row[5]
provider_zip = str(row[6]).zfill(5)
providers[provider_id] = {
'name': provider_name,
'street': provider_street,
'city': provider_city,
'state': provider_state,
'zip': provider_zip
}
# unused: ref_region_name = row[7]
num_discharged = int(row[8])
avg_charge = float(row[9])
avg_payment = float(row[10])
outpatient_payments[curr_outpatient_payment_id] = {
'apc_id': apc_id,
'provider_id': provider_id,
'num_discharged': num_discharged,
'avg_charge': avg_charge,
'avg_payment': avg_payment
}
curr_outpatient_payment_id += 1
# ------------------------------------------------------
# Finally, write it all to the database
with sqlite3.connect(db_name) as conn:
cursor = conn.cursor()
# zip_regions
zip_id = 0
for zipcode, v in zip_regions.iteritems():
cursor.execute("""INSERT INTO zip_regions(
id, zip, hsa_id, hrr_id)
VALUES (?, ?, ?, ?)""",
(zip_id, zipcode, v['hsa_id'], v['hrr_id'])
)
zip_id += 1
# service_areas
for hsa_id, v in service_areas.iteritems():
cursor.execute("""INSERT INTO service_areas(
id, state, city)
VALUES (?, ?, ?)""",
(hsa_id, v['state'], v['city'])
)
# ref_regions
for hrr_id, v in ref_regions.iteritems():
cursor.execute("""INSERT INTO ref_regions(
id, state, city)
VALUES (?, ?, ?)""",
(hrr_id, v['state'], v['city'])
)
# providers
for prov_id, v in providers.iteritems():
cursor.execute("""INSERT INTO providers(
id, name, street, city, state, zip)
VALUES (?, ?, ?, ?, ?, ?)""",
(prov_id, v['name'], v['street'],
v['city'], v['state'], v['zip'])
)
# drgs
for drg_id, v in drgs.iteritems():
cursor.execute("""INSERT INTO drgs(
id, name) VALUES (?, ?)""",
(drg_id, v['name'])
)
# inpatient_payments
for id, v in inpatient_payments.iteritems():
cursor.execute("""INSERT INTO inpatient_payment_info(
id, procedure_id, provider_id, num_discharged,
avg_charge, avg_payment)
VALUES (?, ?, ?, ?, ?, ?)""",
(id, v['drg_id'], v['provider_id'],
v['num_discharged'], v['avg_charge'],
v['avg_payment'])
)
# apcs
for apc_id, v in apcs.iteritems():
cursor.execute("""INSERT INTO apcs(
id, name) VALUES (?, ?)""",
(apc_id, v['name'])
)
# outpatient_payments
for id, v in outpatient_payments.iteritems():
cursor.execute("""INSERT INTO outpatient_payment_info(
id, procedure_id, provider_id, num_discharged,
avg_charge, avg_payment)
VALUES (?, ?, ?, ?, ?, ?)""",
(id, v['apc_id'], v['provider_id'],
v['num_discharged'], v['avg_charge'],
v['avg_payment'])
)
cursor.close()
print "DONE!" |
openpolis/open_municipio | fabfile.sample/conf_production.py | Python | agpl-3.0 | 3,837 | 0.011989 | import os
# IP/domain name of the production server
SERVER_MACHINE = 'www.openmunicipio.it' ## CHANGEME!
# Python interpreter executable to use on virtualenv creation
PYTHON_BIN = 'python' #pyhton 2.7
PYTHON_PREFIX = '' # e.g. ``/usr``, ``/usr/local``; leave empty for default.
PYTHON_FULL_PATH = "%s/bin/%s" % (PYTHON_PREFIX, PYTHON_BIN) if PYTHON_PREFIX else PYTHON_BIN
# exclude patterns for ``rsync`` invocations
RSYNC_EXCLUDE = (
'*~',
'*.pyc',
'settings_*.py',
'urls_*.py',
)
# the name of the Django project managed by this fabfile
PROJECT_NAME = 'open_xxx' ## CHANGEME!
LOCALE = 'it_IT.UTF8'
# a unique identifier for this web application instance
# usually it's set to the primary domain from which the web application is accessed
APP_DOMAIN = 'xxx.openmunicipio.it' ## CHANGEME!
# filesystem location of project's repository on the local machine
LOCAL_REPO_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
# filesystem location of Django project's files on the local machine
LOCAL_PROJECT_ROOT = os.path.join(LOCAL_REPO_ROOT, PROJECT_NAME)
# system user (on the server machine) used for managing files
# shared between OpenMunicipio's instances
WEB_USER = 'om'
# absolute filesystem path to the public SSH key being used
# to login as ``WEB_USER`` on the remote machine(s)
WEB_USER_HOSTKEY = '~/.ssh/id_rsa.pub' ## CHANGEME!
# system user (on the server machine) used for managing this OpenMunicipio's instance
OM_USER = 'om-xxx' ## CHANGEME!
# absolute filesystem path to the public SSH key being used
# to login as the ``OM_USER`` user on the remote machine(s)
OM_USER_HOSTKEY = '~/.ssh/id_rsa.pub' ## CHANGEME!
###------------------ Django ------------###
# the parent directory of domain-specific directories (on the server machine)
WEB_ROOT = '/home/open_municipio'
# the root directory for domain-specific files (on the server machine)
DOMAIN_ROOT = os.path.join(WEB_ROOT, APP_DOMAIN)
# the root directory of application-specific Python virtual environment (on the server machine)
VIRTUALENV_ROOT = os.path.join(DOMAIN_ROOT, 'private', 'venv')
# the root directory for project-specific files (on the server machine)
PROJECT_ROOT = os.path.join(DOMAIN_ROOT, 'private', PROJECT_NAME)
# import path of Django settings module for the production environment
DJANGO_SETTINGS_MODULE = '%(project)s.settings_production' % {'project': PROJECT_NAME}
# Directory where static files should be collected. This MUST equal the value
# of ``STATIC_ROOT`` attribute of the Django settings module used on the server.
STATIC_ROOT = os.path.join(DOMAIN_ROOT, 'public', 'static')
###------------------ Tomcat ------------###
# system user the Tomcat process run as
TOMCAT_USER = 'tomcat6'
# Tomcat's controller script
TOMCAT_CONTROLLER = '/etc/init.d/tomcat6'
# home dir for Catalina
CATALINA_HOME = '/etc/tomcat6/Catalina'
###------------------ Solr ------------###
# absolute filesystem path to the public SSH key being used
# to login as the ``solr`` user on the remote machine(s)
SOLR_USER_HOSTKEY = '~/.ssh/id_rsa.pub' ## CHANGEME!
# URL pointing to the Solr distribution to be installed on the server machine
# Must be a compressed tarball (i.e. a ``.tgz`` or ``.tar.gz`` file)
SOLR_DOWNLOAD_LINK = 'http://apache.fastbull.org/lucene/solr/3.6.0/apache-solr-3.6.0.tgz'
# where Solr configuration and data reside on the server machine
SOLR_INSTALL_DIR = '/home/apache-solr-3.6.0'
# where configuration/data files for Solr reside on the server machine
SOLR_HOME = '/home/solr'

###------------------ PostgreSQL ------------###
# root dir for PostgreSQL configuration files
POSTGRES_CONF_DIR = '/etc/postgresql/8.4/main'
# PostgreSQL's controller script
POSTGRES_CONTROLLER = 'service postgresql-8.4'
# DB username (same system user that owns this OpenMunicipio instance)
DB_USER = OM_USER
# name of the application DB
DB_NAME = OM_USER
|
eLRuLL/scrapy | scrapy/core/downloader/handlers/s3.py | Python | bsd-3-clause | 4,259 | 0.000704 | from urllib.parse import unquote
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import create_instance
def _get_boto_connection():
    """Return an ``S3Connection`` subclass whose ``_mexe`` performs no
    synchronous network I/O and simply yields the signed headers.

    The subclass chosen depends on the installed boto generation
    (presence of ``boto.auth`` marks boto >= 2.0).
    """
    from boto.s3.connection import S3Connection

    class _v19_S3Connection(S3Connection):
        """A dummy S3Connection wrapper that doesn't do any synchronous download"""
        def _mexe(self, method, bucket, key, headers, *args, **kwargs):
            return headers

    class _v20_S3Connection(S3Connection):
        """A dummy S3Connection wrapper that doesn't do any synchronous download"""
        def _mexe(self, http_request, *args, **kwargs):
            http_request.authorize(connection=self)
            return http_request.headers

    try:
        import boto.auth  # noqa: F401
    except ImportError:
        return _v19_S3Connection
    return _v20_S3Connection
class S3DownloadHandler:
    """Download handler for ``s3://`` requests.

    Signs each request with AWS credentials (via botocore when available,
    otherwise boto) and delegates the actual transfer to the plain HTTP
    download handler.
    """

    def __init__(self, settings, *,
                 crawler=None,
                 aws_access_key_id=None, aws_secret_access_key=None,
                 httpdownloadhandler=HTTPDownloadHandler, **kw):
        if not aws_access_key_id:
            aws_access_key_id = settings['AWS_ACCESS_KEY_ID']
        if not aws_secret_access_key:
            aws_secret_access_key = settings['AWS_SECRET_ACCESS_KEY']

        # If no credentials could be found anywhere,
        # consider this an anonymous connection request by default;
        # unless 'anon' was set explicitly (True/False).
        anon = kw.get('anon')
        if anon is None and not aws_access_key_id and not aws_secret_access_key:
            kw['anon'] = True
        self.anon = kw.get('anon')

        self._signer = None
        if is_botocore():
            import botocore.auth
            import botocore.credentials
            kw.pop('anon', None)
            if kw:
                raise TypeError('Unexpected keyword arguments: %s' % kw)
            if not self.anon:
                SignerCls = botocore.auth.AUTH_TYPE_MAPS['s3']
                self._signer = SignerCls(botocore.credentials.Credentials(
                    aws_access_key_id, aws_secret_access_key))
        else:
            _S3Connection = _get_boto_connection()
            try:
                self.conn = _S3Connection(
                    aws_access_key_id, aws_secret_access_key, **kw)
            except Exception as ex:
                raise NotConfigured(str(ex))

        _http_handler = create_instance(
            objcls=httpdownloadhandler,
            settings=settings,
            crawler=crawler,
        )
        self._download_http = _http_handler.download_request

    @classmethod
    def from_crawler(cls, crawler, **kwargs):
        return cls(crawler.settings, crawler=crawler, **kwargs)

    def download_request(self, request, spider):
        """Rewrite the s3:// request to its HTTP(S) equivalent, sign it,
        and hand it to the HTTP download handler."""
        p = urlparse_cached(request)
        scheme = 'https' if request.meta.get('is_secure') else 'http'
        bucket = p.hostname
        path = p.path + '?' + p.query if p.query else p.path
        url = '%s://%s.s3.amazonaws.com%s' % (scheme, bucket, path)
        if self.anon:
            request = request.replace(url=url)
        elif self._signer is not None:
            # botocore path: sign a shadow AWSRequest, then copy its headers.
            import botocore.awsrequest
            awsrequest = botocore.awsrequest.AWSRequest(
                method=request.method,
                url='%s://s3.amazonaws.com/%s%s' % (scheme, bucket, path),
                headers=request.headers.to_unicode_dict(),
                data=request.body)
            self._signer.add_auth(awsrequest)
            request = request.replace(
                url=url, headers=awsrequest.headers.items())
        else:
            # boto path: make_request is patched (see _get_boto_connection)
            # to only return the signed headers, never to hit the network.
            signed_headers = self.conn.make_request(
                method=request.method,
                bucket=bucket,
                key=unquote(p.path),
                query_args=unquote(p.query),
                headers=request.headers,
                data=request.body)
            request = request.replace(url=url, headers=signed_headers)
        return self._download_http(request, spider)
|
AliZafar120/NetworkStimulatorSPl3 | rapidnet/plot/merge_logs.py | Python | gpl-2.0 | 12,893 | 0.02831 | #!/usr/bin/python
import os, sys
import math, gmpy
if len (sys.argv) not in [2, 3, 4]:
print 'Usage: merge_logs <logs_dir> [<route_quality(True/False)>] [<bandwidth_color(True/False)>]'
sys.exit (0)
dir = sys.argv[1]
if len (sys.argv) >= 3:
route_quality = sys.argv[2]
else :
route_quality = 'False'
if len (sys.argv) >= 4:
bandwidth_color = sys.argv[3]
else :
bandwidth_color = 'True'
if not os.path.isdir (dir):
print '%s is not a directory.' % dir
sys.exit (0)
decorator_log = os.path.join (dir, 'decorator.log')
bw_sent_points = os.path.join (dir, 'bandwidth-sent.points')
validity_points = os.path.join (dir, 'validity.points')
stretch_points = os.path.join (dir, 'stretch.points')
losses_points = os.path.join (dir, 'losses.points')
data_points = os.path.join (dir, 'dataRate.points')
output_log = os.path.join (dir, 'output.log')
events_log = os.path.join (dir, 'events.log')
ofile = open (events_log, 'w')
ipaddr = {}
ipaddrinv = {}
events = {}
linkSrc = {}
bestPath = {}
eBitVectorReply = {}
count = 0
tMessageCount = {}
currentMessage = ''
previousTime = 0
linkevents = {}
LSUevents = {}
maxLSU = 0
Bandwidth = True
Losses = True
Linkevent = True
Validity = False
Stretch = False
Bandwidth_Color = True
if route_quality == 'True':
Validity = True
Stretch = True
elif route_quality == 'False':
Validity = False
Stretch = False
if bandwidth_color == 'True':
Bandwidth_Color = True
elif bandwidth_color == 'False':
Bandwidth_Color = False
def add_event (time, event):
if time not in events:
events[time] = [event]
else:
events[time] += [event]
####################################### For LS and HSLS
def check_lsu(time, line):
global maxLSU
if line.split()[1] == 'tuple':
if line.split()[3] == '+eLSU':
node = line.split()[2]
src = line.split()[4].split(':')[1]
if src != '127.0.0.1':
t = long(time)/2000000000L
time = t * 2000000000L
smallid = min (int(node),ipaddrinv[src])
bigid = max (int(node),ipaddrinv[src])
if (time, smallid, bigid) not in LSUevents:
LSUevents[(time, smallid, bigid)] = 1
else:
LSUevents[(time, smallid, bigid)] += 1
if LSUevents[(time, smallid, bigid)] > maxLSU:
maxLSU = LSUevents[(time, smallid, bigid)]
####################################### For DSR
def add_path_dsr (line):
global count
if line.split()[1] == 'link':
if line.split()[4] == '+linkSrc':
linkSrc[count] = [line.split()[2],line.split()[3]]
count += 1
def delete_path_dsr (time, line):
if line.split()[3] == '+Clear':
for items in linkSrc.keys():
event = str(time) + 'ns link ' + linkSrc[items][0] + ' ' + linkSrc[items][1] + ' -linkSrc\n'
add_event (time, event)
linkSrc.clear()
for items in bestPath.keys():
event = str(time) + 'ns link ' + bestPath[items][0] + ' ' + bestPath[items][1] + ' -bestPath\n'
add_event (time, event)
bestPath.clear()
def update_BestPath (time, line):
if line.split()[3] == '+tBestPath' and line.split()[1] == 'tuple':
time=time+10
for items in bestPath.keys():
event = str(time) + 'ns link ' + bestPath[items][0] + ' ' + bestPath[items][1] + ' -bestPath\n'
add_event (time, event)
bestPath.clear()
time=time+100
path = line.split()[4].split('(')[1].partition (')')[0]
pathNode = path.split(',')
length = len(pathNode)
for i in range(0,length-1):
bestPath[i] = [str(int(pathNode[i].split('.')[3])-1),pathNode[i+1].split(':')[1]]
for items in bestPath.keys():
event = str(time) + 'ns link ' + bestPath[items][0] + ' ' + bestPath[items][1] + ' +bestPath\n'
add_event (time, event)
def parse_for_dsr(time,line):
add_path_dsr (line)
delete_path_dsr (time, line)
update_BestPath (time, line)
####################################### For Epidemic
def delete_eBitVectorReply (time, line):
time=time+1000000000
if line.split()[1] == 'link':
if line.split()[4] == '+eBitVectorReply':
event = str(time) + 'ns link ' + line.split()[2] + ' ' + line.split()[3] + ' -eBitVectorReply\n'
add_event (time, event)
def add_eMessage_link (time, line):
if line.split()[1] == 'tuple':
if line.split()[3] == '+eMessage':
src = line.split()[4].split(':')[1]
event = str(time) + 'ns link ' + line.split()[2] + ' ' + src + ' +recv_eMessage\n'
add_event (time, event)
time=time+1000000000
event = str(time) + 'ns link ' + line.split()[2] + ' ' + src + ' -recv_eMessage\n'
add_event (time, event)
def count_tMessage (time, line):
global currentMessage, previousTime
if line.split()[1] == 'tuple':
if line.split()[3] == '+tMessage':
messageID = line.split(':')[1]
if messageID not in tMessageCount:
currentMessage = messageID
tMessageCount[currentMessage] = 1
event = str(previousTime) + 'ns point tmessage 0\n'
add_event (previousTime, event)
timejustnow = time-100
event = str(timejustnow) + 'ns point tmessage 0\n'
add_event (timejustnow, event)
else:
tMessageCount[messageID] += 1
event = str(time) + 'ns point tmessage %d\n'%tMessageCount[currentMessage]
add_event (time, event)
previousTime = time + 1000
def parse_for_epidemic(time,line):
delete_eBitVectorReply (time, line)
add_eMessage_link (time, line)
count_tMessage (time, line)
####################################### For LinkEvent
def count_linkevents (time, line):
global linkevents
if line.split()[1] == 'link':
if line.split()[4] in ['+link', '-link']:
if int(line.split()[2]) != int(line.split()[3].split('.')[3])-1 :
t = long(time)/1000000000L
time = (t+1) * 1000000000L
if time not in linkevents:
linkevents[time] = 1
else:
linkevents[time] += 1
def add_linkevents ():
max_linkevent = 0
for time in linkevents.keys():
if linkevents[time] > max_linkevent and time > 5000000000L: # Exclude first 5 seconds
max_linkevent = linkevents[time]
add_event (time, '%ldns point linkevent %d\n' % (time, linkevents[time]))
if (route_quality == 'False | '):
ofile.write ('info graph linkevent Per-Second_Link_Events 0 %s 0 %d %d\n'%(duration, max_linkevent, 3))
##### For Chord
def parse_for_chord (time, line):
    """Place a Chord node on the visualization ring from its 160-bit id.

    The id (hex digits after the last ':') is mapped to an angle around the
    circle centred at (CX, CY) with radius RADIUS, then emitted as a
    position event followed by a '+chordRing' state event.
    """
    MAX = (gmpy.mpz (1) << 160) - 1
    RADIUS = gmpy.mpz (700)
    CX, CY = [gmpy.mpz (1000), gmpy.mpz (1000)]
    node_id = line.split(' ')[2]
    if ('node_attr2' in line):
        id = gmpy.mpz (line.rpartition(':')[2], 16)
        angle = gmpy.mpf (math.pi) * id / MAX
        x, y = CX + RADIUS * math.cos (angle), CY + RADIUS * math.sin(angle)
        add_event (time, '%ldns position %s %f %f 0 0 0 0\n' % (time, node_id, x, y))
        add_event (time, '%ldns state %s +chordRing\n' % (time, node_id))
####################################### For All
print 'Reading output.log...'
file = open (output_log, 'r')
count = 0
speed_low = 0
speed_up = 0
duration = 0
node_count = 0
for line in file:
count += 1
if count > 30:
continue
words = line.split (' : ')
if words[0].startswith('Duration'):
value = words[1].split(' ')[0]
duration = value
ofile.write ('info label duration Duration %s %d\n'%(value,5))
if words[0].startswith('Number of nodes'):
value = words[1].strip('\n')
node_count = value
ofile.write ('info label node_count Nodes_Count %s %d\n'%(value,3))
if words[0].startswith('RapidNet Application'):
app = words[1].strip('\n')
if app == 'SimLsPeriodic' or app == 'EmuLsPeriodic':
protocol = 'Link_State_Periodic'
elif app == 'SimLsTriggered' or app == 'EmuLsTriggered':
protocol = 'Link_State_Triggered'
elif app == 'SimHslsPeriodic' or app == 'EmuHslsPeriodic':
protocol = 'HSLS_Periodic'
elif app == 'SimHslsTriggered' or app == 'EmuHslsTriggered':
protocol = 'HSLS_Triggered'
elif app == 'Epidemic':
protocol = 'Epidemic'
elif app == 'Dsr':
protocol = 'DSR'
else:
protocol = app
ofile.write ('info label protoc |
squadran2003/filtering-searching-mineral-catalogue | filtering-searching-mineral-catalogue/minerals/tests.py | Python | mit | 3,110 | 0.052275 | from django.test import TestCase
from django.urls import reverse
# Create your tests here.
from .models import Mineral
class MineralModelTests(TestCase):
    """Model-level tests for the Mineral catalogue."""

    def test_new_mineral_created(self):
        """A freshly created Mineral must appear in the full queryset."""
        attributes = {
            'name': "Abelsonite",
            'image_filename': "240px-Abelsonite_-_Green_River_Formation%2C_Uintah_County%2C_Utah%2C_USA.jpg",
            'image_caption': "Abelsonite from the Green River Formation, Uintah County, Utah, US",
            'category': "Organic",
            'formula': "C<sub>31</sub>H<sub>32</sub>N<sub>4</sub>Ni",
            'strunz_classification': "10.CA.20",
            'crystal_system': "Triclinic",
            'unit_cell': "a = 8.508 Å, b = 11.185 Åc=7.299 Å, α = 90.85°β = 114.1°, γ = 79.99°Z = 1",
            'color': "Pink-purple, dark greyish purple, pale purplish red, reddish brown",
            'crystal_symmetry': "Space group: P1 or P1Point group: 1 or 1",
            'cleavage': "Probable on {111}",
            'mohs_scale_hardness': "2–3",
            'luster': "Adamantine, sub-metallic",
            'streak': "Pink",
            'diaphaneity': "Semitransparent",
            'optical_properties': "Biaxial",
            'group': "Organic Minerals",
        }
        mineral = Mineral.objects.create(**attributes)
        self.assertIn(mineral, Mineral.objects.all())
class MineralViewTests(TestCase):
    """View-level tests for the mineral list, detail and search endpoints."""

    def setUp(self):
        # Single fixture mineral shared by every view test.
        self.mineral = Mineral.objects.create(
            name="Abhurite",
            image_filename="240px-Abhurite_-_Shipwreck_Hydra%2C_South_coast_of_Norway.jpg",
            image_caption="Brownish tabular crystals of abhurite from Shipwreck \"Hydra\", South coast of Norway",
            category="Halide",
            formula="Sn<sub>21</sub>O<sub>6</sub>(OH)<sub>14</sub>Cl<sub>16</sub>",
            strunz_classification="03.DA.30",
            crystal_symmetry="Trigonal",
            group="Halides"
        )

    def test_minerals_view(self):
        resp = self.client.get(reverse('minerals:mineral_list'))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/minerals.html')

    def test_minerals_detail_view(self):
        resp = self.client.get(reverse('minerals:mineral_detail',
                                       kwargs={'pk': self.mineral.id}))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(self.mineral, resp.context['mineral'])
        self.assertTemplateUsed(resp, 'minerals/mineral_detail.html')

    def test_search_by_letter(self):
        letter = "A".lower()
        resp = self.client.get(reverse('minerals:search_by_letter',
                                       kwargs={'letter': letter}))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/minerals.html')

    def test_search_by_text(self):
        resp = self.client.post("/minerals/search/text/", {'search': 'ab'})
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/minerals.html')

    def test_search_by_group(self):
        group = "Halides"
        resp = self.client.get(reverse('minerals:search_by_group',
                                       kwargs={'group': group}))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/minerals.html')
stczhc/neupy | neupy/plots/error_plot.py | Python | mit | 1,872 | 0 | import numpy as np
import matplotlib.pyplot as plt
__all__ = ('error_plot',)
def error_plot(network, logx=False, ax=None, show=True):
    """Make a line plot that shows training progress. x-axis is an epoch
    number and y-axis is an error.

    Parameters
    ----------
    network : object
        Trained network exposing ``errors`` / ``validation_errors``
        histories and a ``logs`` attribute for warnings.
    logx : bool
        Parameter set up logarithmic scale to x-axis.
        Defaults to ``False``.
    ax : object or None
        Matplotlib axis object. ``None`` values means that axis equal
        to the current one (the same as ``ax = plt.gca()``).
        Defaults to ``None``.
    show : bool
        If parameter is equal to ``True`` plot will instantly shows
        the plot. Defaults to ``True``.

    Returns
    -------
    object
        Matplotlib axis instance.
    """
    if ax is None:
        ax = plt.gca()

    if not network.errors:
        network.logs.warning("There is no data to plot")
        return ax

    train_errors = network.errors.normalized()
    validation_errors = network.validation_errors.normalized()

    # Validation curve is only meaningful when it lines up epoch-for-epoch
    # with the training curve and contains at least one real number.
    if len(train_errors) != len(validation_errors):
        network.logs.warning("Number of train and validation errors are "
                             "not the same. Ignored validation errors.")
        validation_errors = []

    if all(np.isnan(validation_errors)):
        validation_errors = []

    errors_range = np.arange(len(train_errors))
    plot_function = ax.semilogx if logx else ax.plot

    line_error_in, = plot_function(errors_range, train_errors)
    if validation_errors:
        line_error_out, = plot_function(errors_range, validation_errors)
        ax.legend(
            [line_error_in, line_error_out],
            ['Train', 'Validation']
        )

    ax.set_title('Training performance')
    ax.set_ylim(bottom=0)
    ax.set_ylabel('Error')
    ax.set_xlabel('Epoch')

    if show:
        plt.show()

    return ax
|
alard/warctozip | hanzo/httptools/messaging.py | Python | mit | 18,022 | 0.000555 | """A set of stream oriented parsers for http requests and responses, inline
with the current draft recommendations from the http working group.
http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-17
Unlike other libraries, this is for clients, servers and proxies.
Missing:
comma parsing/header folding
"""
import re
import zlib
class ParseError(StandardError):
"""Baseclass for all http parsing errors"""
pass
from hanzo.httptools.semantics import Codes, Methods
NEWLINES = ('\r\n', '\n')
class HTTPMessage(object):
"""A stream based parser for http like messages"""
CONTENT_TYPE = "application/http"
def __init__(self, header):
self.buffer = bytearray()
self.offset = 0
self.header = header
self.body_chunks = []
self.mode = 'start'
self.body_reader = None
@property
def url(self):
return self.header.url
@property
def scheme(self):
return self.header.scheme
@property
def method(self):
return self.header.method
@property
def host(self):
return self.header.host
@property
def port(self):
return self.header.port
def feed_fd(self, fd):
while True:
length, terminator = self.feed_predict()
if length == 0:
return ''
elif terminator == '\r\n':
text = fd.readLine()
elif length < 0:
text = fd.read()
elif length > 0:
text = fd.read(length)
unread = self.feed(text)
if unread:
return unread
    def feed_predict(self):
        """Predict what the parser needs next from the input stream.

        Returns a (size, terminator) pair:
          * (None, '\\r\\n') -- read up to the next newline (start/header lines);
          * (-1, None)       -- read to EOF (connection-close body);
          * (0, None)        -- message is finished or cannot continue;
          * positive sizes (from the body reader) request that many bytes.
        """
        if self.mode == 'start':
            return None, '\r\n'
        elif self.mode == 'headers':
            return None, '\r\n'
        elif self.mode == 'body':
            if self.body_reader is not None:
                # Delegate: the chunked/length reader knows its own needs.
                return self.body_reader.feed_predict()
            else:
                # connection close
                return -1, None
        if self.mode == 'end':
            return 0, None
        if self.mode == 'incomplete':
            return 0, None
def feed(self, text):
"""Push more text from the input stream into the parser."""
if text and self.mode == 'start':
text = self.feed_start(text)
if text and self.mode == 'headers':
text = self.feed_headers(text)
if self.mode == 'body':
if not self.header.has_body():
self.mode = 'end'
else:
if self.header.body_is_chunked():
self.body_reader = ChunkReader()
else:
length = self.header.body_length()
if length >= 0:
self.body_reader = LengthReader(length)
self.body_chunks = [(self.offset, length)]
if length == 0:
self.mode = 'end'
else:
self.body_chunks = [(self.offset, 0)]
self.body_reader = None
if text and self.mode == 'body':
if self.body_reader is not None:
#print >> sys.stderr, 'feeding', text[:50]
text = self.body_reader.feed(self, text)
else:
((offset, length),) = self.body_chunks
self.buffer.extend(text)
self.offset = len(self.buffer)
self.body_chunks = ((offset, length + len(text)),)
text = ''
return text
def close(self):
"""Mark the end of the input stream and finish parsing."""
if (self.body_reader is None and self.mode == 'body'):
self.mode = 'end'
elif self.mode != 'end':
if self.body_chunks:
# check for incomplete in body_chunks
offset, length = self.body_chunks.pop()
position = len(self.buffer)
length = min(length, position - offset)
self.body_chunks.append((offset, length))
self.mode = 'incomplete'
def headers_complete(self):
"""Check whether the input stream has finished supplying headers."""
return self.mode in ('end', 'body')
def complete(self):
"""Checks whether the input stream is at the end, i.e. if the parser
is expecting no more input."""
return self.mode == 'end'
    def feed_line(self, text):
        """Append *text* to the buffer and split off the first complete line.

        Returns (line, leftover): *line* is the buffered text from
        self.offset up to and including the first '\\n' (None if no
        newline has arrived yet); *leftover* is everything after that
        newline, removed from the buffer so the caller can re-feed it.
        """
        self.buffer.extend(text)
        pos = self.buffer.find('\n', self.offset)
        if pos > -1:
            pos += 1  # include the newline in the returned line
            # Hand everything past the newline back to the caller...
            text = str(self.buffer[pos:])
            del self.buffer[pos:]
            # ...while the line itself stays buffered; offset records how
            # far parsing has consumed.
            line = str(self.buffer[self.offset:])
            self.offset = len(self.buffer)
        else:
            line = None
            text = ''
        return line, text
def feed_length(self, text, remaining):
"""Feed (at most remaining bytes) text to buffer, returning
leftovers."""
body, text = text[:remaining], text[remaining:]
remaining -= len(body)
self.buffer.extend(body)
self.offset = len(self.buffer)
return remaining, text
def feed_start(self, text):
"""Feed text to the parser while it is in the 'start' state."""
line, text = self.feed_line(text)
if line is not None:
if line not in NEWLINES:
self.header.set_start_line(line)
self.mode = 'headers'
return text
def feed_headers(self, text):
"""Feed text to the parser while it is in the 'headers'
state."""
while text:
line, text = self.feed_line(text)
if line is not None:
self.header.add_header_line(line)
if line in NEWLINES:
self.mode = 'body'
break
return text
def get_message(self):
"""Returns the contents of the input buffer."""
return str(self.buffer)
def get_decoded_message(self):
"""Return the input stream reconstructed from the parsed
data."""
buf = bytearray()
self.write_decoded_message(buf)
return str(buf)
def write_message(self, buf):
| #TODO: No idea what this does, looks broken
| self.header.write(buf)
buf.extend('\r\n')
self.write_body(buf)
def write_decoded_message(self, buf):
"""Writes the parsed data to the buffer passed."""
self.header.write_decoded(buf)
if self.header.has_body():
length = sum(l for o, l in self.body_chunks)
buf.extend('Content-Length: %d\r\n' % length)
body = self.get_body()
if self.header.encoding and body:
try:
body = zlib.decompress(body)
except zlib.error:
try:
body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
except zlib.error:
encoding_header = "Content-Encoding: %s\r\n" \
% self.header.encoding
buf.extend(encoding_header)
buf.extend('\r\n')
buf.extend(body)
def get_body(self):
"""Returns the body of the HTTP message."""
buf = bytearray()
self.write_body(buf)
return str(buf)
def write_body(self, buf):
"""Writes the body of the HTTP message to the passed
buffer."""
for offset, length in self.body_chunks:
buf.extend(self.buffer[offset:offset + length])
class ChunkReader(object):
"""Reads the body of a HTTP message with chunked encoding."""
def __init__(self):
self.mode = "start"
self.remaining = 0
def feed_predict(self):
if self.mode == 'start':
return None, '\r\n'
elif self.mode == 'chunk':
if self.remaining == 0:
|
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/arrays/__init__.py | Python | apache-2.0 | 581 | 0 | from .array_ import array # noqa: F401
from .base import ( # noqa: F401
ExtensionArray,
ExtensionOpsMixin,
ExtensionScalarOpsMixin,
)
from .categorical import Categorical # noqa: F401
from .datetimes import DatetimeArray # noqa: F401
from .integer import Inte | gerArray, integer_array # noqa: F401
from .interval import IntervalArray # noqa: F401
from .numpy_ import Pa | ndasArray, PandasDtype # noqa: F401
from .period import PeriodArray, period_array # noqa: F401
from .sparse import SparseArray # noqa: F401
from .timedeltas import TimedeltaArray # noqa: F401
|
waseem18/oh-mainline | mysite/base/disk_cache.py | Python | agpl-3.0 | 504 | 0 | # Note:
# This is a total hack to implement simple disk-based memoization,
# with no expiration.
import os
PATH = '/tmp/django_cache_belonging_to_%s' % os.environ.get('USER', 'unknown')


def set(key, value):  # noqa: A001 -- name kept for API compatibility (shadows builtin set)
    """Persist *value* on disk under *key* (no expiration).

    Creates the cache directory on first use.
    """
    if not os.path.isdir(PATH):
        os.mkdir(PATH)
    # Context manager guarantees the handle is closed even if write() fails;
    # the original used the Python-2-only file() builtin and a manual close().
    with open(os.path.join(PATH, key), 'w') as file_obj:
        file_obj.write(value)
def get(key):
try:
| with open(os.path.join(PATH, key)) as f:
return f.read()
ex | cept IOError:
return None
|
efiop/dvc | tests/unit/command/test_imp_url.py | Python | apache-2.0 | 3,589 | 0 | import logging
from dvc.cli import parse_args
from dvc.command.imp_url import CmdImportUrl
from dvc.exceptions import DvcException
def test_import_url(mocker):
cli_args = parse_args(
[
"import-url",
"src",
"out",
"--file",
"file",
"--jobs",
"4",
"--desc",
"description",
]
)
assert cli_args.func == CmdImportUrl
cmd = cli_args.func( | cli_args)
m = mocker.patch.object(cmd.repo, "imp_url", autospec=True)
assert cmd.run() == 0
m.assert_called_once_with(
"src",
out="out",
fname="file",
no_exec=False,
remote=None,
to_remote=False,
desc="description", |
jobs=4,
)
def test_failed_import_url(mocker, caplog):
    """import-url surfaces a helpful hint when Repo.imp_url raises."""
    args = parse_args(["import-url", "http://somesite.com/file_name"])
    assert args.func == CmdImportUrl

    command = args.func(args)
    mocker.patch.object(
        command.repo, "imp_url", side_effect=DvcException("error")
    )
    with caplog.at_level(logging.ERROR, logger="dvc"):
        assert command.run() == 1
        expected_error = (
            "failed to import http://somesite.com/file_name. "
            "You could also try downloading it manually, and "
            "adding it with `dvc add`."
        )
        assert expected_error in caplog.text
def test_import_url_no_exec(mocker):
    """--no-exec is forwarded to Repo.imp_url as no_exec=True."""
    argv = [
        "import-url",
        "--no-exec",
        "src",
        "out",
        "--file",
        "file",
        "--desc",
        "description",
    ]
    parsed = parse_args(argv)
    command = parsed.func(parsed)
    imp_url = mocker.patch.object(command.repo, "imp_url", autospec=True)

    assert command.run() == 0
    imp_url.assert_called_once_with(
        "src",
        out="out",
        fname="file",
        no_exec=True,
        remote=None,
        to_remote=False,
        desc="description",
        jobs=None,
    )
def test_import_url_to_remote(mocker):
    """--to-remote with --remote is forwarded to Repo.imp_url unchanged."""
    argv = [
        "import-url",
        "s3://bucket/foo",
        "bar",
        "--to-remote",
        "--remote",
        "remote",
        "--desc",
        "description",
    ]
    parsed = parse_args(argv)
    assert parsed.func == CmdImportUrl

    command = parsed.func(parsed)
    imp_url = mocker.patch.object(command.repo, "imp_url", autospec=True)

    assert command.run() == 0
    imp_url.assert_called_once_with(
        "s3://bucket/foo",
        out="bar",
        fname=None,
        no_exec=False,
        remote="remote",
        to_remote=True,
        desc="description",
        jobs=None,
    )
def test_import_url_to_remote_invalid_combination(mocker, caplog):
    """Invalid flag combinations around --to-remote exit with an error."""
    # --no-exec together with --to-remote is rejected.
    parsed = parse_args(
        [
            "import-url",
            "s3://bucket/foo",
            "bar",
            "--to-remote",
            "--remote",
            "remote",
            "--no-exec",
        ]
    )
    assert parsed.func == CmdImportUrl

    command = parsed.func(parsed)
    with caplog.at_level(logging.ERROR, logger="dvc"):
        assert command.run() == 1
        expected_msg = "--no-exec can't be combined with --to-remote"
        assert expected_msg in caplog.text

    # --remote without --to-remote is also rejected.
    parsed = parse_args(
        ["import-url", "s3://bucket/foo", "bar", "--remote", "remote"]
    )
    command = parsed.func(parsed)
    with caplog.at_level(logging.ERROR, logger="dvc"):
        assert command.run() == 1
        expected_msg = "--remote can't be used without --to-remote"
        assert expected_msg in caplog.text
google-research/adamatch | semi_supervised_domain_adaptation/adamatch.py | Python | apache-2.0 | 10,175 | 0.003735 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatch(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tx, ty, tu, progress):
sx_domain = jn.ones(2 * sx.shape[0], dtype=jn.int32)
tx_domain = jn.zeros(2 * tx.shape[0], dtype=jn.int32)
tu_domain = jn.zeros(2 * tu.shape[0], dtype=jn.int32)
saved_vars = self.model.vars().tensors()
logit_bn = self.model(jn.concatenate((sx, tx)).reshape((-1, *sx.shape[2:])), training=True,
domain=jn.concatenate((sx_domain, tx_domain)))
logit_bn_sx, logit_bn_tx = jn.split(logit_bn, 2)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tx, tu)).reshape((-1, *sx.shape[2:]))
logit = self.model(xu, training=True, domain=jn.concatenate((sx_domain, tx_domain, tu_domain)))
logit_sx, logit_tx, logit_tu = jn.split(logit, (2 * sx.shape[0], 2 * (sx.shape[0] + tx.shape[0])))
logit_sx += (logit_bn_sx - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_tx += (logit_bn_tx - logit_tx) * objax.random.uniform(logit_tx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tx_weak, logit_tx_strong = logit_tx[::2], logit_tx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xet = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_tx_weak, ty).mean() +
objax.functional.loss.cross_entropy_logits(logit_tx_strong, ty).mean())
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + xet + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xet': xet,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_sx).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tx, ty, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = M | yParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dat | aset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = AdaMatch(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda |
davidam/python-examples | basics/verpython.py | Python | gpl-3.0 | 1,075 | 0.000934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along wit | h GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
# encoding: utf-8
ver = raw_input("¿Quieres ver mi python? (Si | No): ")
if (ver == "Si"):
print("Te quiero")
elif (ver == "No"):
| print("Eres una estrecha")
|
dalou/django-cargo | cargo/migrations/0008_auto_20151120_0906.py | Python | bsd-3-clause | 486 | 0.002058 | # | -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cargo', '0007_auto_20151120_0720'),
]
operations = [
migrations.AlterModelOptions(
name='emailinguseractivationtoken',
options={'ordering': ('activation_date',), 'verbose_name': "Cl\xe9 d'activation", 'verbose_name_plural': "Cl\xe9s d'acti | vation"},
),
]
|
davisd50/sparc.db | sparc/db/splunk/tests/test_kvstore.py | Python | mit | 5,425 | 0.006452 | import os
import unittest
import zope.testrunner
from zope import component
from sparc.testing.fixture import test_suite_mixin
from sparc.testing.testlayer import SPARC_INTEGRATION_LAYER
from sparc.db.splunk.testing import SPARC_DB_SPLUNK_INTEGRATION_LAYER
from zope import schema
from zope.interface import Interface
class ITestSchema(Interface):
date = schema.Date(title=u"date")
datetime = schema.Datetime(title=u"datetime")
decimal = schema.Decimal(title=u"decimal")
float = schema.Float(title=u"float")
int = schema.Int(title=u"int")
bool = schema.Bool(title=u"bool")
list = schema.Set(title=u"list", value_type=schema.Field(title=u"field"))
set = schema.Set(title=u"set", value_type=schema.Field(title=u"field"))
dict = schema.Dict(title=u"dict", key_type=schema.TextLine(title=u"key"), |
value_type=schema.Text(title=u"value"))
ip = schema.DottedName(title=u"ip",min_dots=3,max_dots=3)
ascii = schema.ASCII(title=u"ascii")
class SparcCacheSplunkAreaTestCase(unittest.TestCase):
layer = SPARC_INTEGRATION_LAYER
sm = component.getSiteManager()
|
def test_ISplunkKVCollectionSchema_adapter_for_schemas(self):
from sparc.db.splunk import ISplunkKVCollectionSchema
schema = ISplunkKVCollectionSchema(ITestSchema)
self.assertIn('field.date', schema)
self.assertEquals(schema['field.date'], 'time')
self.assertIn('field.datetime', schema)
self.assertEquals(schema['field.datetime'], 'time')
self.assertIn('field.decimal', schema)
self.assertEquals(schema['field.decimal'], 'number')
self.assertIn('field.float', schema)
self.assertEquals(schema['field.float'], 'number')
self.assertIn('field.int', schema)
self.assertEquals(schema['field.int'], 'number')
self.assertIn('field.bool', schema)
self.assertEquals(schema['field.bool'], 'bool')
self.assertIn('field.list', schema)
self.assertEquals(schema['field.list'], 'array')
self.assertIn('field.set', schema)
self.assertEquals(schema['field.set'], 'array')
self.assertIn('field.dict', schema)
self.assertEquals(schema['field.dict'], 'array')
self.assertIn('field.ip', schema)
self.assertEquals(schema['field.ip'], 'cidr')
self.assertIn('field.ascii', schema)
self.assertEquals(schema['field.ascii'], 'string')
def test_bad_collection(self):
from sparc.db.splunk import ISplunkKVCollectionSchema
class ITestSchemaDict(Interface):
list = schema.List(title=u'bad',
value_type=schema.Dict(title=u'bad'))
sschema = ISplunkKVCollectionSchema(ITestSchemaDict)
self.assertNotIn('field.list', sschema)
class ITestSchemaCollection(Interface):
list = schema.List(title=u'bad',
value_type=schema.List(title=u'bad'))
sschema = ISplunkKVCollectionSchema(ITestSchemaDict)
self.assertNotIn('field.list', sschema)
kv_names = {}
kv_names['test_collection'] = {}
kv_names['test_collection']['field.id'] = "string"
kv_names['test_collection']['field.name'] = "string"
SPARC_DB_SPLUNK_INTEGRATION_LAYER.kv_names.update(kv_names)
class SparcDBSplunkKVTestCase(unittest.TestCase):
layer = SPARC_DB_SPLUNK_INTEGRATION_LAYER
level = 2
sm = component.getSiteManager()
def test_current_kv_names(self):
from sparc.db.splunk.kvstore import current_kv_names
req = component.createObject(u'sparc.utils.requests.request')
req.req_kwargs['verify'] = False
req.gooble_warnings = True
self.assertIn('test_collection', \
current_kv_names(self.layer.sci,
self.layer.kv_username,
self.layer.kv_appname,
request=req))
def test_schema_adapter_for_named_collection(self):
# tests SplunkKVCollectionSchemaFromSplunkInstance
from sparc.db.splunk import ISplunkKVCollectionSchema
from sparc.utils.requests import IRequest
kv_id = self.layer.get_kv_id(u'test_collection')
schema = component.getMultiAdapter((self.layer.sci,
kv_id,
self.sm.getUtility(IRequest)),
ISplunkKVCollectionSchema)
for k in self.layer.kv_names['test_collection'].keys():
self.assertEquals(self.layer.kv_names['test_collection'][k], schema[k])
class test_suite(test_suite_mixin):
package = 'sparc.db.splunk'
module = 'kvstore'
def __new__(cls):
suite = super(test_suite, cls).__new__(cls)
suite.addTest(unittest.makeSuite(SparcCacheSplunkAreaTestCase))
suite.addTest(unittest.makeSuite(SparcDBSplunkKVTestCase))
return suite
if __name__ == '__main__':
zope.testrunner.run([
'--path', os.path.dirname(__file__),
'--tests-pattern', os.path.splitext(
os.path.basename(__file__))[0]
]) |
seomoz/roger-mesos-tools | bin/roger.py | Python | apache-2.0 | 2,779 | 0.00072 | #!/usr/bin/python
from __future__ import print_function
import os
import | sys
import subprocess
i | mport re
import importlib
from cli.utils import Utils
def print_help_opt(opt, desc):
    """Print one indented, column-aligned 'option description' help line."""
    padded = opt.ljust(13)
    print("  {} {}".format(padded, desc))
def roger_help(root, commands):
    """Print the top-level usage/help text for the roger CLI.

    root: repository root path; its cli/ subdirectory is appended to
          sys.path so each command module can be imported.
    commands: iterable of command names (as produced by getCommands());
          each is resolved to a module named roger_<command> so its
          describe() text can be shown.
    """
    print("usage: roger [-h] [-v] command [arg...]\n")
    print("a command line interface to work with roger mesos.")
    print("\npositional arguments:")
    print_help_opt("command", "command to run.")
    print_help_opt("arg", "arguments to pass to the command.")
    print("\noptional arguments:")
    print_help_opt("-h, --help", "show this help message and exit.")
    print_help_opt("-v, --version", "show version information and exit.")
    print("\ncommands:")
    # Make the roger_<command> modules importable before introspecting them.
    sys.path.append("{}/cli".format(root))
    for command in commands:
        description = ""
        module_name = "roger_" + command
        cmd_module = importlib.import_module(module_name)
        try:
            description = cmd_module.describe()
        except Exception as e:
            # Best effort: a module without a working describe() is still
            # listed, just with an empty description.
            pass
        print_help_opt(command, description)
    print("\nrun: 'roger < command > -h' for more information on a command.")
def getFiles(directory):
    """Return the names of the plain files directly inside *directory*."""
    # os.walk yields (dirpath, dirnames, filenames); only the first level
    # is consumed, and only its file names matter here.
    _dirpath, _dirnames, filenames = next(os.walk(directory))
    return filenames
def getCommands(files):
    """Extract the sorted command names from roger_<name>.py file names."""
    found = {
        re.split("roger_|\.", name)[1]
        for name in files
        if name.startswith("roger_")
    }
    return sorted(found)
def getScriptCall(root, command, command_args):
    """Build the shell line 'roger_<command>.py <arg> <arg> ...'.

    *root* is part of the call signature but unused by the current logic.
    """
    pieces = ["roger_{}.py".format(command)]
    pieces.extend("{}".format(arg) for arg in command_args)
    return " ".join(pieces)
def main():
    """Entry point for the roger CLI.

    Discovers available commands from the cli/ directory next to this
    script, then dispatches on sys.argv: -h/--help prints usage,
    -v/--version prints the version, and any known command name is run
    as a roger_<command>.py subprocess via os.system.
    """
    root = ''
    utilsObj = Utils()
    # Resolve the repo root as the parent of this script's directory.
    own_dir = os.path.dirname(os.path.realpath(__file__))
    root = os.path.abspath(os.path.join(own_dir, os.pardir))
    files = getFiles("{}/cli/".format(root))
    commands = getCommands(files)
    if len(sys.argv) > 1:
        if sys.argv[1] == "-h" or sys.argv[1] == "--help":
            roger_help(root, commands)
        elif sys.argv[1] == "-v" or sys.argv[1] == "--version":
            version = utilsObj.roger_version(root)
            print(version)
        else:
            command = sys.argv[1]
            command_args = sys.argv[2:]
            if command in commands:
                print("root: {} command: {} args: {}".format(
                    root, command, command_args
                ))
                # Delegate to the command script in a child shell.
                script_call = getScriptCall(root, command, command_args)
                os.system(script_call)
            else:
                raise SystemExit("Command is not valid. Exiting.")
    else:
        raise SystemExit("No arguments found. Please refer to usage: roger -h")
|
MERegistro/meregistro | meregistro/apps/titulos/models/EstadoTituloNacional.py | Python | bsd-3-clause | 471 | 0.002128 | # -*- coding: utf-8 -*-
from django.db import models
"""
Representa las opciones de estados que | tiene cada título nacional
"""
class EstadoTituloNacional(models.Model):
NO_VIGENTE = u'No vigente'
VIGENTE = u'Vigente'
nombre = models.CharField(max_length=50, unique=True)
class Meta:
app_label = 'titulos'
ordering = ['nombre']
db_table = 'titulos_estado_titulo_nacional'
def __ | unicode__(self):
return self.nombre
|
dhalleine/tensorflow | tensorflow/python/ops/nn.py | Python | apache-2.0 | 50,569 | 0.002472 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for | the specific l | anguage governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""## Activation Functions
The activation ops provide different types of nonlinearities for use in neural
networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`,
`softplus`, and `softsign`), continuous but not everywhere differentiable
functions (`relu`, `relu6`, and `relu_x`), and random regularization
(`dropout`).
All activation ops apply componentwise, and produce a tensor of the same
shape as the input tensor.
@@relu
@@relu6
@@elu
@@softplus
@@softsign
@@dropout
@@bias_add
@@sigmoid
@@tanh
## Convolution
The convolution ops sweep a 2-D filter over a batch of images, applying the
filter to each window of each image of the appropriate size. The different
ops trade off between generic vs. specific filters:
* `conv2d`: Arbitrary filters that can mix channels together.
* `depthwise_conv2d`: Filters that operate on each channel independently.
* `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter.
Note that although these ops are called "convolution", they are strictly
speaking "cross-correlation" since the filter is combined with an input window
without reversing the filter. For details, see [the properties of
cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties).
The filter is applied to image patches of the same size as the filter and
strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies
the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the
filter to every other image patch in each dimension, etc.
Ignoring channels for the moment, and assume that the 4-D `input` has shape
`[batch, in_height, in_width, ...]` and the 4-D `filter` has shape
`[filter_height, filter_width, ...]`, then the spatial semantics of the
convolution ops are as follows: first, according to the padding scheme chosen
as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed.
For the `'SAME'` padding, the output height and width are computed as:
out_height = ceil(float(in_height) / float(strides[1]))
out_width = ceil(float(in_width) / float(strides[2]))
and the padding on the top and left are computed as:
pad_along_height = ((out_height - 1) * strides[1] +
filter_height - in_height)
pad_along_width = ((out_width - 1) * strides[2] +
filter_width - in_width)
pad_top = pad_along_height / 2
pad_left = pad_along_width / 2
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
bottom and right sides always get the one additional padded pixel. For example,
when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the
bottom. Note that this is different from existing libraries such as cuDNN and
Caffe, which explicitly specify the number of padded pixels and always pad the
same number of pixels on both sides.
For the `'VALID`' padding, the output height and width are computed as:
out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
and the padding values are always zero. The output is then computed as
output[b, i, j, :] =
sum_{di, dj} input[b, strides[1] * i + di - pad_top,
strides[2] * j + dj - pad_left, ...] *
filter[di, dj, ...]
where any value outside the original input image region are considered zero (
i.e. we pad zero values around the border of the image).
Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these
vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new
vectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]`
is multiplied by a vector `filter[di, dj, k]`, and all the vectors are
concatenated.
@@conv2d
@@depthwise_conv2d
@@separable_conv2d
@@atrous_conv2d
@@conv2d_transpose
@@conv3d
## Pooling
The pooling ops sweep a rectangular window over the input tensor, computing a
reduction operation for each window (average, max, or max with argmax). Each
pooling op uses rectangular windows of size `ksize` separated by offset
`strides`. For example, if `strides` is all ones every window is used, if
`strides` is all twos every other window is used in each dimension, etc.
In detail, the output is
output[i] = reduce(value[strides * i:strides * i + ksize])
where the indices also take into consideration the padding values. Please refer
to the `Convolution` section for details about the padding calculation.
@@avg_pool
@@max_pool
@@max_pool_with_argmax
@@avg_pool3d
@@max_pool3d
## Morphological filtering
Morphological operators are non-linear filters used in image processing.
[Greyscale morphological dilation]
(https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart
of standard sum-product convolution:
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filter[dy, dx, c]
The `filter` is usually called structuring function. Max-pooling is a special
case of greyscale morphological dilation when the filter assumes all-zero
values (a.k.a. flat structuring function).
[Greyscale morphological erosion]
(https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart
of standard sum-product convolution:
output[b, y, x, c] =
min_{dy, dx} input[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
filter[dy, dx, c]
Dilation and erosion are dual to each other. The dilation of the input signal
`f` by the structuring signal `g` is equal to the negation of the erosion of
`-f` by the reflected `g`, and vice versa.
Striding and padding is carried out in exactly the same way as in standard
convolution. Please refer to the `Convolution` section for details.
@@dilation2d
@@erosion2d
## Normalization
Normalization is useful to prevent neurons from saturating when inputs may
have varying scale, and to aid generalization.
@@l2_normalize
@@local_response_normalization
@@sufficient_statistics
@@normalize_moments
@@moments
## Losses
The loss ops measure error between two tensors, or between a tensor and zero.
These can be used for measuring accuracy of a network in a regression task
or for regularization purposes (weight decay).
@@l2_loss
## Classification
TensorFlow provides several operations that help you perform classification.
@@sigmoid_cross_entropy_with_logits
@@softmax
@@log_softmax
@@softmax_cross_entropy_with_logits
@@sparse_softmax_cross_entropy_with_logits
@@weighted_cross_entropy_with_logits
## Embeddings
TensorFlow provides library support for looking up values in embedding
tensors.
@@embedding_lookup
@@embedding_lookup_sparse
## Recurrent Neural Networks
TensorFlow provides a number of methods for constructing Recurrent
Neural Networks. Most accept an `RNNCell`-subclassed object
(see the documentation for `tf.nn.rnn_cell`).
@@dynamic_rnn
@@rnn
@@state_saving_rnn
@@bidirectional_rnn
## Connectionist Temporal Classification (CTC)
@@ctc_loss
@@ctc_greedy_decoder
@@ctc_beam_search_decoder
## Evaluation
The |
rjusher/djsqla-query-operations | djsqla_query_operations/compat.py | Python | mit | 590 | 0 | # -*- coding: utf-8 -*-
from sys i | mport version_info
import copy
import types
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
    def _deepcopy_method(x, memo):
        # Rebuild the bound method: deep-copy only the bound instance
        # (im_self); the underlying function and class are shared, not copied.
        return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
    # Register the handler so copy.deepcopy dispatches to it for bound methods.
    copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
|
jrichte43/hangman | hangman/models/role.py | Python | gpl-2.0 | 278 | 0 | from flask_security.core import RoleMixin
from hangman.models import db
class Role(db.Model, RoleMixin):
    """Role model for Flask-Security's RoleMixin, backed by the 'roles' table."""
    __tablename__ = 'roles'
    # surrogate primary key
    id = db.Column(db.Integer, primary_key=True)
    # unique role name (max 80 chars)
    name = db.Column(db.String(80), unique=True)
    # free-form human-readable description
    description = db.Column(db.String(255))
| |
fracpete/python-weka-wrapper3 | python/weka/flow/container.py | Python | gpl-3.0 | 7,930 | 0.001009 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# container.py
# Copyright (C) 2015-2016 Fracpete (pythonwekawrapper at gmail dot com)
import re
from weka.core.dataset import Instances
class Container(object):
    """
    Base container for bundling multiple named objects so they can be
    passed around together in the flow.
    """

    def __init__(self):
        """
        Initializes an empty container with no allowed value names.
        """
        self._data = {}
        self._allowed = []

    def get(self, name):
        """
        Returns the value stored under the given name.

        :param name: the name of the item to return
        :type name: str
        :return: the data
        :rtype: object
        """
        return self._data[name]

    def set(self, name, value):
        """
        Stores the value under the given name; None values are ignored.

        :param name: the name of the item to store
        :type name: str
        :param value: the value to store
        :type value: object
        """
        if value is None:
            return
        self._data[name] = value

    @property
    def allowed(self):
        """
        Returns all the allowed keys.

        :return: the list of allowed keys.
        :rtype: list
        """
        return self._allowed

    def is_valid(self):
        """
        Checks whether the container is valid; subclasses override this.

        :return: True if the container is valid
        :rtype: bool
        """
        return True

    def __str__(self):
        """
        Returns the content of the container as string.

        :return: the content
        :rtype: str
        """
        return str(self._data)

    def generate_help(self):
        """
        Generates a help string for this container.

        :return: the help string
        :rtype: str
        """
        title = self.__class__.__name__
        lines = [title, "=" * len(title), "", "Supported value names:"]
        lines.extend(self.allowed)
        return "\n".join(lines)

    def print_help(self):
        """
        Prints a help string for this container to stdout.
        """
        print(self.generate_help())
class ModelContainer(Container):
    """
    Container for a model (eg Classifier or Clusterer) and, optionally,
    the header of the data it was built from.
    """

    def __init__(self, model=None, header=None):
        """
        Initializes the container.

        :param model: the model to store (eg Classifier or Clusterer)
        :type model: object
        :param header: the header instances
        :type header: Instances
        """
        super(ModelContainer, self).__init__()
        self.set("Model", model)
        if header is not None:
            # keep only the dataset structure, not the actual rows
            header = Instances.template_instances(header)
        self.set("Header", header)
        self._allowed = ["Model", "Header"]

    def is_valid(self):
        """
        Checks whether the container is valid, i.e., a model is present
        (the header is optional).

        :return: True if the container is valid
        :rtype: bool
        """
        # The original expression, A or (A and B), is logically equivalent
        # to just A: only the model is actually required.
        return "Model" in self._data
class AttributeSelectionContainer(Container):
    """
    Container for the outcome of an attribute selection run.
    """

    def __init__(self, original=None, reduced=None, num_atts=None, selected=None, results=None):
        """
        Initializes the container.

        :param original: the original dataset
        :type original: Instances
        :param reduced: the reduced dataset
        :type reduced: Instances
        :param num_atts: the number of attributes
        :type num_atts: int
        :param selected: the list of selected attribute indices (0-based)
        :type selected: list
        :param results: the generated results string
        :type results: str
        """
        super(AttributeSelectionContainer, self).__init__()
        for name, value in (("Original", original),
                            ("Reduced", reduced),
                            ("NumAttributes", num_atts),
                            ("Selected", selected),
                            ("Results", results)):
            self.set(name, value)
        self._allowed = ["Original", "Reduced", "NumAttributes", "Selected", "Results"]

    def is_valid(self):
        """
        Checks whether the container is valid.

        :return: True if the container is valid
        :rtype: bool
        """
        required = ("Reduced", "NumAttributes", "Selected")
        return all(key in self._data for key in required)
# NOTE(review): this class is an exact duplicate of the ModelContainer defined
# earlier in this module; at import time this second definition silently
# rebinds the name and shadows the first. One of the two should be removed.
class ModelContainer(Container):
    """
    Container for models.
    """
    def __init__(self, model=None, header=None):
        """
        Initializes the container.
        :param model: the model to store (eg Classifier or Clusterer)
        :type model: object
        :param header: the header instances
        :type header: Instances
        """
        super(ModelContainer, self).__init__()
        self.set("Model", model)
        if header is not None:
            # keep only the dataset structure, not the actual rows
            header = Instances.template_instances(header)
        self.set("Header", header)
        self._allowed = ["Model", "Header"]
    def is_valid(self):
        """
        Checks whether the container is valid.
        :return: True if the container is valid
        :rtype: bool
        """
        # NOTE(review): 'A or (A and B)' reduces to just 'A'.
        return ("Model" in self._data) or ("Model" in self._data and "Header" in self._data)
class ClassificationContainer(Container):
    """
    Container for a single classifier prediction.
    """

    def __init__(self, inst=None, classification=None, label=None, distribution=None):
        """
        Initializes the container.

        :param inst: the instance used for making the prediction
        :type inst: Instance
        :param classification: the classification (numeric value or 0-based label index)
        :type classification: float
        :param label: classification label (for nominal classes)
        :type label: str
        :param distribution: the class distribution
        :type distribution: ndarray
        """
        super(ClassificationContainer, self).__init__()
        for name, value in (("Instance", inst),
                            ("Classification", classification),
                            ("Label", label),
                            ("Distribution", distribution)):
            self.set(name, value)
        self._allowed = ["Instance", "Classification", "Label", "Distribution"]

    def is_valid(self):
        """
        Checks whether the container is valid.

        :return: True if the container is valid
        :rtype: bool
        """
        required = ("Instance", "Classification")
        return all(key in self._data for key in required)
class ClusteringContainer(Container):
    """
    Container for a single clusterer prediction.
    """

    def __init__(self, inst=None, cluster=None, distribution=None):
        """
        Initializes the container.

        :param inst: the instance used for making the prediction
        :type inst: Instance
        :param cluster: the cluster
        :type cluster: int
        :param distribution: the class distribution
        :type distribution: ndarray
        """
        super(ClusteringContainer, self).__init__()
        for name, value in (("Instance", inst),
                            ("Cluster", cluster),
                            ("Distribution", distribution)):
            self.set(name, value)
        self._allowed = ["Instance", "Cluster", "Distribution"]

    def is_valid(self):
        """
        Checks whether the container is valid.

        :return: True if the container is valid
        :rtype: bool
        """
        required = ("Instance", "Cluster")
        return all(key in self._data for key in required)
|
yzl0083/orange | Orange/OrangeWidgets/plot/owplot.py | Python | gpl-3.0 | 71,969 | 0.011394 | '''
#################
Plot (``owplot``)
#################
.. autoclass:: OrangeWidgets.plot.OWPlot
'''
# Legend placement constants.
LeftLegend = 0
RightLegend = 1
BottomLegend = 2
TopLegend = 3
ExternalLegend = 4
# String constant identifying the "unused attributes" group.
UNUSED_ATTRIBUTES_STR = 'unused attributes'
from owaxis import *
from owcurve import *
from owlegend import *
from owpalette import *
from owplotgui import OWPlotGUI
from owtools import *
## Color values copied from orngView.SchemaView for consistency
SelectionPen = QPen(QBrush(QColor(51, 153, 255, 192)), 1, Qt.SolidLine, Qt.RoundCap)
SelectionBrush = QBrush(QColor(168, 202, 236, 192))
from PyQt4.QtGui import QGraphicsView, QGraphicsScene, QPainter, QTransform, QPolygonF, QGraphicsItem, QGraphicsPolygonItem, QGraphicsRectItem, QRegion
from PyQt4.QtCore import QPointF, QPropertyAnimation, pyqtProperty, SIGNAL, Qt, QEvent
from OWDlgs import OWChooseImageSizeDlg
from OWBaseWidget import unisetattr
from OWColorPalette import * # color palletes, ...
from Orange.utils import deprecated_members, deprecated_attribute
from Orange import orangeqt
def n_min(*args):
    """Return the smallest non-None value among the arguments.

    Accepts either a single iterable or several positional values; None
    entries are ignored. Returns None when no non-None value is present.
    """
    values = args[0] if len(args) == 1 else args
    present = [v for v in values if v is not None]
    if not present:
        return None
    return min(present)
def n_max(*args):
    """Return the largest non-None value among the arguments.

    Accepts either a single iterable or several positional values; None
    entries are ignored. Returns None when no non-None value is present.
    """
    values = args[0] if len(args) == 1 else args
    present = [v for v in values if v is not None]
    if not present:
        return None
    return max(present)
# Mapping of deprecated camelCase method names to their snake_case
# replacements; consumed by the @deprecated_members decorator applied to
# OWPlot below to keep the old API callable.
name_map = {
    "saveToFileDirect": "save_to_file_direct",
    "saveToFile" : "save_to_file",
    "addCurve" : "add_curve",
    "addMarker" : "add_marker",
    "updateLayout" : "update_layout",
    "activateZooming" : "activate_zooming",
    "activateSelection" : "activate_selection",
    "activateRectangleSelection" : "activate_rectangle_selection",
    "activatePolygonSelection" : "activate_polygon_selection",
    "activatePanning" : "activate_panning",
    "getSelectedPoints" : "get_selected_points",
    "setAxisScale" : "set_axis_scale",
    "setAxisLabels" : "set_axis_labels",
    "setAxisAutoScale" : "set_axis_autoscale",
    "setTickLength" : "set_axis_tick_length",
    "updateCurves" : "update_curves",
    "itemList" : "plot_items",
    "setShowMainTitle" : "set_show_main_title",
    "setMainTitle" : "set_main_title",
    "invTransform" : "inv_transform",
    "setAxisTitle" : "set_axis_title",
    "setShowAxisTitle" : "set_show_axis_title"
}
@deprecated_members(name_map, wrap_methods=name_map.keys())
class OWPlot(orangeqt.Plot):
"""
The base class for all plots in Orange. It uses the Qt Graphics View Framework
to draw elements on a graph.
**Plot layout**
.. attribute:: show_legend
A boolean controlling whether the legend is displayed or not
.. attribute:: show_main_title
Controls whether or not the main plot title is displayed
.. attribute:: main_title
The plot title, usually show on top of the plot
.. automethod:: set_main_title
.. automethod:: set_show_main_title
.. attribute:: axis_margin
How much space (in pixels) should be left on each side for the axis, its label and its title.
.. attribute:: title_margin
How much space (in pixels) should be left at the top of the plot for the title, if the title is shown.
.. seealso:: attribute :attr:`show_main_title`
.. attribute:: plot_margin
How much space (in pixels) should be left at each side of the plot as whitespace.
**Coordinate transformation**
There are several coordinate systems used by OWPlot:
* `widget` coordinates.
This is the coordinate system of the position returned by :meth:`.QEvent.pos()`.
No calculations or positions is done with this coordinates, they must first be converted
to scene coordinates with :meth:`mapToScene`.
* `data` coordinates.
The value used internally in Orange to specify the values of attributes.
For example, this can be age in years, the number of legs, or any other numeric value.
* `plot` coordinates.
These coordinates specify where the plot items are placed on the graph, but doesn't account for zoom.
They can be retrieved for a particular plot item with :meth:`.PlotItem.pos()`.
* `scene` or `zoom` coordinates.
Like plot coordinates, except that they take the :attr:`zoom_transform` into account. They represent the
actual position of an item on the scene.
These are the coordinates returned by :meth:`.PlotItem.scenePos()` and :meth:`mapToScene`.
For example, they can be used to determine what is under the cursor.
In most cases, you will use data coordinates for interacting with the actual data, and scene coordinates for
interacting with the plot items. The other two sets are mostly used for converting.
.. automethod:: map_to_graph
.. automethod:: map_from_graph
.. automethod:: transform
.. automethod:: inv_transform
.. method:: nearest_point(pos)
Returns the point nearest to ``pos``, or ``None`` if no point is close enough.
:param pos: The position in scene coordinates
:type pos: QPointF
:rtype: :obj:`.OWPoint`
.. method:: point_at(pos)
If there is a point with data coordinates equal to ``pos``, if is returned.
Otherwise, this function returns None.
:param pos: The posi | tion in data coordinates
:type pos: tuple of float float
:rtype: :obj:`.OWPoint`
**Data curves**
The preferred method for showing a series of data points is :meth:`set_main_curve_data`.
It allows you to specify po | int positions, colors, labels, sizes and shapes.
.. automethod:: set_main_curve_data
.. automethod:: add_curve
.. automethod:: add_custom_curve
.. automethod:: add_marker
.. method:: add_item(item)
Adds any PlotItem ``item`` to this plot.
Calling this function directly is useful for adding a :obj:`.Marker` or another object that does not have to appear in the legend.
For data curves, consider using :meth:`add_custom_curve` instead.
.. method:: plot_items()
Returns the list of all plot items added to this graph with :meth:`add_item` or :meth:`.PlotItem.attach`.
**Axes**
.. automethod:: add_axis
.. automethod:: add_custom_axis
.. automethod:: set_axis_enabled
.. automethod:: set_axis_labels
.. automethod:: set_axis_scale
**Settings**
.. attribute:: gui
An :obj:`.OWPlotGUI` object associated with this graph
**Point Selection and Marking**
There are four possible selection behaviors used for selecting or marking points in OWPlot.
They are used in :meth:`select_points` and :meth:`mark_points` and are the same for both operations.
.. data:: AddSelection
        The points are added to the selection, without affecting the currently selected points
.. data:: RemoveSelection
        The points are removed from the selection, without affecting the currently selected points
.. data:: ToggleSelection
The points' selection state is toggled
.. data:: ReplaceSelection
The current selection is replaced with the new one
    .. note:: There are exactly the same functions for point selection and marking.
|
exowanderer/SpitzerDeepLearningNetwork | Python Scripts/spitzer_cal_NALU_train.py | Python | mit | 15,910 | 0.009742 | from multiprocessing import set_start_method, cpu_count
#set_start_method('forkserver')
import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser
from datetime import datetime
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.utils import shuffle
from tqdm import tqdm
import pandas as pd
import numpy as np
import tensorflow as tf
import nalu
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
    """Parse a human-friendly boolean string into a bool.

    Accepts (case-insensitively): yes/no, true/false, t/f, y/n, 1/0.

    :param v: the string to interpret
    :return: the parsed boolean
    :raises ArgumentTypeError: if the value is not a recognized boolean string
    """
    # Local import: the module only does 'from argparse import ArgumentParser',
    # so the original 'raise argparse.ArgumentTypeError(...)' was a NameError.
    from argparse import ArgumentTypeError
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ArgumentTypeError('Boolean value expected.')
def chisq(y_true, y_pred, y_error):
    """Return the chi-squared statistic of y_pred vs y_true with uncertainties y_error."""
    residuals = (y_true - y_pred) / y_error
    return np.sum(residuals**2.)
ap = ArgumentParser()
ap.add_argument('-d', '--directory', type=str, required=False, default='nalu_tf_save_dir/saves_{}'.format(time_now), help='The tensorflow ckpt save file.')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, required=False, default=1, help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, required=False, default=0, help='How many features on the second NALU layer.')
ap.add_argument('-ne', '--n_epochs', type=int, required=False, default=200, help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, required=False, default=1, help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, required=False, default=32, help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, required=False, default=1e-3, help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, required=False, default=0.75, help='How much to split the train / test ratio.')
ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization.')
ap.add_argument('-pp', ' | --pre_process', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to MinMax-preprocess the features.')
ap.add_argument('-pca', '--pca_transform', type=str2bool, nargs='?', required=Fals | e, default=True, help='Toggle whether to PCA-pretransform the features.')
ap.add_argument('-v', '--verbose', type=str2bool, nargs='?', required=False, default=False, help='Whether to set verbosity = True or False (default).')
ap.add_argument('-ds', '--data_set', type=str, required=False, default='', help='The csv file containing the data with which to train.')
try:
args = vars(ap.parse_args())
except Exception as e:
print('Error: {}'.format(e))
args = {}
args['directory'] = ap.get_default('directory')
args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
args['n_epochs'] = ap.get_default('n_epochs')
args['n_classes'] = ap.get_default('n_classes')
args['batch_size'] = ap.get_default('batch_size')
args['learning_rate'] = ap.get_default('learning_rate')
args['test_size'] = ap.get_default('test_size')
args['random_state'] = ap.get_default('random_state')
args['pre_process'] = ap.get_default('pre_process')
args['pca_transform'] = ap.get_default('pca_transform')
args['verbose'] = ap.get_default('verbose')
args['data_set'] = ap.get_default('data_set')
DO_PP = args['pre_process']
DO_PCA = args['pca_transform']
verbose = args['verbose']
data_set_fname = args['data_set']
'''
print("loading pipelines on disk vis joblib.")
full_pipe = joblib.load('pmap_full_pipe_transformer_16features.joblib.save')
std_scaler_from_raw = joblib.load('pmap_standard_scaler_transformer_16features.joblib.save')
pca_transformer_from_std_scaled = joblib.load('pmap_pca_transformer_from_stdscaler_16features.joblib.save')
minmax_scaler_transformer_raw = joblib.load('pmap_minmax_scaler_transformer_from_raw_16features.joblib.save')
minmax_scaler_transformer_pca = joblib.load('pmap_minmax_scaler_transformer_from_pca_16features.joblib.save')
'''
# Load the raw photometric labels (flux) and their uncertainties.
label_n_error_filename = 'pmap_raw_labels_and_errors.csv'
print("Loading in raw labels and errors from {}".format(label_n_error_filename))
labels_df = pd.read_csv(label_n_error_filename)
labels = labels_df['Flux'].values[:,None]
labels_err = labels_df['Flux_err'].values
# Feature File Switch: pick the feature CSV matching the requested
# pre-processing (MinMax scaling and/or PCA transform).
if DO_PP and DO_PCA:
    features_input_filename = 'pmap_full_pipe_transformed_16features.csv'
elif DO_PP:
    features_input_filename = 'pmap_minmax_transformed_from_raw_16features.csv'
elif DO_PCA:
    features_input_filename = 'pmap_pca_transformed_from_stdscaler_16features.csv'
else:
    features_input_filename = 'pmap_raw_16features.csv'
print("Loading in pre-processed features from {}".format(features_input_filename))
# BUG FIX: was 'feature_input_filename' (undefined name -> NameError at runtime).
features_input = pd.read_csv(features_input_filename).drop(['idx'], axis=1).values
def nalu(input_layer, num_outputs):
    """ Neural Arithmetic Logic Unit tensorflow layer

    NOTE(review): this local definition shadows the 'import nalu' module
    imported at the top of the file. It uses TF1-era APIs (tf.placeholder
    style graphs, tf.truncated_normal, tf.log) that were removed in
    TensorFlow 2 — presumably the script targets TF1; confirm before upgrading.

    Arguments:
    input_layer - A Tensor representing previous layer
    num_outputs - number of output units

    Returns:
    A tensor representing the output of NALU
    """
    shape = (int(input_layer.shape[-1]), num_outputs)
    # define variables
    W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
    # operations according to paper
    W = tf.tanh(W_hat) * tf.sigmoid(M_hat)  # weights biased toward {-1, 0, +1}
    m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))  # multiplicative path (log-space; epsilon guards log(0))
    g = tf.sigmoid(tf.matmul(input_layer, G))  # learned gate between the two paths
    a = tf.matmul(input_layer, W)  # additive path
    out = g * a + (1 - g) * m
    return out
if __name__ == "__main__":
N_FEATURES = features_input.shape[-1]
EXPORT_DIR = args['directory']
N_NALU_LAYERS = args['n_nalu_layers']
N_NALU_NEURONS = args['n_nalu_neurons'] if args['n_nalu_neurons'] > 0 else N_FEATURES
N_CLASSES = args['n_classes'] # = 1 for regression
TEST_SIZE = args['test_size']
RANDOM_STATE = args['random_state']
N_EPOCHS = args['n_epochs']
LEARNING_RATE = args['learning_rate']
BATCH_SIZE = args['batch_size']
EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}_PP{}_PCA{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES,
BATCH_SIZE, LEARNING_RATE, N_EPOCHS,
TEST_SIZE, RANDOM_STATE,
{True:1, False:0}[DO_PP], {True:1, False:0}[DO_PCA])
print("Saving models to path: {}".format(EXPORT_DIR))
idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
X_data, Y_data = features_input[idx_train], labels[idx_train]#[:,None]
LAST_BIT = X_data.shape[0]-BATCH_SIZE*(X_data.shape[0]//BATCH_SIZE)
# Force integer number of batches total by dropping last "<BATCH_SIEZ" number of samples
X_data_use = X_data[:-LAST_BIT].copy()
Y_data_use = Y_data[:-LAST_BIT].copy()
output_dict = {}
output_dict['loss'] = np.zeros(N_EPOCHS)
output_dict['accuracy'] = np.zeros(N_EPOCHS)
output_dict['R2_train'] = np.zeros(N_EPOCHS)
output_dict['R2_test'] = np.zeros(N_EPOCHS)
output_dict['chisq_train'] = np.zeros(N_EPOCHS)
output_dict['chisq_test'] = np.zeros(N_EPOCHS)
with tf.device("/cpu:0"):
# tf.reset_default_graph()
# define placeholders and network
X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
Y_true = tf.placeholder(tf.float32, shape=[None, 1])
# Setup NALU Layers
nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)}
for kn in range(1, N_NALU_LAYERS):
#with tf.name_scope('nalu{}'.form |
shackra/thomas-aquinas | tests/customstuff.py | Python | bsd-3-clause | 1,159 | 0.000863 | # coding: utf-8
from summa.director import director
from summa.scene import Scene
import pyglet
class TimedScene(Scene):
    """A Scene that automatically transitions after a fixed amount of time.

    A scheduled clock callback accumulates elapsed time; once ``_maxtime``
    seconds have passed, the scene transitions according to the configured
    change type (0=push, 1=pop, 2=replace, 3=end), or exits the app when no
    next scene has been set.
    """

    def __init__(self, *children):
        super(TimedScene, self).__init__(*children)
        self._dt = 0              # seconds accumulated since the last transition
        self._maxtime = 20        # seconds to wait before transitioning
        self._next_scene = None   # scene to transition to (None -> quit app)
        self._changetype = 0      # how to transition; see changescene()
        self.schedule(self.timeit)

    def changescene(self):
        """Perform the scheduled scene transition.

        If no next scene is set, the application is asked to exit.
        """
        if self._next_scene is None:
            pyglet.app.exit()
            # BUG FIX: previously execution fell through after requesting exit
            # and pushed/replaced a None scene, which the director cannot handle.
            return
        if self._changetype == 0:
            director.push(self._next_scene)
        elif self._changetype == 1:
            self._next_scene = None
            director.pop()
        elif self._changetype == 2:
            director.replace(self._next_scene)
        elif self._changetype == 3:
            self._next_scene = None
            director.scene.end(True)

    def timeit(self, dt):
        """Clock callback: accumulate elapsed time and transition when due."""
        if self._dt >= self._maxtime:
            self._dt = 0
            self.changescene()
        else:
            self._dt += dt

    def setnextscene(self, change, scene):
        """Configure the next transition.

        :param change: transition type (0=push, 1=pop, 2=replace, 3=end)
        :param scene: the scene to transition to
        """
        self._next_scene = scene
        self._changetype = change

    def setmaxtime(self, maxtime):
        """Set how many seconds to wait before transitioning."""
        self._maxtime = maxtime
|
andyrenpanlong/Taptap | tabUnique.py | Python | mit | 987 | 0.003995 | #coding=utf-8
import pymongo
def delete_repeat_data():
    # Scan the taplist collection for documents sharing the same game_id and
    # report (optionally remove) the duplicates.
    # NOTE(review): Python 2 print statements; this script will not run on Python 3.
    client = pymongo.MongoClient('localhost', 27017)
    db = client.admin
    collection = db.taplist
    for url in collection.distinct('game_id'):  # distinct() yields each unique game_id
        num = collection.count({"game_id": url})  # count how many documents share this game_id
        print num, "===== aawa =====", url
        # for i in range(1, num):  # delete duplicates based on the count; keep one copy
        #     print 'delete %s %d times ' % (url, i)
        #     # Note the last argument: oddly, in the mongo shell a value of 1 removes
        #     # a single element, but here 0 removes one
        #     collection.remove({"game_id": url}, 0)
        # for i in collection.find({"game_id": url}):  # print all current documents
        #     print i
    # print collection.distinct('game_id')  # print the de-duplicated ids again
delete_repeat_data()
|
EIREXE/SpaceDock | SpaceDock/blueprints/admin.py | Python | mit | 3,466 | 0.004328 | from flask import Blueprint, render_template, abort, redirect
from flask.ext.login import current_user
from sqlalchemy import desc
from SpaceDock.objects import User, Mod, GameVersion, Game, Publisher
from SpaceDock.database import db
from SpaceDock.common import *
from SpaceDock.config import _cfg
from SpaceDock.email import send_bulk_email
from flask.ext.login import current_user, login_user, logout_user
admin = Blueprint('admin', __name__, template_folder='../../templates/admin')
@admin.route("/admin")
@adminrequired
def backend():
    """Render the admin dashboard with site-wide counts and object lists."""
    users = User.query.count()
    usrs = User.query.order_by(desc(User.created));
    mods = Mod.query.count()
    versions = GameVersion.query.order_by(desc(GameVersion.id)).all()
    games = Game.query.filter(Game.active == True).order_by(desc(Game.id)).all()
    publishers = Publisher.query.order_by(desc(Publisher.id)).all()
    return render_template("admin/admin.html", users=users, mods=mods, usrs=usrs, versions=versions, games=games, publishers=publishers)
@admin.route("/admin/impersonate/<username>")
@adminrequired
def impersonate(username):
    """Log the current admin in as another user and redirect to the home page."""
    user = User.query.filter(User.username == username).first()
    # NOTE(review): no existence check — a bad username passes None to login_user.
    login_user(user)
    return redirect("/")
@admin.route("/versions/create", methods=['POST'])
@adminrequired
@with_session
def create_version():
    """Create a new GameVersion from POSTed form data."""
    # NOTE(review): 'request' is not imported explicitly here; presumably it is
    # provided by 'from SpaceDock.common import *' — verify.
    friendly = request.form.get("friendly_version")
    gid = request.form.get("ganame")
    if not friendly or not gid:
        # NOTE(review): "/asdf" looks like a placeholder error redirect.
        return redirect("/asdf")
    if any(GameVersion.query.filter(GameVersion.friendly_version == friendly)):
        # NOTE(review): "/fsda" looks like a placeholder error redirect.
        return redirect("/fsda")
    version = GameVersion(friendly,gid)
    db.add(version)
    db.commit()
    return redirect("/admin")
@admin.route("/games/create", methods=['POST'])
@adminrequired
@with_session
def create_game():
    """Create a new Game (name, short name, publisher id) from POSTed form data."""
    name = request.form.get("gname")
    sname = request.form.get("sname")
    pid = request.form.get("pname")
    if not name or not pid or not sname:
        # NOTE(review): "/asdf" looks like a placeholder error redirect.
        return redirect("/asdf")
    if any(Game.query.filter(Game.name == name)):
        # NOTE(review): "/fsda" looks like a placeholder error redirect.
        return redirect("/fsda")
    go = Game(name,pid,sname)
    db.add(go)
    db.commit()
    return redirect("/admin")
@admin.route("/publishers/create", methods=['POST'])
@adminrequired
@with_session
def create_publisher():
    """Create a new Publisher from POSTed form data."""
    name = request.form.get("pname")
    if not name:
        # NOTE(review): "/asdf" looks like a placeholder error redirect.
        return redirect("/asdf")
    if any(Publisher.query.filter(Publisher.name == name)):
        # NOTE(review): "/fsda" looks like a placeholder error redirect.
        return redirect("/fsda")
    gname = Publisher(name)
    db.add(gname)
    db.commit()
    return redirect("/admin")
@admin.route("/admin/email", methods=['POST'])
@adminrequired
def email():
    """Send a bulk email to all users (or only modders) from POSTed form data."""
    subject = request.form.get('subject')
    body = request.form.get('body')
    modders_only = request.form.get('modders-only') == 'on'
    if not subject or not body:
        abort(400)
    # NOTE(review): this second check is redundant — empty strings are falsy and
    # already rejected above.
    if subject == '' or body == '':
        abort(400)
    users = User.query.all()
    if modders_only:
        # keep users with at least one mod, plus the sending admin themselves
        users = [u for u in users if len(u.mods) != 0 or u.username == current_user.username]
    send_bulk_email([u.email for u in users], subject, body)
    return redirect("/admin")
@admin.route("/admin/manual-confirmation/<user_id>")
@adminrequired
@with_session
def manual_confirm(user_id):
    """Manually confirm a user's account by clearing their confirmation token."""
    user = User.query.filter(User.id == int(user_id)).first()
    if not user:
        abort(404)
    # clearing the token marks the account as confirmed
    user.confirmation = None
    return redirect("/profile/" + user.username)
|
ric2b/Vivaldi-browser | chromium/chrome/installer/mac/signing/signing_test.py | Python | bsd-3-clause | 9,294 | 0.000861 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from . import model, signing, test_common, test_config
mock = test_common.import_mock()
# python2 support.
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@mock.patch('signing.commands.lenient_run_command_output')
@mock.patch('signing.commands.macos_version', return_value=[10, 15])
class TestLinkerSignedArm64NeedsForce(unittest.TestCase):
    """Tests for signing._linker_signed_arm64_needs_force().

    The class-level patches replace the macOS version probe and the codesign
    invocation, so each test supplies a (returncode, stdout, stderr) triple
    simulating a particular existing code signature.
    """

    def test_oserror(self, macos_version, lenient_run_command_output):
        # codesign could not be run at all -> no force-signing required.
        lenient_run_command_output.return_value = (None, None, None)
        self.assertFalse(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_called_once()

    def test_unsigned(self, macos_version, lenient_run_command_output):
        # A completely unsigned binary does not need --force.
        lenient_run_command_output.return_value = (
            1, b'', b'test: code object is not signed at all\n')
        self.assertFalse(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_called_once()

    def test_not_linker_signed(self, macos_version, lenient_run_command_output):
        # Ad-hoc signed, but without the linker-signed flag -> no --force.
        lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20100 size=592 flags=0x2(adhoc) hashes=13+2 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements count=0 size=12
''')
        self.assertFalse(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_called_once()

    def test_linker_signed_10_15(self, macos_version,
                                 lenient_run_command_output):
        # On 10.15 the codesign output is consulted; the ??? flag pattern
        # (how 10.15 renders linker-signed) means --force is required.
        lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=512 flags=0x20002(adhoc,???) hashes=13+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
        self.assertTrue(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_called_once()

    def test_linker_signed_10_16(self, macos_version,
                                 lenient_run_command_output):
        # 10.16 is what a Python built against an SDK < 11.0 will see 11.0 as.
        # On 11.0+ no force is needed, and codesign is never invoked.
        macos_version.return_value = [10, 16]
        lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
        self.assertFalse(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_not_called()

    def test_linker_signed_11_0(self, macos_version,
                                lenient_run_command_output):
        # Same as above, for a Python that reports the real 11.0 version.
        macos_version.return_value = [11, 0]
        lenient_run_command_output.return_value = (0, b'', b'''Executable=test
Identifier=test
Format=Mach-O thin (arm64)
CodeDirectory v=20400 size=250 flags=0x20002(adhoc,linker-signed) hashes=5+0 location=embedded
Signature=adhoc
Info.plist=not bound
TeamIdentifier=not set
Sealed Resources=none
Internal requirements=none
''')
        self.assertFalse(signing._linker_signed_arm64_needs_force(None))
        lenient_run_command_output.assert_not_called()
@mock.patch(
'signing.signing._linker_signed_arm64_needs_force', return_value=False)
@mock.patch('signing.commands.run_command')
class TestSignPart(unittest.TestCase):
def setUp(self):
self.paths = model.Paths('/$I', '/$O', '/$W')
self.config = test_config.TestConfig()
def test_sign_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('Test.app', 'test.signing.app')
signing.sign_part(self.paths, self.config, part)
run_command.assert_called_once_with([
'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
'=designated => identifier "test.signing.app"', '/$W/Test.app'
])
    def test_sign_part_needs_force(self, run_command,
                                   linker_signed_arm64_needs_force):
        """--force is inserted when the existing linker signature requires it."""
        linker_signed_arm64_needs_force.return_value = True
        part = model.CodeSignedProduct('Test.app', 'test.signing.app')
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--force', '--timestamp',
            '--requirements', '=designated => identifier "test.signing.app"',
            '/$W/Test.app'
        ])
    def test_sign_part_no_notary(self, run_command,
                                 linker_signed_arm64_needs_force):
        """Without notarization credentials, no --timestamp is requested."""
        config = test_config.TestConfig(notary_user=None, notary_password=None)
        part = model.CodeSignedProduct('Test.app', 'test.signing.app')
        signing.sign_part(self.paths, config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--requirements',
            '=designated => identifier "test.signing.app"', '/$W/Test.app'
        ])
    def test_sign_part_no_identifier_requirement(
            self, run_command, linker_signed_arm64_needs_force):
        """identifier_requirement=False drops the --requirements clause."""
        part = model.CodeSignedProduct(
            'Test.app', 'test.signing.app', identifier_requirement=False)
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with(
            ['codesign', '--sign', '[IDENTITY]', '--timestamp', '/$W/Test.app'])
    def test_sign_with_identifier(self, run_command,
                                  linker_signed_arm64_needs_force):
        """sign_with_identifier=True passes --identifier explicitly."""
        part = model.CodeSignedProduct(
            'Test.app', 'test.signing.app', sign_with_identifier=True)
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
            'test.signing.app', '--requirements',
            '=designated => identifier "test.signing.app"', '/$W/Test.app'
        ])
    def test_sign_with_identifier_no_requirement(
            self, run_command, linker_signed_arm64_needs_force):
        """--identifier without a --requirements clause when both flags set."""
        part = model.CodeSignedProduct(
            'Test.app',
            'test.signing.app',
            sign_with_identifier=True,
            identifier_requirement=False)
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--timestamp', '--identifier',
            'test.signing.app', '/$W/Test.app'
        ])
    def test_sign_part_with_options(self, run_command,
                                    linker_signed_arm64_needs_force):
        """CodeSignOptions are joined into a single --options argument."""
        part = model.CodeSignedProduct(
            'Test.app',
            'test.signing.app',
            options=model.CodeSignOptions.RESTRICT +
            model.CodeSignOptions.LIBRARY_VALIDATION)
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--timestamp', '--requirements',
            '=designated => identifier "test.signing.app"', '--options',
            'restrict,library', '/$W/Test.app'
        ])
    def test_sign_part_with_entitlements(self, run_command,
                                         linker_signed_arm64_needs_force):
        """An entitlements file is resolved against the work dir and passed."""
        part = model.CodeSignedProduct(
            'Test.app',
            'test.signing.app',
            entitlements='entitlements.plist',
            identifier_requirement=False)
        signing.sign_part(self.paths, self.config, part)
        run_command.assert_called_once_with([
            'codesign', '--sign', '[IDENTITY]', '--timestamp', '--entitlements',
            '/$W/entitlements.plist', '/$W/Test.app'
        ])
def test_verify_part(self, run_command, linker_signed_arm64_needs_force):
part = model.CodeSignedProduct('T |
cts-admin/cts | cts/fundraising/migrations/0003_auto_20170824_0419.py | Python | gpl-3.0 | 546 | 0.001832 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-24 04:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: tighten ctsdonor.profile into an
    # explicit one-to-one link to accounts.Profile with cascade deletion.
    dependencies = [
        ('fundraising', '0002_ctsdonor_profile'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ctsdonor',
            name='profile',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='accounts.Profile'),
        ),
    ]
|
pdp10/sbpipe | sbpipe/pl/ps2/parscan2.py | Python | mit | 13,510 | 0.002073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# L | IABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# for computing the pipeline elapsed time
import datetime
import glob
import logging
import | os
import os.path
import yaml
import traceback
from ..pipeline import Pipeline
from sbpipe.utils.dependencies import is_r_package_installed
from sbpipe.utils.io import refresh
from sbpipe.utils.parcomp import parcomp
from sbpipe.utils.rand import get_rand_alphanum_str
from sbpipe.report.latex_reports import latex_report_ps2, pdf_report
logger = logging.getLogger('sbpipe')
class ParScan2(Pipeline):
"""
This module provides the user with a complete pipeline of scripts for computing
double parameter scans.
"""
    def __init__(self, models_folder='Models', working_folder='Results',
                 sim_data_folder='double_param_scan_data', sim_plots_folder='double_param_scan_plots'):
        # Folder arguments configure the input/output tree for the scan.
        # NOTE(review): assigning __doc__ locally is a no-op; kept as-is.
        __doc__ = Pipeline.__init__.__doc__
        Pipeline.__init__(self, models_folder, working_folder, sim_data_folder, sim_plots_folder)
    def run(self, config_file):
        """Execute the double-parameter-scan pipeline described by config_file.

        Runs, in order and each optionally per the configuration: data
        generation, data analysis, report generation and tarball creation.
        Returns True on success, False on the first failing stage.
        """
        __doc__ = Pipeline.run.__doc__
        logger.info("===============================")
        logger.info("Pipeline: double parameter scan")
        logger.info("===============================")
        logger.info("\n")
        logger.info("Loading file: " + config_file)
        logger.info("=============\n")
        # load the configuration file
        try:
            config_dict = Pipeline.load(config_file)
        except yaml.YAMLError as e:
            # NOTE(review): e.message exists only on Python 2 exceptions;
            # str(e) would be portable — confirm the supported interpreter.
            logger.error(e.message)
            logger.debug(traceback.format_exc())
            return False
        except IOError:
            logger.error('File `' + config_file + '` does not exist.')
            logger.debug(traceback.format_exc())
            return False
        # variable initialisation
        (generate_data, analyse_data, generate_report, generate_tarball,
         project_dir, simulator, model, scanned_par1, scanned_par2,
         cluster, local_cpus, runs,
         sim_length) = self.parse(config_dict)
        runs = int(runs)
        local_cpus = int(local_cpus)
        sim_length = int(sim_length)
        models_dir = os.path.join(project_dir, self.get_models_folder())
        working_dir = os.path.join(project_dir, self.get_working_folder())
        output_folder = os.path.splitext(model)[0]
        outputdir = os.path.join(working_dir, output_folder)
        # Get the pipeline start time
        start = datetime.datetime.now().replace(microsecond=0)
        # preprocessing: make sure the per-model output directory exists
        if not os.path.exists(outputdir):
            os.makedirs(outputdir)
        if generate_data:
            logger.info("\n")
            logger.info("Data generation:")
            logger.info("================")
            status = ParScan2.generate_data(simulator,
                                            model,
                                            sim_length,
                                            models_dir,
                                            os.path.join(outputdir, self.get_sim_data_folder()),
                                            cluster,
                                            local_cpus,
                                            runs)
            if not status:
                return False
        if analyse_data:
            logger.info("\n")
            logger.info("Data analysis:")
            logger.info("==============")
            status = ParScan2.analyse_data(os.path.splitext(model)[0],
                                           scanned_par1,
                                           scanned_par2,
                                           os.path.join(outputdir, self.get_sim_data_folder()),
                                           os.path.join(outputdir, self.get_sim_plots_folder()),
                                           cluster,
                                           local_cpus,
                                           runs)
            if not status:
                return False
        if generate_report:
            logger.info("\n")
            logger.info("Report generation:")
            logger.info("==================")
            status = ParScan2.generate_report(os.path.splitext(model)[0],
                                              scanned_par1,
                                              scanned_par2,
                                              outputdir,
                                              self.get_sim_plots_folder())
            if not status:
                return False
        if generate_tarball:
            status = self.generate_tarball(working_dir, output_folder)
            if not status:
                return False
        # Print the pipeline elapsed time
        end = datetime.datetime.now().replace(microsecond=0)
        logger.info("\n\nPipeline elapsed time (using Python datetime): " + str(end - start))
        return True
    @classmethod
    def generate_data(cls, simulator, model, sim_length, inputdir, outputdir, cluster, local_cpus, runs):
        """
        The first pipeline step: data generation.

        :param simulator: the name of the simulator (e.g. Copasi)
        :param model: the model to process
        :param sim_length: the length of the simulation
        :param inputdir: the directory containing the model
        :param outputdir: the directory to store the results
        :param cluster: local, lsf for Load Sharing Facility, sge for Sun Grid Engine.
        :param local_cpus: the number of CPU.
        :param runs: the number of model simulation
        :return: True if the task was completed successfully, False otherwise.
        """
        # Some controls: validate the model file and the numeric parameters
        # before touching the output directory.
        if not os.path.isfile(os.path.join(inputdir, model)):
            logger.error(os.path.join(inputdir, model) + " does not exist.")
            return False
        if runs < 1:
            logger.error("variable `runs` must be greater than 0. Please, check your configuration file.")
            return False
        if int(sim_length) < 1:
            logger.error("variable sim_length must be greater than 0. Please, check your configuration file.")
            return False
        # Clear any previous results for this model
        refresh(outputdir, os.path.splitext(model)[0])
        logger.info("Simulating Model: " + model)
        try:
            sim = cls.get_simul_obj(simulator)
        except TypeError as e:
            logger.error("simulator: " + simulator + " not found.")
            logger.debug(traceback.format_exc())
            return False
        try:
            # Delegate the actual double-parameter-scan run to the simulator
            return sim.ps2(model, sim_length, inputdir, outputdir, cluster, local_cpus, runs)
        except Exception as e:
            logger.error(str(e))
            logger.debug(traceback.format_exc())
            return False
@classmethod
def analyse_data(cls, model, scanned_par1, scanned_par2, inputdir, outputdir, cluster='local', local_cpus=1, runs=1):
"""
The second pipeline step: data analysis.
:param model: the model name
:param scanned_par1: the first scanned parameter
:param scanned_par2: the |
boxu0001/practice | py3/S163_L641_MissingRange.py | Python | gpl-3.0 | 1,104 | 0.005435 | '''
Given a sorted integer array where the range of elements are in the inclusive r | ange [lower, upper], return its missing ranges.
Have you met this question in a real interview?
Example
Example 1
Input:
nu | ms = [0, 1, 3, 50, 75], lower = 0 and upper = 99
Output:
["2", "4->49", "51->74", "76->99"]
Explanation:
in range[0,99],the missing range includes:range[2,2],range[4,49],range[51,74] and range[76,99]
Example 2
Input:
nums = [0, 1, 2, 3, 7], lower = 0 and upper = 7
Output:
["4->6"]
Explanation:
in range[0,7],the missing range include range[4,6]
'''
class Solution:
    """
    @param: nums: a sorted integer array
    @param: lower: An integer
    @param: upper: An integer
    @return: a list of its missing ranges
    """

    def findMissingRanges(self, nums, lower, upper):
        """Return the ranges in [lower, upper] not covered by ``nums``.

        Each missing range is rendered as "a->b", or just "a" for a single
        value.  ``nums`` is assumed sorted with all elements in
        [lower, upper].  The input list is left unmodified (the original
        implementation appended a sentinel to the caller's list).
        """
        missing = []
        prev = lower  # smallest value not yet accounted for
        # Iterate a sentinel of upper + 1 after the data so the final gap
        # up to `upper` is emitted without mutating `nums`.
        for cur in list(nums) + [upper + 1]:
            gap = cur - prev
            if gap >= 2:
                missing.append(str(prev) + '->' + str(cur - 1))
            elif gap == 1:
                missing.append(str(prev))
            prev = cur + 1
        return missing
|
Ibcrootbeer/PyScripts | Install/RemoveAll.py | Python | mit | 432 | 0.002315 | #!/usr/bin/env python
import os
os.system("sudo apt-get -y purg | e apache2")
os.system("sudo apt-get -y purge openssh-server")
os.system("sudo rm /etc/motd")
os.system("sudo rm ~/.ssh/known_hosts")
os.system("sudo apt-get -y purge vsftpd")
os.system("sudo apt-get -y purge mysql*")
os.system("sudo rm /usr/bin/mysql -R")
os.system("sudo rm /var/lib/mysql -R")
os.system("sudo rm /etc/mysql -R")
os.system("sudo rm /etc/my.cnf.rmp" | )
|
dNG-git/pas_tasks | src/pas_tasks/abstract_persistent.py | Python | gpl-2.0 | 2,708 | 0.000739 | # -*- coding: utf-8 -*-
"""
direct PAS
Python Application Services
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?pas;tasks
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(pasTasksVersion)#
#echo(__FILEPATH__)#
"""
from dpt_runtime.not_implemented_exception import NotImplementedException
from pas_timed_tasks import TimedTasksMixin
from .abstract import Abstract
class AbstractPersistent(TimedTasksMixin, Abstract):
    """
Base class for persistent tasks: combines "Abstract" with the timed-tasks
mixin to add scheduling-related behaviour.

:author:     direct Netware Group et al.
:copyright:  direct Netware Group - All rights reserved
:package:    pas
:subpackage: tasks
:since:      v1.0.0
:license:    https://www.direct-netware.de/redirect?licenses;gpl
             GNU General Public License 2 or later
    """

    # Reserving slots prevents the automatic per-instance __dict__ and
    # __weakref__ (see python.org documentation on __slots__).
    __slots__ = TimedTasksMixin._mixin_slots_

    def __init__(self):
        """
Constructor __init__(AbstractPersistent)

:since: v1.0.0
        """
        Abstract.__init__(self)
        TimedTasksMixin.__init__(self)
    #

    @classmethod
    def is_executing_daemon(cls):
        """
True if the instance is executing scheduled tasks.

:return: (bool) True if executing scheduled tasks
:since:  v1.0.0
        """
        try:
            return cls.get_instance().is_started
        except NotImplementedException:
            return False
    #
#
|
pimier15/PyGUI | Kivy/Kivy/Bk_Interractive/My/C1/Proj_ComicBook/comiccreator.py | Python | mit | 434 | 0.013825 | from kivy.app import App
from kivy.lang | import Builder
from kivy.uix.anchorlayout import AnchorLayout
# Each widget's kv rules live in its own file; load them before the root
# widget class is instantiated so the rules apply to it.
Builder.load_file('toolbox.kv')
Builder.load_file('generaloptions.kv')
Builder.load_file('statusbar.kv')
Builder.load_file('drawingspace.kv')
class ComicCreator(AnchorLayout):
    # Root widget of the app; its layout is defined entirely in the kv files.
    pass
class ComicCreatorApp(App):
    """Kivy application whose root widget is a ComicCreator instance."""

    def build(self):
        """Create and return the root widget for the window."""
        root = ComicCreator()
        return root
if __name__=='__main__':
    # Script entry point: start the Kivy event loop.
    ComicCreatorApp().run()
|
mkieszek/odoo | addons/purchase/res_config.py | Python | agpl-3.0 | 4,217 | 0.005454 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_config_settings(osv.osv_memory):
    # Transient configuration wizard for the Purchase application.  Each
    # selection toggles an implied security group or an optional module;
    # nothing here is persisted beyond applying the settings.
    _name = 'purchase.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'group_product_variant': fields.selection([
            (0, "No variants on products"),
            (1, 'Products can have several attributes, defining variants (Example: size, color,...)')
            ], "Product Variants",
            help='Work with product variant allows you to define some variant of the same products, an ease the product management in the ecommerce for example',
            implied_group='product.group_product_variant'),
        'default_invoice_method': fields.selection(
            [('manual', 'Control vendor bill on purchase order line'),
             ('picking', 'Control vendor bill on incoming shipments'),
             ('order', 'Control vendor bill on a pregenerated draft invoice'),
             ], 'Default invoicing control method', required=True, default_model='purchase.order'),
        'group_purchase_pricelist':fields.selection([
            (0, 'Set a fixed cost price on each product'),
            (1, 'Use pricelists to adapt your price per vendors or products')
            ], "Pricelists",
            implied_group='product.group_purchase_pricelist',
            help='Allows to manage different prices based on rules per category of vendor.\n'
                 'Example: 10% for retailers, promotion of 5 EUR on this product, etc.'),
        'group_uom':fields.selection([
            (0, 'Products have only one unit of measure (easier)'),
            (1, 'Some products may be sold/puchased in different unit of measures (advanced)')
            ], "Unit of Measures",
            implied_group='product.group_uom',
            help="""Allows you to select and maintain different units of measure for products."""),
        'group_costing_method':fields.selection([
            (0, 'Set a fixed cost price on each product'),
            (1, "Use a 'Fixed', 'Real' or 'Average' price costing method")
            ], "Costing Methods",
            implied_group='stock_account.group_inventory_valuation',
            help="""Allows you to compute product cost price based on average cost."""),
        'module_purchase_double_validation': fields.selection([
            (0, 'Confirm purchase orders in one step'),
            (1, 'Get 2 levels of approvals to confirm a purchase order')
            ], "Levels of Approvals",
            help='Provide a double validation mechanism for purchases exceeding minimum amount.\n'
                 '-This installs the module purchase_double_validation.'),
        'module_purchase_requisition': fields.selection([
            (0, 'Purchase propositions trigger draft purchase orders to a single supplier'),
            (1, 'Allow using call for tenders to get quotes from multiple suppliers (advanced)')
            ], "Calls for Tenders",
            help="""Calls for tenders are used when you want to generate requests for quotations to several vendors for a given set of products.
            You can configure per product if you directly do a Request for Quotation
            to one vendor or if you want a Call for Tenders to compare offers from several vendors."""),
        'module_stock_dropshipping': fields.selection([
            (0, 'Suppliers always deliver to your warehouse(s)'),
            (1, "Allow suppliers to deliver directly to your customers")
            ], "Dropshipping",
            help='\nCreates the dropship Route and add more complex tests'
                 '-This installs the module stock_dropshipping.'),
    }
    # Default to draft-invoice based control for new purchase orders.
    _defaults = {
        'default_invoice_method': 'order',
    }
class account_config_settings(osv.osv_memory):
    # Extends the accounting settings wizard with a purchase-specific toggle
    # for analytic accounting on purchase order lines.
    _inherit = 'account.config.settings'
    _columns = {
        'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases',
            implied_group='purchase.group_analytic_accounting',
            help="Allows you to specify an analytic account on purchase order lines."),
    }
|
sorgerlab/indra | indra/sources/medscan/fix_csxml_character_encoding.py | Python | bsd-2-clause | 1,680 | 0.00119 | import sys
import logging
codec_options = ['utf-8', 'latin_1']
logger = logging.getLogger(__name__)
def try_decode(byte_string, codec):
    """Decode ``byte_string`` with ``codec``; return None if it cannot.

    Only genuine decoding failures are swallowed.  The original bare
    ``except`` also hid unrelated errors (e.g. an unknown codec name),
    which now propagate so misconfiguration is visible.
    """
    try:
        return byte_string.decode(codec)
    except UnicodeDecodeError:
        return None
def shortest_string(strings):
    """Return the shortest string in ``strings`` (first one wins on ties).

    Returns None for an empty sequence.  Uses ``min`` with ``key=len``,
    which preserves the original first-shortest tie-breaking.
    """
    return min(strings, key=len, default=None)
def fix_character_encoding(input_file, output_file):
    """Re-encode ``input_file`` line by line into a UTF-8 ``output_file``.

    Each line is decoded with every candidate codec in ``codec_options``;
    when several succeed, the decoding yielding the shortest text wins
    (multi-byte UTF-8 sequences collapse to fewer characters than a
    latin-1 reading of the same bytes).

    Exits the process with status 1 if a line cannot be decoded at all.
    """
    with open(input_file, 'rb') as f_in:
        with open(output_file, 'wb') as f_out:
            for line in f_in:
                # Try to decode with both latin_1 and utf-8
                decoded = [try_decode(line, c) for c in codec_options]
                decoded = [d for d in decoded if d is not None]
                if len(decoded) == 0:
                    # Hard failure: report at error level (was logger.info).
                    logger.error('Could not decode: %s' % line)
                    sys.exit(1)
                else:
                    # If more than one, choose the codec that gives the best
                    # length
                    chosen_string = shortest_string(decoded)
                    # Write the result re-encoded as UTF-8
                    f_out.write(chosen_string.encode('utf-8'))
if __name__ == '__main__':
    # CLI entry point: fix_csxml_character_encoding.py <input> <output>
    args = sys.argv[1:]
    if len(args) != 2:
        logger.error('Expected two arguments: the input file'
                     ' and the output file')
        sys.exit(1)
    input_file = args[0]
    output_file = args[1]
    fix_character_encoding(input_file, output_file)
|
npcasler/phenomics | HTPGeoprocessor.py | Python | apache-2.0 | 4,471 | 0.006039 | """
/***************************************************************************
Name : HTP Geoprocessor
Description : Tools for processing HTP geospatial data
Date : 29/Mar/12
copyright : (C) 2012 by Dr. Kelly Thorp, USDA-ARS
email : kelly.thorp@ars.usda.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import os
import sys
# Import the code for the dialogs
from MapCreatorDlg import MapCreatorDlg
from GeoprocessorDlg import GeoprocessorDlg
from PreprocessorDlg import PreprocessorDlg
class HTPGeoprocessor:
    """QGIS plugin entry class: registers the HTP Geoprocessor menu actions
    and dispatches them to the tool dialogs."""

    def __init__(self, iface):
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale: pick the translation file matching the user's
        # two-letter locale code, if one is bundled under i18n/
        locale = QSettings().value("locale/userLocale")[0:2]
        localePath = os.path.join(self.plugin_dir, 'i18n', 'htpgeoprocessor_{}.qm'.format(locale))
        if os.path.exists(localePath):
            self.translator = QTranslator()
            self.translator.load(localePath)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)

    def initGui(self):
        """Create the plugin menu actions and hook them to their handlers."""
        # Create action that will start plugin configuration
        icon = QIcon(":/plugins/htpgeoprocessor/icon.png")
        self.createmap = QAction(icon,u"Map Creator", self.iface.mainWindow())
        self.preprocess = QAction(icon,u"Preprocessor", self.iface.mainWindow())
        self.geoprocess = QAction(icon,u"Geoprocessor", self.iface.mainWindow())
        self.helpme = QAction(icon, u"Help", self.iface.mainWindow())
        # connect the action to a method
        self.createmap.triggered.connect(self.CreateMap)
        self.preprocess.triggered.connect(self.Preprocess)
        self.geoprocess.triggered.connect(self.Geoprocess)
        self.helpme.triggered.connect(self.Help)
        # Add toolbar button and menu item
        self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.createmap)
        self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.preprocess)
        self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.geoprocess)
        self.iface.addPluginToMenu(u"&HTP Geoprocessor", self.helpme)

    def unload(self):
        """Remove the plugin menu items when the plugin is disabled."""
        # Remove the plugin menu item and icon
        self.iface.removePluginMenu(u"&HTP Geoprocessor", self.createmap)
        self.iface.removePluginMenu(u"&HTP Geoprocessor", self.preprocess)
        self.iface.removePluginMenu(u"&HTP Geoprocessor", self.geoprocess)
        self.iface.removePluginMenu(u"&HTP Geoprocessor", self.helpme)

    # run methods that perform all the real work
    def CreateMap(self):
        """Open the Map Creator dialog (modal)."""
        dlg = MapCreatorDlg(self.iface)
        dlg.exec_()

    def Preprocess(self):
        """Open the Preprocessor dialog (modal)."""
        dlg = PreprocessorDlg()
        dlg.exec_()

    def Geoprocess(self):
        """Open the Geoprocessor dialog (modal)."""
        # create and show the dialog
        dlg = GeoprocessorDlg(self.iface)
        # show the dialog
        #dlg.show() #Modeless dialog
        dlg.exec_() #Modal dialog

    def Help(self):
        """Open the bundled README PDF with the platform's default viewer."""
        path = os.path.dirname(sys.modules[__name__].__file__)
        # NOTE(review): sys.platform == 'linux' misses Python 2's 'linux2';
        # confirm the target interpreter, or use startswith('linux').
        if sys.platform == 'linux':
            os.system(path+"//HTP Geoprocessor README.pdf")
        elif sys.platform == 'win32':
            os.startfile(path+"\\HTP Geoprocessor README.pdf")
        else:
            QMessageBox.critical(self.iface.mainWindow(),'Help','Error opening document. Look in plug-in install directory for PDF.')
|
jimengliu/cattle | tests/integration/cattletest/core/test_link.py | Python | apache-2.0 | 7,262 | 0 | from common_fixtures import * # NOQA
def test_link_instance_stop_start(super_client, client, context):
target1 = context.create_container(ports=['180', '122/udp'])
target2 = context.create_container(ports=['280', '222/udp'])
c = context.create_container(instanceLinks={
'target1_link': target1.id,
'target2_link': target2.id})
assert c.state == 'running'
ports = set()
for link in c.instanceLinks():
for port in super_client.reload(link).data.fields.ports:
ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert len(ports) > 0
new_ports = set()
c = client.wait_success(c.stop())
assert c.state == 'stopped'
for link in super_client.reload(c).instanceLinks():
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert ports == new_ports
new_ports = set()
c = client.wait_success(c.start())
assert c.state == 'running'
for link in super_client.reload(c).instanceLinks():
assert len(link.data.fields.ports) == 2
for port in link.data.fields.ports:
new_ports.add('{}:{}'.format(port.publicPort, port.privatePort))
assert ports == new_ports
def _find_agent_instance_ip(nsp, source):
    """Return the primary IP of the NSP agent instance on source's vnet."""
    assert source is not None
    vnet_id = source.nics()[0].vnetId
    assert vnet_id is not None
    for candidate in nsp.instances():
        if candidate.nics()[0].vnetId != vnet_id:
            continue
        ip = candidate.primaryIpAddress
        assert ip is not None
        return ip
    assert False, 'Failed to find agent instance for ' + source.id
def test_link_create(client, super_client, context):
    # Creating a container with links exposes each target's ports through
    # the network-services agent instance on the container's vnet.
    target1 = context.create_container(ports=['180', '122/udp'])
    target2 = context.create_container(ports=['280', '222/udp'])
    c = context.create_container(instanceLinks={
        'target1_link': target1.id,
        'target2_link': target2.id})
    assert c.state == 'running'
    assert len(c.instanceLinks()) == 2
    assert len(target1.targetInstanceLinks()) == 1
    assert len(target2.targetInstanceLinks()) == 1
    links = c.instanceLinks()
    names = set([x.linkName for x in links])
    assert names == set(['target1_link', 'target2_link'])
    for link in links:
        link = super_client.reload(link)
        assert link.state == 'active'
        assert link.instanceId == c.id
        ip_address = _find_agent_instance_ip(context.nsp,
                                             super_client.reload(c))
        if link.linkName == 'target1_link':
            assert link.targetInstanceId == target1.id
            assert len(link.data.fields.ports) == 2
            for port in link.data.fields.ports:
                assert port.ipAddress == ip_address
                assert port.publicPort is not None
                if port.privatePort == 180:
                    assert port.protocol == 'tcp'
                elif port.privatePort == 122:
                    assert port.protocol == 'udp'
                else:
                    assert False
        if link.linkName == 'target2_link':
            assert link.targetInstanceId == target2.id
            assert len(link.data.fields.ports) == 2
            for port in link.data.fields.ports:
                assert port.ipAddress == ip_address
                assert port.publicPort is not None
                if port.privatePort == 280:
                    assert port.protocol == 'tcp'
                elif port.privatePort == 222:
                    assert port.protocol == 'udp'
                else:
                    assert False
def test_link_update(client, context):
    # Retargeting a link transitions it through updating-active and back
    # to active pointing at the new target.
    target1 = context.create_container()
    target2 = context.create_container()
    c = context.create_container(instanceLinks={
        'target1_link': target1.id,
    })
    link = c.instanceLinks()[0]
    assert link.targetInstanceId == target1.id
    link.targetInstanceId = target2.id
    link = client.update(link, link)
    assert link.state == 'updating-active'
    link = client.wait_success(link)
    assert link.targetInstanceId == target2.id
    assert link.state == 'active'
def test_link_remove_restore(client, context):
    # Link state tracks the owning container's lifecycle: active while
    # running, inactive when stopped/removed/restored, removed after purge.
    target1 = context.create_container()
    c = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False,
                                instanceLinks={
                                    'target1_link': target1.id})
    c = client.wait_success(c)
    links = c.instanceLinks()
    assert len(links) == 1
    link = links[0]
    assert link.state == 'inactive'
    c = client.wait_success(c.start())
    link = client.reload(link)
    assert c.state == 'running'
    assert link.state == 'active'
    c = client.wait_success(c.stop())
    link = client.reload(link)
    assert c.state == 'stopped'
    assert link.state == 'inactive'
    c = client.wait_success(client.delete(c))
    link = client.reload(link)
    assert c.state == 'removed'
    assert link.state == 'inactive'
    c = client.wait_success(c.restore())
    link = client.reload(link)
    assert c.state == 'stopped'
    assert link.state == 'inactive'
    c = client.wait_success(client.delete(c))
    link = client.reload(link)
    assert c.state == 'removed'
    assert link.state == 'inactive'
    c = client.wait_success(c.purge())
    link = client.reload(link)
    assert c.state == 'purged'
    assert link.state == 'removed'
def test_null_links(context):
    # A link may reference no target; it still activates with a null target.
    c = context.create_container(instanceLinks={
        'null_link': None
    })
    links = c.instanceLinks()
    assert len(links) == 1
    assert links[0].state == 'active'
    assert links[0].linkName == 'null_link'
    assert links[0].targetInstanceId is None
def test_link_timeout(super_client, client, context):
    # A link to a never-started target must not block activation forever:
    # linkWaitTime caps the wait and the container still reaches running.
    t = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False)
    c = super_client.create_container(accountId=context.project.id,
                                      imageUuid=context.image_uuid,
                                      instanceLinks={'t': t.id},
                                      data={'linkWaitTime': 100})
    c = client.wait_transitioning(c)
    assert c.state == 'running'
def test_link_remove_instance_restart(client, super_client, context):
    # An explicitly removed link stays removed; restarting the owning
    # container must not resurrect it.
    target1 = context.create_container()
    c = client.create_container(imageUuid=context.image_uuid,
                                startOnCreate=False,
                                instanceLinks={
                                    'target1_link': target1.id})
    c = client.wait_success(c)
    links = c.instanceLinks()
    assert len(links) == 1
    link = links[0]
    assert link.state == 'inactive'
    c = client.wait_success(c.start())
    link = client.reload(link)
    assert c.state == 'running'
    assert link.state == 'active'
    c = client.wait_success(c.stop())
    assert c.state == 'stopped'
    link = client.reload(link)
    link = super_client.wait_success(link.remove())
    assert link.state == 'removed'
    c = client.wait_success(c.start())
    assert c.state == 'running'
bsc-dd/hecuba | hecuba_py/tests/withcassandra_twins/storagenumpy_twins_tests.py | Python | apache-2.0 | 6,773 | 0.00502 | import gc
import unittest
from hecuba import config, StorageNumpy
import uuid
import numpy as np
from storage.api import getByID
class StorageNumpyTwinsTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Run the whole suite under a dedicated execution name (keyspace),
        # remembering the previous one so tearDownClass can restore it.
        cls.old = config.execution_name
        config.execution_name = "StorageNumpyTest".lower()
    @classmethod
    def tearDownClass(cls):
        # Keyspace cleanup is intentionally disabled (kept for reference);
        # only the previous execution name is restored.
        # config.session.execute("DROP KEYSPACE IF EXISTS {}".format(config.execution_name))
        # config.session.execute("DROP KEYSPACE IF EXISTS {}_arrow".format(config.execution_name))
        config.execution_name = cls.old
    # Create a new keyspace per test
    def setUp(self):
        # Cache the active keyspace name used to build expected twin names.
        self.ksp = config.execution_name
    def tearDown(self):
        # No per-test cleanup needed.
        pass
table = 'numpy_twin_test'
    def test_twin_volatile_from_numpy(self):
        # A volatile StorageNumpy built from an ndarray gets an in-memory
        # transposed twin with no name or storage id.
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n)
        self.assertTrue(s._twin_ref is not None)
        self.assertEqual(s._twin_id, None)
        self.assertEqual(s._twin_name, None)
        self.assertEqual(s._twin_ref._name, None)
        self.assertEqual(s._twin_ref.storage_id, None)
        self.assertEqual(n.T.shape, s._twin_ref.shape)
        self.assertTrue(np.allclose(s._twin_ref, n.T))
    def test_twin_volatile_from_storagenumpy(self):
        # Copy-constructing from a volatile StorageNumpy yields another
        # volatile twin (no name, no storage id).
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n)
        s2 = StorageNumpy(s)
        self.assertTrue(s2._twin_ref is not None)
        self.assertEqual(s2._twin_id, None)
        self.assertEqual(s2._twin_name, None)
        self.assertEqual(s2._twin_ref._name, None)
        self.assertEqual(s2._twin_ref.storage_id, None)
        self.assertEqual(n.T.shape, s2._twin_ref.shape)
        self.assertTrue(np.array_equal(s2._twin_ref, n.T))
    def test_twin_persistent(self):
        # Persisting at construction registers the Arrow twin both on the
        # object and in the hecuba.istorage metadata table.
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n, 'persistent')
        self.assertTrue(s._twin_id is not None)
        self.assertEqual(s._twin_name, self.ksp+'_arrow.persistent_arrow')
        self.assertTrue(np.array_equal(s._twin_ref, n.T))
        self.assertEqual(s._twin_ref._name, self.ksp+'_arrow.persistent_arrow')
        self.assertEqual(s._twin_ref.storage_id, s._twin_id)
        self.assertEqual(n.T.shape, s._twin_ref.shape)
        self.assertEqual(s._build_args.twin_id, s._twin_id) #stored data in cassandra
        res = config.session.execute(
            "SELECT twin_id FROM hecuba.istorage WHERE storage_id = %s",
            [s.storage_id] )
        self.assertEqual(res.one().twin_id, s._twin_id)
    def test_twin_persistent_manual(self):
        # Calling make_persistent on a volatile StorageNumpy must register
        # the twin exactly like construction-time persistence does.
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n)
        s.make_persistent('manual_pers')
        self.assertTrue(s._twin_id is not None)
        self.assertEqual(s._twin_name, self.ksp+'_arrow.manual_pers_arrow')
        self.assertEqual(s._twin_ref._name, self.ksp+'_arrow.manual_pers_arrow')
        self.assertEqual(s._twin_ref.storage_id, s._twin_id)
        self.assertEqual(n.T.shape, s._twin_ref.shape)
        self.assertEqual(s._build_args.twin_id, s._twin_id) #stored data in cassandra
        res = config.session.execute(
            "SELECT twin_id FROM hecuba.istorage WHERE storage_id = %s",
            [s.storage_id] )
        self.assertEqual(res.one().twin_id, s._twin_id)
        self.assertTrue(np.allclose(s._twin_ref, n.T))
def test_twin_persistent_from_storagenumpy(self):
n = np.arange(3*4).reshape(3,4)
s = StorageNumpy(n, 'pers_from_sn')
s2 = StorageNumpy(s) # Create a volatile SN
self.assertTrue(s2._twin_ref is not None)
self.assertEqual(s2._twin_id, None)
self.assertEqual(s2._twin_name, None)
self.assertEqual(s2._twin_ref._name, None)
self.assertEqual(s2._twin_ref.storage_id, None)
self.assertEqual(n.T.shape, s2._twin_ref.shape)
self.assertTrue(np.allclose(s2._twin_ref, n.T))
# FIXME currently this case is not implemented
# def test_twin_persistent_from_storagenumpy2(self):
#
# n = np.arange(3*4).reshape(3,4)
#
# s = StorageNumpy(n, 'kk')
#
# s2 = StorageNumpy(s, 'ooops') #The name should be ignored
#
# self.assertTrue(s2._twin_id is not None)
# self.assertEqual(s2._twin_name, self.ksp+'.harrow_kk')
# self.assertEqual(s2._twin_ref._name, self.ksp+'.harrow_kk')
# self.assertEqual(s2._twin_ref.storage_id, s2._twin_id)
# self.assertEqual(s2._build_args.twin_id, s2._twin_id) #stored data in cassandra
# self.assertEqual(n.T.shape, s2._twin_ref.shape)
# self.assertTrue(np.array_equal(s2._twin_ref, n.T))
    def test_load_persistent_twin_by_name(self):
        """Reloading a StorageNumpy by name must restore its twin metadata and data."""
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n, 'load_by_name')
        sid = s.storage_id
        del s
        # Instantiating with only the name triggers a load from Cassandra.
        s2 = StorageNumpy(None, 'load_by_name')
        self.assertTrue(s2._twin_id is not None)
        self.assertEqual(s2._twin_name, self.ksp+'_arrow.load_by_name_arrow')
        self.assertEqual(s2._twin_ref._name, self.ksp+'_arrow.load_by_name_arrow')
        self.assertEqual(s2._twin_ref.storage_id, s2._twin_id)
        # The reloaded object keeps the original storage id.
        self.assertEqual(sid, s2.storage_id)
        self.assertEqual(n.T.shape, s2._twin_ref.shape)
        self.assertTrue(np.allclose(s2._twin_ref, n.T))
        self.assertTrue(np.allclose(s2, n))
    def test_load_persistent_twin_by_id(self):
        """Reloading a StorageNumpy by storage_id must restore its twin metadata and data."""
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n, 'load_by_id')
        sid = s.storage_id
        del s
        # Instantiating with only the id triggers a load from Cassandra.
        s2 = StorageNumpy(None, None, sid)
        self.assertTrue(s2._twin_ref is not None)
        self.assertTrue(s2._twin_id is not None)
        self.assertEqual(s2._twin_name, self.ksp+'_arrow.load_by_id_arrow')
        self.assertEqual(s2._twin_ref._name, self.ksp+'_arrow.load_by_id_arrow')
        self.assertEqual(s2._twin_ref.storage_id, s2._twin_id)
        # The reloaded object keeps the original storage id.
        self.assertEqual(sid, s2.storage_id)
        self.assertEqual(n.T.shape, s2._twin_ref.shape)
        self.assertTrue(np.allclose(s2, n))
        self.assertTrue(np.allclose(s2._twin_ref, n.T))
    def test_load_persistent_twin_by_name_and_id(self):
        """Reload a StorageNumpy by both name and storage_id.

        NOTE(review): all assertions are disabled (see FIXME below); this test
        currently only checks that the constructor does not raise.
        """
        n = np.arange(3*4).reshape(3,4)
        s = StorageNumpy(n, 'by_name_and_id')
        sid = s.storage_id
        del s
        s2 = StorageNumpy(None, 'by_name_and_id', sid)
        #FIXME
        #self.assertTrue(s2._twin_id is not None)
        #self.assertEqual(s2._twin_name, self.ksp+'.harrow_kk')
        #self.assertEqual(s2._twin_ref._name, self.ksp+'.harrow_kk')
        #self.assertEqual(s2._twin_ref.storage_id, s2._twin_id)
        #self.assertEqual(sid, s2.storage_id)
        #self.assertTrue(np.array_equal(s2._twin_ref, n.T))
        pass
if __name__ == '__main__':
    # Allow running this test module directly: python <module>.py
    unittest.main()
|
h4ck3rm1k3/orca-sonar | test/keystrokes/gtk-demo/role_dialog.py | Python | lgpl-2.1 | 1,507 | 0.002654 | #!/usr/bin/python
"""Test of dialog presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Expander"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Return"))
sequence.append(utils.AssertPresentationAction(
"Dialog automatic reading",
["KNOWN ISSUE: Depending on timing we present more than just the stuff below",
"BRAILLE LINE: 'gtk-demo application GtkExpander dialog'",
" VISIBLE: 'GtkExpander dialog', cursor=1",
"BRAILLE LINE: 'gtk-demo application GtkExpander dialog & y Details collapsed toggle button'",
" VISIBLE: '& y Details collapsed toggle but', cursor=1",
"SPEECH OUTPUT: 'GtkExpander Expander demo. Cli | ck on the triangle for details.'",
"SPEECH OUTPUT: 'Details toggle button collapsed'"]))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"Dialog Where Am I",
["BRAILLE LINE: 'gtk-demo application GtkExpander dialog & y Details collapsed toggle button'",
" VISIBLE: '& y Detai | ls collapsed toggle but', cursor=1",
"SPEECH OUTPUT: 'Details'",
"SPEECH OUTPUT: 'toggle button collapsed'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
project-callisto/callisto-core | callisto_core/wizard_builder/forms.py | Python | agpl-3.0 | 1,434 | 0.000697 | fro | m django import forms
from django.contrib.auth import get_user_model
from . import widgets
User = get_user_model()
class PageForm(forms.Form):
    """A Django form whose fields are generated at runtime from a wizard page.

    Instances are built through :meth:`setup` rather than the plain Form
    constructor, and cleaning deliberately bypasses Django's per-field
    validation (see :meth:`_clean_fields`).
    """

    @classmethod
    def setup(cls, page, data):
        """Build a PageForm for *page*, bind *data* and run cleaning.

        NOTE(review): this mutates ``cls.base_fields``, which is shared class
        state — concurrent setups for different pages could race. Confirm
        forms are always built sequentially within a process/request.
        """
        cls.base_fields = {
            question.field_id: question.make_field() for question in page.mock_questions
        }
        self = cls(data)
        self.page = page
        self.full_clean()
        return self

    @property
    def sections(self):
        # Imported here to avoid a circular import with .models.
        from .models import Page
        return dict(Page.SECTION_CHOICES)

    @property
    def serialized(self):
        """Serialized representation of every question on this form's page."""
        return [question.serialized for question in self.page.mock_questions]

    def _clean_fields(self):
        """Copy raw widget values into cleaned_data without field validation.

        Overrides Form._clean_fields: values are taken verbatim from the
        submitted data (no ``field.clean`` call), then conditional fields
        attached to choices are collected as well.
        """
        for name, field in self.fields.items():
            self.cleaned_data[name] = field.widget.value_from_datadict(
                self.data, self.files, self.add_prefix(name)
            )
        self._clean_conditional_fields()

    def _clean_conditional_fields(self):
        # Conditional ("extra info") inputs hang off individual choices.
        for question in self.page.mock_questions:
            for choice in question.choices:
                self._clean_if_choice_conditional(choice.data)

    def _clean_if_choice_conditional(self, choice):
        # Only choices that declare a conditional widget contribute a value.
        field = widgets.conditional_field_from_choice(choice)
        if field:
            name = widgets.conditional_id(choice)
            self.cleaned_data[name] = field.widget.value_from_datadict(
                self.data, self.files, name
            )
|
k11a/snmpconverter | configinit.py | Python | gpl-3.0 | 24,267 | 0.015366 | # Copyright 2017 www.msv.space
#
# This file is part of SNMPTableMixer.
#
# SNMPTableMixer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SNMPTableMixer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SNMPTableMixer. If not, see <http://www.gnu.org/licenses/>.
#
# (Этот файл — часть SNMPTableMixer.
#
# SNMPTableMixer - свободная программа: вы можете перераспространять ее и/или
# изменять ее на условиях Стандартной общественной лицензии GNU в том виде,
# в каком она была опубликована Фондом свободного программного обеспечения;
# либо версии 3 лицензии, либо (по вашему выбору) любой более поздней
# версии.
#
# SNMPTableMixer распространяется в надежде, что она будет полезной,
# но БЕЗО ВСЯКИХ ГАРАНТИЙ; даже без неявной гарантии ТОВАРНОГО ВИДА
# или ПРИГОДНОСТИ ДЛЯ ОПРЕДЕЛЕННЫХ ЦЕЛЕЙ. Подробнее см. в Стандартной
# об | щественной лицензии GNU.
#
# Вы должны были получить копию Стандартной общественной лицензии GNU
# вместе с этой программой. Если это не так, см.
# <http://www.gnu.org/licenses/>.
import sqlite3
def normalization(data=''):
    """Normalize one raw config line.

    Tightens whitespace around the first '=', discards whole-line and inline
    comments, and guarantees exactly one trailing newline.
    """
    line = data
    eq = line.find('=')
    if eq != -1:
        # Collapse whitespace around the first '=' only.
        line = line[:eq].rstrip() + '=' + line[eq + 1:].lstrip()
    if line.startswith('#'):
        # Whole-line comment: nothing of interest remains.
        line = ''
    comment_at = line.find(' #')
    if comment_at != -1:
        # Strip an inline trailing comment.
        line = line[:comment_at] + '\n'
    return line.rstrip() + '\n'
def calc_pos_param(data):
    """Return the indentation depth of *data*: the count of leading whitespace characters."""
    for depth, char in enumerate(data):
        if not char.isspace():
            return depth
    return len(data)
def check_param_value(param_value, checker_value):
    """Validate *param_value* (a string) against the validator named by *checker_value*.

    Known validators: ``None`` (always valid), 'tcp_udp_port', 'secs', 'ip',
    'name', 'yesno', 'oid', 'table_oid' and 'string'.  An unknown validator
    name yields a failed check with an empty error description (original
    behaviour, kept for compatibility).

    Returns a dict ``{'result_ok': bool, 'errdesc': str}``.

    Fix: the bare ``except:`` clauses (which also swallowed SystemExit and
    KeyboardInterrupt) are narrowed to the conversion errors int() can raise.
    """
    result_ok = False
    errdesc = ''
    if checker_value is None:
        result_ok = True
    elif checker_value == 'tcp_udp_port':
        try:
            if 0 <= int(param_value) <= 65535:
                result_ok = True
            else:
                errdesc = 'Not correct tcp/udp port. Available from 0 to 65535. Read: ' + str(int(param_value))
        except (TypeError, ValueError):
            errdesc = 'Not correct tcp/udp port. Available from 0 to 65535. Read: ' + param_value
    elif checker_value == 'secs':
        try:
            if 0 <= int(param_value) <= 3153600000: # <= 100 years
                result_ok = True
            else:
                errdesc = 'Not correct Secs. Available from 0 to 3153600000. Read: ' + str(int(param_value))
        except (TypeError, ValueError):
            errdesc = 'Not correct Secs. Available from 0 to 3153600000. Read: ' + param_value
    elif checker_value == 'ip':
        ipaddress = param_value.split('.')
        if (set(param_value).issubset(set('0123456789.'))) and (len(ipaddress) == 4):
            result_ok = True
            for i in ipaddress:
                try:
                    if int(i) < 0 or int(i) > 255:
                        result_ok = False
                        errdesc = 'Not correct byte of IP address: "' + i + '"'
                        break
                except (TypeError, ValueError):
                    # Covers empty octets such as '1..2.3'.
                    result_ok = False
                    errdesc = 'Not correct byte of IP address: "' + i + '"'
                    break
    elif checker_value == 'name':
        if len(param_value) <= 128 and set(param_value).issubset('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'):
            result_ok = True
        else:
            errdesc = 'Not correct Name. Available len 128. Allowed characters: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
    elif checker_value == 'yesno':
        if param_value.lower() == 'yes' or param_value.lower() == 'no':
            result_ok = True
        else:
            errdesc = 'Not correct value. Available "yes" and "no"'
    elif checker_value == 'oid':
        oid = param_value.split('.')
        if len(oid) <= 128 and set(param_value).issubset(set('0123456789.')): # RFC 4181 (4.6.6.)
            result_ok = True
            for i in oid:
                try:
                    if int(i) < 0 or int(i) > 4294967295: # RFC 2578 (3.5.)
                        result_ok = False
                        errdesc = 'Not correct OID. RFC 2578 paragraph 3.5. Each sub-identifier has a maximum value of 2^32-1 (4294967295 decimal). Read: "' + i + '"'
                        break
                except (TypeError, ValueError):
                    result_ok = False
                    errdesc = 'Not correct OID. Empty sub-identifier or other error.'
                    break
        else:
            errdesc = 'Not correct OID value. All OIDs are limited to 128 sub-identifiers. RFC 4181 paragraph 4.6.6. Allowed characters: "0123456789."'
    elif checker_value == 'table_oid':
        # A table_oid is a regular OID restricted to the private-MIB prefix.
        receive_check = check_param_value(param_value, 'oid')
        result_ok = receive_check['result_ok']
        if not result_ok:
            errdesc = 'Not correct table_oid. ' + receive_check['errdesc']
        else:
            if not param_value.startswith('1.3.6.1.4.1.0.'):
                errdesc = 'Not correct table_oid. table_oid need started "1.3.6.1.4.1.0." (OID for private SNMP MIBs).'
                result_ok = False
    elif checker_value == 'string':
        if len(param_value) <= 4096:
            result_ok = True
        else:
            errdesc = 'Not correct value. Available len 4096 symbols. Read len: ' + str(len(param_value)) + 'symbols'
    return {'result_ok' : result_ok, 'errdesc' : errdesc}
def check_param_return_pos_param_stack (pos_param_stack, param_description, param_name, param_value, pos_param):
    """Validate one parsed parameter against the schema tree and update the position stack.

    ``pos_param_stack`` maps an indentation depth to the parameter name seen at
    that depth; it encodes the path from the schema root to the current
    parameter.  ``pos_param`` is the indentation depth of the parameter being
    checked.  Returns ``{'param_ok': bool, 'pos_param_stack': dict, 'errdesc': str}``.

    NOTE(review): on success ``errdesc`` may still hold the default message
    'Not correct name of parametrs'; callers presumably ignore it when
    ``param_ok`` is True — confirm.
    """
    result_ok = False
    errdesc = 'Not correct name of parametrs'
    if pos_param == 0:
        # Root level: a valid name restarts the stack from scratch.
        if param_name in param_description.keys():
            pos_param_stack.clear()
            pos_param_stack = {0 : param_name}
            result_ok = True
    elif pos_param in pos_param_stack:
        # Known depth: descend the schema to the parent of this depth...
        for i in sorted(pos_param_stack.keys()):
            if i >= pos_param:
                break
            param_description = param_description[pos_param_stack[i]]['childrens']
        if param_name in param_description.keys():
            # ...then drop any deeper entries and replace the one at this depth.
            for i in list(pos_param_stack.keys()):
                if i > pos_param:
                    pos_param_stack.pop(i)
            pos_param_stack[pos_param] = param_name
            result_ok = True
    elif pos_param > max(pos_param_stack):
        # Deeper than anything seen so far: descend through the whole stack.
        for i in sorted(pos_param_stack.keys()):
            param_description = param_description[pos_param_stack[i]]['childrens']
        if param_name in param_description.keys():
            pos_param_stack[pos_param] = param_name
            result_ok = True
    if result_ok:
        # The name is valid at this position; now check the value's presence
        # and content against the schema entry.
        if (param_description[param_name]['need_value'] == 0) and (param_value is not None):
            result_ok = False
        elif (param_description[param_name]['need_value'] == 1) and (param_value is None):
            result_ok = False
        elif (param_description[param_name]['need_value'] == 1) and (param_value is not None):
            receive_check = check_param_value(param_value, param_description[param_name]['checker_value'])
            errdesc = receive_check['errdesc']
            result_ok = receive_check['result_ok']
    return {'param_ok' : result_ok, 'pos_param_stack' : pos_param_stack, 'errdesc' : errdesc}
def set_param(pos_param_stack, params, param_name, param_value):
    """Store a parsed parameter into *params* according to the stack depth.

    Depth 1 opens a fresh record; depth 2 adds a key/value pair to the
    current record; anything else is a malformed config.
    """
    depth = len(pos_param_stack)
    if depth == 1:
        # Top-level entry: start a fresh parameter record.
        params.append({})
    elif depth == 2:
        # Second level: attach the key/value to the current record.
        params[-1][param_name] = param_value
    else:
        raise ValueError('Error config.')
    return params
def name_of_table_correct (link, table, tables_cfg):
    """Return True when some *other* table in *tables_cfg* is named *link*.

    *table* itself is excluded from the search, so a table referring to its
    own name is not considered a match.

    Fix: the original fell off the end and returned None on a miss; an
    explicit bool is returned instead (still falsy, so callers relying on
    truthiness are unaffected).
    """
    return any(ctable is not table and link == ctable.table_name
               for ctable in tables_cfg)
def name_of_column_correct(link, table, tables_cfg):
for ctable in tables_cfg:
if not ctable is table:
for ccolumn in ctable.columns:
if ccolumn['column_name |
natetrue/ReplicatorG | skein_engines/skeinforge-0006/skeinforge_tools/export_plugins/binary_16_byte.py | Python | gpl-2.0 | 10,998 | 0.037189 | """
Binary 16 byte is an export plugin to convert gcode into 16 byte binary segments.
An export plugin is a script in the export_plugins folder which has the functions getOuput, isArchivable and writeOutput. It is
meant to be run from the export tool. To ensure that the plugin works on platforms which do not handle file capitalization
properly, give the plugin a lower case name.
The getOutput function of this script takes a gcode text and returns that text converted into 16 byte segments. The writeOutput
function of this script takes a gcode text and writes that in a binary format converted into 16 byte segments.
Many of the functions in this script are copied from gcodec in skeinforge_utilities. They are copied rather than imported so
developers making new plugins do not have to learn about gcodec, the code here is all they need to learn.
This plugin is just a starter to make a real binary converter.
//Record structure
BinArray(0) = AscW(Inst_Code_Letter)
BinArray(1) = cInst_Code
X Data
sInt32_to_Hbytes(iXdim_1)
BinArray(2) = lsb 'short lsb
BinArray(3) = msb 'short msb
Y Data
sInt32_to_Hbytes(iYdim_2)
BinArray(4) = lsb 'short lsb
BinArray(5) = msb 'short msb
Z Data
sInt32_to_Hbytes(iZdim_3)
BinArray(6) = lsb 'short lsb
BinArray(7) = msb 'short msb
I Data
sInt32_to_Hbytes(iIdim_4)
BinArray(8) = lsb 'short lsb
BinArray(9) = msb 'short msb
J Data
sInt32_to_Hbytes(iJdim_5)
BinArray(10) = lsb 'short lsb
BinArray(11) = msb 'short msb
BinArray(12) = FP_Char
sInt32_to_Hbytes(iFP_Num)
BinArray(13) = lsb 'short lsb
BinArray(14) = bActiveFlags
BinArray(15) = AscW("#")End of record filler
Byte 14 is worth a few extra notes, this byte is used to define which of the axes are active, its used to get round the problem of say a
line of code with no mention of z. This would be put into the file as z = 0 as the space for this data is reserved, if we did nothing, this
would instruct the machine to go to z = 0. If we use the active flag to define the z axis as inactive the z = 0 is ignored and the value
set to the last saved value of z, i.e it does not move. If the z data is actually set to z = 0 then the axis would be set to active and
the move takes place.
"""
from __future__ import absolute_import
import __init__
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import polyfile
from struct import Struct
import cStringIO
import os
import sys
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getIntegerFromCharacterLengthLineOffset( character, offset, splitLine, stepLength ):
	"Get the integer after the first occurence of the character in the split line."
	valueText = getStringFromCharacterSplitLine( character, splitLine )
	if valueText is None:
		return 0
	# Apply the offset, quantize by the step length and round to the nearest step.
	return int( round( ( float( valueText ) + offset ) / stepLength ) )
def getIntegerFlagFromCharacterSplitLine( character, splitLine ):
	"Get the integer flag after the first occurence of the character in the split line."
	# The flag is 1 whenever the word exists at all, regardless of its value.
	if getStringFromCharacterSplitLine( character, splitLine ) is None:
		return 0
	return 1
def getOutput( gcodeText, binary16BytePreferences = None ):
	"""Get the exported version of a gcode file. This function, isArchivable and writeOutput are the only necessary functions in a skeinforge export plugin.
	If this plugin writes an output than should not be printed, an empty string should be returned."""
	if gcodeText == '':
		return ''
	if binary16BytePreferences == None:
		# No preferences supplied: load the saved ones from disk.
		binary16BytePreferences = Binary16BytePreferences()
		preferences.readPreferences( binary16BytePreferences )
	# Delegate the actual gcode-to-binary conversion to the skein.
	skein = Binary16ByteSkein()
	skein.parseGcode( gcodeText, binary16BytePreferences )
	return skein.output.getvalue()
def getStringFromCharacterSplitLine( character, splitLine ):
	"Get the string after the first occurence of the character in the split line."
	wordIndex = indexOfStartingWithSecond( character, splitLine )
	# A negative index means the character never led a word past the first one.
	return None if wordIndex < 0 else splitLine[ wordIndex ][ 1 : ]
def getSummarizedFilename( fileName ):
	"Get the fileName basename if the file is in the current working directory, otherwise return the original full name."
	directory = os.path.dirname( fileName )
	if directory != os.getcwd():
		return fileName
	return os.path.basename( fileName )
def getTextLines( text ):
	"Get the all the lines of text of a text."
	# Treat bare carriage returns as line breaks before splitting.
	normalized = text.replace( '\r', '\n' )
	return normalized.split( '\n' )
def indexOfStartingWithSecond( letter, splitLine ):
	"Get index of the first occurence of the given letter in the split line, starting with the second word. Return - 1 if letter is not found"
	for wordIndex, word in enumerate( splitLine ):
		if wordIndex == 0:
			# The first word (the command word) is never considered.
			continue
		# word[ : 1 ] is safe on empty words; the original word[ 0 ] raised IndexError.
		if word[ : 1 ] == letter:
			return wordIndex
	return - 1
def isArchivable():
	"Return whether or not this plugin is archivable."
	return True

def isReplacable():
	"Return whether or not the output from this plugin is replacable. This should be true if the output is text and false if it is binary."
	# The 16 byte records are raw binary, so the output must not be re-read as text.
	return False
def writeFileText( fileName, fileText ):
	"Write a text to a file."
	try:
		# 'with' guarantees the handle is closed even if write() fails;
		# the original leaked the handle on a write error.
		with open( fileName, 'wb' ) as file:
			file.write( fileText )
	except IOError:
		print( 'The file ' + fileName + ' can not be written to.' )
def writeOutput( fileName = '', gcodeText = '' ):
	"Write the exported version of a gcode file. This function, getOutput and isArchivable are the only necessary functions in a skeinforge export plugin."
	if fileName == '':
		# No file given: fall back to the first unmodified gcode file in the folder.
		unmodified = interpret.getGNUTranslatorFilesUnmodified()
		if len( unmodified ) == 0:
			print( "There are no unmodified gcode files in this folder." )
			return
		fileName = unmodified[ 0 ]
	binary16BytePreferences = Binary16BytePreferences()
	preferences.readPreferences( binary16BytePreferences )
	gcodeText = gcodec.getGcodeFileText( fileName, gcodeText )
	skeinOutput = getOutput( gcodeText, binary16BytePreferences )
	# The output goes next to the input as '<name>_export.<extension from preferences>'.
	suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_export.' + binary16BytePreferences.fileExtension.value
	writeFileText( suffixFilename, skeinOutput )
	print( 'The converted file is saved as ' + getSummarizedFilename( suffixFilename ) )
class Binary16BytePreferences:
"A class to handle the export preferences."
def __init__( self ):
"Set the default preferences, execute title & preferences fileName."
#Set the default preferences.
self.archive = []
self.fileExtension = preferences.StringPreference().getFromValue( 'File Extension:', 'bin' )
self.archive.append( self.fileExtension )
self.fileNameInput = preferences.Filename().getFromFilename( [ ( 'Gcode text files', '*.gcode' ) ], 'Open File to be Converted to Binary 16 Byte', '' )
self.archive.append( self.fileNameInput )
self.feedrateStepLength = preferences.FloatPreference().getFromValue( 'Feedrate Step Length (millimeters/second)', 0.1 )
self.archive.append( self.feedrateStepLength )
self.xStepLength = preferences.FloatPreference().getFromValue( 'X Step Length (millimeters)', 0.1 )
self.archive.append( self.xStepLength )
self.yStepLength = preferences.FloatPreference().getFromValue( 'Y Step Length (millimeters)', 0.1 )
self.archive.append( self.yStepLength )
self.zStepLength = preferences.FloatPreference().getFromValue( 'Z Step Length (millimeters)', 0.01 )
self.archive.append( self.zStepLength )
self.xOffset = preferences.FloatPreference().getFromValue( 'X Offset (millimeters)', 0.0 )
self.archive.append( self.xOffset )
self.yOffset = preferences.FloatPreference().getFromValue( 'Y Offset (millimeters)', 0.0 )
self.archive.append( self.yOffset )
self.zOffset = preferences.FloatPreference().getFromValue( 'Z Offset (millimeters)', 0.0 )
self.archive.append( self.zOffset )
#Create the archive, title of the execute button, title of the dialog & preferences fileName.
self.executeTitle = 'Convert to Binary 16 Byte'
self.saveTitle = 'Save Preferences'
preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge |
masaori335/hyper | hyper/http20/connection.py | Python | mit | 27,500 | 0.000255 | # -*- coding: utf-8 -*-
"""
hyper/http20/connection
~~~~~~~~~~~~~~~~~~~~~~~
Objects that build hyper's connection-level HTTP/2 abstraction.
"""
from ..tls import wrap_socket, H2_NPN_PROTOCOLS, H2C_PROTOCOL
from ..common.exceptions import ConnectionResetError
from ..common.bufsocket import BufferedSocket
from ..common.headers import HTTPHeaderMap
from ..packages.hyperframe.frame import (
FRAMES, DataFrame, HeadersFrame, PushPromiseFrame, RstStreamFrame,
SettingsFrame, Frame, WindowUpdateFrame, GoAwayFrame, PingFrame,
BlockedFrame, FRAME_MAX_LEN, FRAME_MAX_ALLOWED_LEN
)
from ..packages.hpack.hpack_compat import Encoder, Decoder
from .stream import Stream
from .response import HTTP20Response, HTTP20Push
from .window import FlowControlManager
from .exceptions import ConnectionError, ProtocolError
from . import errors
import errno
import logging
import socket
log = logging.getLogger(__name__)
class HTTP20Connection(object):
"""
An object representing a single HTTP/2 connection to a server.
This object behaves similarly to the Python standard library's
``HTTPConnection`` object, with a few critical differences.
Most of the standard library's arguments to the constructor are irrelevant
for HTTP/2 or not supported by hyper.
:param host: The host to connect to. This may be an IP address or a
hostname, and optionally may include a port: for example,
``'http2bin.org'``, ``'http2bin.org:443'`` or ``'127.0.0.1'``.
:param port: (optional) The port to connect to. If not provided and one also
isn't provided in the ``host`` parameter, defaults to 443.
:param secure: (optional) Whether the request should use TLS. Defaults to
``False`` for most requests, but to ``True`` for any request issued to
port 443.
:param window_manager: (optional) The class to use to manage flow control
windows. This needs to be a subclass of the
:class:`BaseFlowControlManager <hyper.http20.window.BaseFlowControlManager>`.
If not provided,
:class:`FlowControlManager <hyper.http20.window.FlowControlManager>`
will be used.
:param enable_push: (optional) Whether the server is allowed to push
resources to the client (see
:meth:`get_pushes() <hyper.HTTP20Connection.get_pushes>`).
:param ssl_context: (optional) A class with custom certificate settings.
If not provided then hyper's default ``SSLContext`` is used instead.
"""
    def __init__(self, host, port=None, secure=None, window_manager=None, enable_push=False,
                 ssl_context=None, **kwargs):
        """
        Creates an HTTP/2 connection to a specific server.

        ``**kwargs`` is accepted for signature compatibility and is ignored.
        """
        if port is None:
            # No explicit port: accept a 'host:port' host string.
            # NOTE(review): this split cannot handle IPv6 literals such as
            # '[::1]:443' — confirm callers never pass them.
            try:
                self.host, self.port = host.split(':')
                self.port = int(self.port)
            except ValueError:
                self.host, self.port = host, 443
        else:
            self.host, self.port = host, port

        if secure is not None:
            self.secure = secure
        elif self.port == 443:
            # Port 443 conventionally implies TLS.
            self.secure = True
        else:
            self.secure = False

        self._enable_push = enable_push
        self.ssl_context = ssl_context

        #: The size of the in-memory buffer used to store data from the
        #: network. This is used as a performance optimisation. Increase buffer
        #: size to improve performance: decrease it to conserve memory.
        #: Defaults to 64kB.
        self.network_buffer_size = 65536

        # Create the mutable state.
        self.__wm_class = window_manager or FlowControlManager
        self.__init_state()

        return
    def __init_state(self):
        """
        Initializes the 'mutable state' portions of the HTTP/2 connection
        object.

        This method exists to enable HTTP20Connection objects to be reused if
        they're closed, by resetting the connection object to its basic state
        whenever it ends up closed. Any situation that needs to recreate the
        connection can call this method and it will be done.

        This is one of the only methods in hyper that is truly private, as
        users should be strongly discouraged from messing about with connection
        objects themselves.
        """
        # Streams are stored in a dictionary keyed off their stream IDs. We
        # also save the most recent one for easy access without having to walk
        # the dictionary.
        self.streams = {}
        self.recent_stream = None
        # Client-initiated streams use odd ids in HTTP/2; counting starts at 1.
        self.next_stream_id = 1

        # Header encoding/decoding is at the connection scope, so we embed a
        # header encoder and a decoder. These get passed to child stream
        # objects.
        self.encoder = Encoder()
        self.decoder = Decoder()

        # Values for the settings used on an HTTP/2 connection.
        # 65535 is the protocol-defined default initial window (RFC 7540 §6.5.2).
        self._settings = {
            SettingsFrame.INITIAL_WINDOW_SIZE: 65535,
            SettingsFrame.SETTINGS_MAX_FRAME_SIZE: FRAME_MAX_LEN,
        }

        # The socket used to send data.
        self._sock = None

        # The inbound and outbound flow control windows.
        self._out_flow_control_window = 65535

        # Instantiate a window manager.
        self.window_manager = self.__wm_class(65535)

        return
def request(self, method, url, body=None, headers={}):
"""
This will send a request to the server using the HTTP request method
``method`` and the selector ``url``. If the ``body`` argument is
present, it should be string or bytes object of data to send after the
headers are finished. Strings are encoded as UTF-8. To use other
encodings, pass a bytes object. The Content-Length header is set to the
length of the body field.
:param method: The request method, e.g. ``'GET'``.
:param url: The URL to contact, e.g. ``'/path/segment'``.
:param body: (optional) The request body to send. Must be a bytestring
or a file-like object.
:param headers: (optional) The headers to send on the request.
:returns: A stream ID for the request.
"""
stream_id = self.putrequest(method, url)
for name, value in headers.items():
self.putheader(name, value, stream_id)
# Convert the body to bytes if needed.
if isinstance(body, str):
body = body.encode('utf-8')
self.endheaders(message_body=body, final=True, stream_id=stream_id)
return stream_id
def _get_stream(self, stream_id):
return (self.streams[stream_id] if stream_id is not None
else self.recent_stream)
def get_response(self, stream_id=None):
"""
Should be called after a request is sent to get a response from the
server. If sending multiple parallel requests, pass the stream ID of
the request whose response you want. Returns a
:class:`HTTP20Response <hyper.HTTP20Response>` instance.
If you pass no ``stream_id``, you will receive the oldest
:class:`HTTPResponse <hyper.HTTP20Response>` still outstanding.
:param stream_id: (optional) The stream ID of the request for which to
get a response.
:returns: A :class:`HTTP20Response <hyper.HTTP20Response>` object.
"""
stream = self._get_stream(stream_id)
return HTTP20Response(stream.getheaders(), stream)
def get_pushes(self, stream_id=None, capture_all=False):
"""
Returns a generator that yields push promises from the server. **Note
that this method is not idempotent**: promises returned in one call
will not be returned in subsequent calls. Iterating through generators
returned by multiple calls to this method simultaneously results in
undefined behavior.
:param stream_id: (optional) The stream ID of the request for which to
get push promises.
:param capture_all: (optional) If ``False``, the generator will yield
all buffered push promises without blocking. If ``True``, the
generator will first yield all buffe |
ThreatConnect-Inc/tcex | tests/api/tc/v2/threat_intelligence/test_intrusion_set_interface.py | Python | apache-2.0 | 2,914 | 0 | """Test the TcEx Threat Intel Module."""
# standard library
import os
from .ti_helpers import TestThreatIntelligence, TIHelper
class TestIntrustionSetGroups(TestThreatIntelligence):
    """Test TcEx Instrusion Set Groups.

    Most methods delegate to the shared TestThreatIntelligence helpers; only
    the create test is implemented inline.
    NOTE(review): the 'Intrustion'/'Instrusion' typos are kept — renaming the
    class would change test collection and reporting.
    """

    group_type = 'Intrusion Set'
    owner = os.getenv('TC_OWNER')
    ti = None
    ti_helper = None
    tcex = None

    def setup_method(self):
        """Configure setup before all tests."""
        self.ti_helper = TIHelper(self.group_type)
        self.ti = self.ti_helper.ti
        self.tcex = self.ti_helper.tcex

    def tests_ti_intrusion_set_create(self):
        """Create a group using specific interface."""
        group_data = {
            'name': self.ti_helper.rand_name(),
            'owner': self.owner,
        }
        ti = self.ti.intrusion_set(**group_data)
        r = ti.create()

        # assert response
        assert r.status_code == 201

        # retrieve group for asserts
        group_data['unique_id'] = ti.unique_id
        ti = self.ti.intrusion_set(**group_data)
        r = ti.single()
        response_data = r.json()
        ti_data = response_data.get('data', {}).get(ti.api_entity)

        # validate response data
        assert r.status_code == 200
        assert response_data.get('status') == 'Success'

        # validate ti data
        # NOTE(review): group_data has no key named after ti.api_entity, so both
        # sides of this comparison are likely None (vacuously true). It probably
        # was meant to compare the 'name' field — confirm and tighten.
        assert ti_data.get(ti.api_entity) == group_data.get(ti.api_entity)

        # cleanup group
        r = ti.delete()
        assert r.status_code == 200

    def tests_ti_intrusion_set_add_attribute(self, request):
        """Test group add attribute."""
        super().group_add_attribute(request)

    def tests_ti_intrusion_set_add_label(self):
        """Test group add label."""
        super().group_add_label()

    def tests_ti_intrusion_set_add_tag(self, request):
        """Test group add tag."""
        super().group_add_tag(request)

    def tests_ti_intrusion_set_delete(self):
        """Test group delete."""
        super().group_delete()

    def tests_ti_intrusion_set_get(self):
        """Test group get with generic group method."""
        super().group_get()

    def tests_ti_intrusion_set_get_filter(self):
        """Test group get with filter."""
        super().group_get_filter()

    def tests_ti_intrusion_set_get_includes(self, request):
        """Test group get with includes."""
        super().group_get_includes(request)

    def tests_ti_intrusion_set_get_attribute(self, request):
        """Test group get attribute."""
        super().group_get_attribute(request)

    def tests_ti_intrusion_set_get_label(self):
        """Test group get label."""
        super().group_get_label()

    def tests_ti_intrusion_set_get_tag(self, request):
        """Test group get tag."""
        super().group_get_tag(request)

    def tests_ti_intrusion_set_update(self, request):
        """Test updating group metadata."""
        super().group_update(request)
|
weso/CWR-WebClient | cwr_webclient/report/cwr.py | Python | mit | 2,566 | 0 | # -*- encoding: utf-8 -*-
import StringIO
import xlsxwriter
"""
Web app module.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
def generate_cwr_report_excel(cwr):
    """Render *cwr* as an in-memory Excel workbook and return its raw contents."""
    buffer = StringIO.StringIO()
    workbook = xlsxwriter.Workbook(buffer, {'in_memory': True})

    # One summary sheet first, then one sheet per transaction group.
    _generate_cwr_report_excel_general(workbook, cwr)
    for transaction_group in cwr.transmission.groups:
        _generate_cwr_report_excel_group(workbook, transaction_group)

    workbook.close()
    # Rewind before reading, otherwise read() would return nothing.
    buffer.seek(0)
    return buffer.read()
def _generate_cwr_report_excel_group(workbook, group):
    """Write one worksheet listing the record types of every transaction in *group*.

    The sheet is named after the group's transaction type.
    NOTE(review): xlsxwriter rejects duplicate sheet names and names longer
    than 31 characters — confirm transaction types never violate that.
    """
    results_sheet = workbook.add_worksheet(group.group_header.transaction_type)
    # (The unused 'bold' format that was created here has been removed.)

    # Start at row 1 / column 1, mirroring the layout of the general sheet.
    row = 1
    col = 0

    for transaction in group.transactions:
        for record in transaction:
            results_sheet.write(row, col + 1, record.record_type)
            row += 1
def _generate_cwr_report_excel_general(workbook, cwr):
    """Write the 'General info' worksheet: sender identity, dates, EDI data and counts."""
    results_sheet = workbook.add_worksheet('General info')
    bold = workbook.add_format({'bold': 1})

    header = cwr.transmission.header
    trailer = cwr.transmission.trailer

    row = 1
    col = 0

    results_sheet.write(row, col, 'Sender ID', bold)
    results_sheet.write(row, col + 1, header.sender_id)
    row += 1

    results_sheet.write(row, col, 'Sender Name', bold)
    results_sheet.write(row, col + 1, header.sender_name)
    row += 1

    results_sheet.write(row, col, 'Sender Type', bold)
    # Bug fix: this row previously repeated header.sender_name (copy-paste),
    # so the sender type never appeared in the report.
    results_sheet.write(row, col + 1, header.sender_type)
    row += 1

    row += 1

    results_sheet.write(row, col, 'Creation Date', bold)
    results_sheet.write(row, col + 1, header.creation_date_time)
    row += 1

    results_sheet.write(row, col, 'Transmission Date', bold)
    results_sheet.write(row, col + 1, header.transmission_date)
    row += 1

    row += 1

    results_sheet.write(row, col, 'EDI Standard', bold)
    results_sheet.write(row, col + 1, header.edi_standard)
    row += 1

    results_sheet.write(row, col, 'Character Set', bold)
    results_sheet.write(row, col + 1, header.character_set)
    row += 1

    row += 1

    results_sheet.write(row, col, 'Counts', bold)
    row += 1

    results_sheet.write(row, col, 'Groups', bold)
    results_sheet.write(row, col + 1, trailer.group_count)
    row += 1

    results_sheet.write(row, col, 'Transactions', bold)
    results_sheet.write(row, col + 1, trailer.transaction_count)
    row += 1

    results_sheet.write(row, col, 'Records', bold)
    results_sheet.write(row, col + 1, trailer.record_count)
|
rootcanal/whereikeepinfo | whereikeepinfo/forms.py | Python | gpl-2.0 | 2,003 | 0.003495 | import formencode
import os
class FileValidator(formencode.FancyValidator):
    """Validates an uploaded file field against a maximum size.

    Converts the cgi FieldStorage-like value into a dict with keys
    ``filename``, ``file`` (the open file object, rewound to offset 0)
    and ``size`` (in bytes).  Raises formencode.Invalid when the file
    exceeds ``max_upload_size``.
    """
    __unpackargs__ = ('upload_field', 'max_upload_size')
    def _to_python(self, field_storage, state):
        if field_storage is None:
            return field_storage
        fileobj = field_storage.file
        # Measure the file by seeking to the end; rewind below before returning.
        fileobj.seek(0, os.SEEK_END)
        size = int(fileobj.tell())
        if size > int(self.max_upload_size):
            # NOTE(review): `_` is not imported in this module's visible
            # imports -- presumably gettext is installed as a builtin
            # elsewhere; verify, otherwise this path raises NameError.
            raise formencode.Invalid(
                _('File too big'),
                field_storage, state,
                error_dict={self.upload_field:
                            formencode.Invalid(_('File too big'), field_storage, state)})
        fileobj.seek(0)
        return dict(filename=field_storage.filename, file=fileobj, size=size)
class RegistrationSchema(formencode.Schema):
    """Validates the user registration form; password fields must match."""
    allow_extra_fields = True
    username = formencode.validators.PlainText(not_empty=True)
    email = formencode.validators.Email(resolve_domain=False)
    name = formencode.validators.String(not_empty=True)
    # BUG FIX: `password` was declared twice (PlainText, then String); the
    # second class-body assignment silently overrode the first.  Keep the
    # effective (String) validator and drop the dead duplicate.
    password = formencode.validators.String(not_empty=True)
    confirm_password = formencode.validators.String(not_empty=True)
    chained_validators = [
        formencode.validators.FieldsMatch('password', 'confirm_password')
    ]
class LoginSchema(formencode.Schema):
    """Validates the login form: plain-text username, password of >= 8 chars."""
    allow_extra_fields = True
    username = formencode.validators.PlainText(not_empty=True)
    password = formencode.validators.String(not_empty=True, min=8)
class UploadFileSchema(formencode.Schema):
    """Validates the file-upload form; caps uploads at 10485760 bytes (10 MiB)."""
    allow_extra_fields = True
    uploaded_file = FileValidator(upload_field='uploaded_file', max_upload_size=10485760)
class PassphraseSchema(formencode.Schema):
    """Validates a form carrying a single non-empty passphrase."""
    allow_extra_fields = True
    passphrase = formencode.validators.String(not_empty=True)
class GenKeySchema(formencode.Schema):
    """Validates the key-generation form: key name plus passphrase of >= 8 chars."""
    allow_extra_fields = True
    keyname = formencode.validators.PlainText(not_empty=True)
    passphrase = formencode.validators.String(not_empty=True, min=8)
|
krishauser/Klampt | Python/python2_version/klampt/vis/glcommon.py | Python | bsd-3-clause | 15,541 | 0.014156 | """Defines GLWidgetPlugin, GLMultiViewportProgram, and CachedGLObject, which
are used by the core visualization module. They may be useful for writing
your own GLPluginInterface classes, too.
"""
from glinterface import GLPluginInterface
from glprogram import GLProgram,GLPluginProgram
import math
from OpenGL.GL import *
import weakref
class GLWidgetPlugin(GLPluginInterface):
    """A GL plugin that sends user events to one or more Klamp't widgets.
    To use, add this to a GLPluginProgram and call addWidget to add widgets"""
    def __init__(self):
        from ..robotsim import WidgetSet
        GLPluginInterface.__init__(self)
        #mouse button that manipulates widgets; index 2 -- presumably the
        #right button, confirm against the GL backend's button numbering
        self.klamptwidgetbutton = 2
        #master set that fans events out to every added widget
        self.klamptwidgetmaster = WidgetSet()
        #True while a widget drag is in progress
        self.klamptwidgetdragging = False
    def addWidget(self,widget):
        """Registers a Klamp't widget to receive events from this plugin."""
        self.klamptwidgetmaster.add(widget)
    def widgetchangefunc(self,event):
        """Called whenever a widget is clicked or dragged.
        event can be 'mousedown', 'mousedrag', 'mouseup'.
        Subclasses can use this to respond to widget click events"""
        pass
    def widgethoverfunc(self):
        """Called whenever a widget changes appearance due to hover.
        Subclasses can use this to respond to widget click events"""
        pass
    def display(self):
        """Draws all widgets; returns False so other plugins can draw too."""
        self.klamptwidgetmaster.drawGL(self.viewport())
        return False
    def keyboardfunc(self,c,x,y):
        if len(c)==1:
            #if c is a bytes object, need to extract out the first byte
            self.klamptwidgetmaster.keypress(c[0])
        return False
    def keyboardupfunc(self,c,x,y):
        return False
    def mousefunc(self,button,state,x,y):
        """Routes press/release of the widget button to begin/end a drag.
        Returns True (consumes the event) only while handling a drag."""
        if button == self.klamptwidgetbutton:
            if state == 0: #down
                #y is flipped: GL viewport origin is bottom-left
                if self.klamptwidgetmaster.beginDrag(x,self.view.h-y,self.viewport()):
                    self.widgetchangefunc("mousedown")
                    self.klamptwidgetdragging = True
            else:
                if self.klamptwidgetdragging:
                    self.widgetchangefunc("mouseup")
                    self.klamptwidgetmaster.endDrag()
                self.klamptwidgetdragging = False
            # FIX: repaired stray dataset-separator artifact that split
            # this call ("wantsRedraw( | )") and broke the syntax.
            if self.klamptwidgetmaster.wantsRedraw():
                self.refresh()
            return True
        return False
    def motionfunc(self,x,y,dx,dy):
        """Feeds drags to the active widget, otherwise hover-tests widgets."""
        if self.klamptwidgetdragging:
            self.klamptwidgetmaster.drag(dx,-dy,self.viewport())
            self.widgetchangefunc("mousedrag")
            if self.klamptwidgetmaster.wantsRedraw():
                self.refresh()
            return True
        else:
            # FIX: repaired stray dataset-separator artifact that split
            # the attribute name ("klamptwidget | master").
            self.klamptwidgetmaster.hover(x,self.view.h-y,self.viewport())
            if self.klamptwidgetmaster.wantsRedraw():
                self.widgethoverfunc()
                self.refresh()
        return False
    def idlefunc(self):
        self.klamptwidgetmaster.idle()
        return True
class GLMultiViewportProgram(GLProgram):
def __init__(self):
GLProgram.__init__(self)
self.views = []
self.activeView = None
self.dragging = False
self.sizePolicy = 'fit'
self.broadcast = False
self.defaultSizes = []
#self.height = self.view.h
def initialize(self):
if not GLProgram.initialize(self): return False
for v in self.views:
v.window = self.window
if not v.initialize():
return False
return True
def addView(self,view):
if isinstance(view,GLPluginInterface):
plugin = view
pview = GLPluginProgram()
pview.window = self.window
pview.setPlugin(view)
view = pview
assert isinstance(view,GLProgram)
self.views.append(view)
#spoofs reshape, motion functions
view.window = weakref.proxy(self)
self.defaultSizes.append((view.view.w,view.view.h))
self.fit()
#print "Added a view, total",len(self.views),"size now",self.view.w,self.view.h
return view
def removeView(self,view):
view.window = None
for i,p in enumerate(self.views):
if p is view:
self.views.pop(i)
self.defaultSizes.pop(i)
self.fit()
self.activeView = None
return
def clearViews(self):
for p in self.views:
p.window = None
self.views = []
self.defaultSizes = []
self.activeView = None
def updateActive(self,x,y):
if not self.view.contains(x,y):
return
self.activeView = None
for i,p in enumerate(self.views):
if p.view.contains(x,y):
#print "Selecting view",x,y,":",i
self.activeView = i
return
return
def fit(self):
if len(self.views) == 0: return
rowlen = int(math.ceil(math.sqrt(len(self.views))))
assert rowlen > 0
rowheights = [0]*int(math.ceil(float(len(self.views))/rowlen))
colwidths = [0]*rowlen
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
rowheights[row] = max(self.defaultSizes[i][1],rowheights[row])
colwidths[col] = max(self.defaultSizes[i][0],colwidths[col])
cumrowheights = [0]
cumcolwidths = [0]
for h in rowheights:
cumrowheights.append(cumrowheights[-1]+h)
for w in colwidths:
cumcolwidths.append(cumcolwidths[-1]+w)
if self.sizePolicy == 'fit':
self.view.w = sum(colwidths)
self.view.h = sum(rowheights)
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
p.view.x,p.view.y = (cumcolwidths[col],cumrowheights[row])
self.width = self.view.w
self.height = self.view.h
if self.window != None:
self.window.reshape(self.view.w,self.view.h)
else:
#squeeze
self.width = self.view.w
self.height = self.view.h
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
p.view.x = float(self.view.w)*float(cumcolwidths[col])/float(cumcolwidths[-1])
p.view.y = float(self.view.h)*float(cumrowheights[row])/float(cumrowheights[-1])
p.view.w = float(self.view.w)*float(colwidths[col]) / float(cumcolwidths[-1])
p.view.h = float(self.view.h)*float(rowheights[row]) / float(cumrowheights[-1])
p.view.x = self.view.x+int(p.view.x)
p.view.y = self.view.y+int(p.view.y)
p.view.w = int(p.view.w)
p.view.h = int(p.view.h)
#print "View",i,"shape",(p.view.x,p.view.y,p.view.w,p.view.h)
p.reshapefunc(p.view.w,p.view.h)
if self.window != None:
self.refresh()
def reshapefunc(self,w,h):
if (w,h) != (self.view.w,self.view.h):
self.view.w,self.view.h = w,h
self.height = self.view.h
self.sizePolicy = 'squash'
self.fit()
return True
def displayfunc(self):
anyTrue = False
glClearColor(0,0,0,0)
glScissor(0,0,self.view.w,self.view.h)
glEnable(GL_SCISSOR_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for p in self.views:
try:
if p.displayfunc():
anyTrue = True
except Exception:
print "Error running displayfunc() for plugin",p.__class__.__name__
raise
return anyTrue
def display(self):
anyTrue = False
for p in self.views:
try:
if p.display():
anyTrue = True
except Exception:
print "Error running display() for plugin",p.__class__.__name__
raise
return anyTrue
def display_screen(self):
anyTrue = False
for p in self.views:
try:
if p.displa |
practice-vishnoi/dev-spark-1 | examples/src/main/python/ml/simple_text_classification_pipeline.py | Python | apache-2.0 | 2,809 | 0.001068 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import Row, SQLContext
"""
A simple text classification pipeline that recognizes "spark" from
input text. This is to show how to create and configure a Spark ML
pipeline in Python. Run with:
bin/spark-submit examples/src/main/python/ml/simple_text_classification_pipeline.py
"""
if __name__ == "__main__":
    sc = SparkContext(appName="SimpleTextClassificationPipeline")
    sqlContext = SQLContext(sc)
    # Prepare training documents, which are labeled.
    LabeledDocument = Row("id", "text", "label")
    training = sc.parallelize([(0, "a b c d e spark", 1.0),
                               (1, "b d", 0.0),
                               (2, "spark f g h", 1.0),
                               (3, "hadoop mapreduce", 0.0)]) \
        .map(lambda x: LabeledDocument(*x)).toDF()
    # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
    tokenizer = Tokenizer(inputCol="text", outputCol="words")
    hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
    lr = LogisticRegression(maxIter=10, regParam=0.001)
    pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
    # Fit the pipeline to training documents.
    model = pipeline.fit(training)
    # Prepare test documents, which are unlabeled.
    Document = Row("id", "text")
    # FIX: repaired dataset-separator artifact inside the "apache hadoop"
    # string literal ("apac | he hadoop").
    test = sc.parallelize([(4, "spark i j k"),
                           (5, "l m n"),
                           (6, "spark hadoop spark"),
                           (7, "apache hadoop")]) \
        .map(lambda x: Document(*x)).toDF()
    # Make predictions on test documents and print columns of interest.
    prediction = model.transform(test)
    selected = prediction.select("id", "text", "prediction")
    for row in selected.collect():
        print(row)
    sc.stop()
|
gustavomazevedo/tbackup-server | server/tests.py | Python | mit | 8,897 | 0.015398 |
from django.utils import timezone
from django.conf import settings
from django.test import TestCase
from django.core.files import File
from django.contrib.auth.models import User, AnonymousUser
from rest_framework.test import APITestCase
from rest_framework import status
import os
import shutil
import subprocess
import time
import paramiko
import mock
from datetime import datetime
from .models import (
Origin,
BaseDestination,
LocalDestination,
SFTPDestination,
APIDestination,
Backup
)
from .views import (
UserViewSet,
DestinationViewSet,
BackupViewSet
)
PATH=os.path.join(settings.BASE_DIR, 'examples')
def mock_sftp_connect(self):
    # Stand-in for SFTPDestination.connect used by the tests: opens a real
    # paramiko SFTP session against the local test sftpserver on port 3373,
    # authenticating with the checked-in test RSA key.  `self` is unused.
    pkey = paramiko.RSAKey.from_private_key_file('test_rsa.key')
    transport = paramiko.Transport(('localhost', 3373))
    transport.connect(username='admin', password='admin', pkey=pkey)
    return paramiko.SFTPClient.from_transport(transport)
def create_sftp_server():
    # Spawns the `sftpserver` test daemon as a subprocess; callers are
    # responsible for kill()-ing the returned Popen handle.
    return subprocess.Popen(['sftpserver', '-k', 'test_rsa.key', '-l', 'WARNING'])
def rm_dir_files(dirname):
for fname in os.listdir(dirname):
file_path = os.path.join(dirname, fname)
try | :
if os.path.isfile(file_path) and 'reactive_course source code_reactive-week1.zip' not in file_path:
print 'remove file %s' % file_path
os.remove(file_path)
elif os.path.isdir(file_path):
rm_dir_files(file_path)
print 'remove dir %s' % file_path
shutil.rmtree(file_path)
except Exception, e:
print ' | ERROR %s' % e
pass
# Create your tests here.
class DestinationCase(TestCase):
    """Exercises Backup.backup()/restore() against local and SFTP destinations.

    setUp creates one user, local/SFTP/API destination fixtures and two
    pending Backup rows (one local, one SFTP).  The SFTP tests start a real
    `sftpserver` subprocess and patch SFTPDestination.connect to talk to it.
    The round-trip fixture is the zip file at ``self.fn``.
    """
    def setUp(self):
        o = Origin.objects.create(
            name = 'Guadalupe',
            plan = 'blablablablabla'
        )
        self.user = User.objects.create(
            username='Guadalupe',
            email='g@g.com'
        )
        ld1 = LocalDestination.objects.create(
            name = 'HD1',
            directory = os.path.join(PATH, 'destination1')
        )
        ld2 = LocalDestination.objects.create(
            name = 'HD2',
            directory = os.path.join(PATH, 'destination2')
        )
        sftp1 = SFTPDestination.objects.create(
            name = 'TestSFTPDestination',
            hostname = 'localhost',
            port = '3373',
            username = 'admin',
            key_filename = os.path.expanduser('test_rsa.key')
        )
        api1 = APIDestination.objects.create(
            name = 'Amazon S3',
            pubkey = r'TyByYXRvIHJvZXUgYSByb3VwYSBkbyByZWkgZGUgcm9tYQ',
            base_uri = r'https://aws.amazon.com/s3/',
            set_uri = r'/object/',
            get_uri = r'/object/'
        )
        dt = timezone.now()
        fn = 'backup_%s.tar.gz' % dt.strftime(settings.DT_FORMAT)
        # Two pending backups sharing the same name/date, differing only in
        # destination (local HD2 vs the test SFTP server).
        Backup.objects.create(
            user = self.user,
            #origin = o,
            name = fn,
            destination = ld2.basedestination_ptr,
            date = dt
        )
        Backup.objects.create(
            user = self.user,
            #origin = o,
            name = fn,
            destination = sftp1.basedestination_ptr,
            date = dt
        )
        # Payload file used for every backup/restore round trip below.
        self.fn = os.path.join(PATH, 'reactive_course source code_reactive-week1.zip')
    @classmethod
    def tearDownClass(cls):
        # Clean up everything the backups wrote (keeps the fixture zip).
        rm_dir_files('Guadalupe')
        rm_dir_files(PATH)
    def test_localbackup(self):
        # Backing up to a LocalDestination marks the row successful and
        # leaves all restore-related fields untouched.
        #b = Backup.objects.get(origin__pk=1,
        #                       destination__name='HD2')
        b = Backup.objects.get(user__pk=self.user.id,
                               destination__name='HD2')
        #contents = File(self.fn).open('rb')
        contents = File(open(self.fn, 'rb'))
        b.backup(contents)
        self.assertTrue(b.success)
        self.assertFalse(b.before_restore)
        self.assertFalse(b.after_restore)
        self.assertIsNone(b.restore_dt)
        self.assertIsNone(b.related_to)
        #print (b,
        #       b.name,
        #       b.origin,
        #       b.destination,
        #       b.date,
        #       b.success,
        #       b.before_restore,
        #       b.after_restore,
        #       b.restore_dt,
        #       b.related_to)
    def test_localrestore(self):
        # Restoring from the local destination yields exactly the bytes of
        # the fixture file written by test_localbackup.
        #b = Backup.objects.get(origin__pk=1,
        #                       destination__name='HD2')
        b = Backup.objects.get(user__pk=self.user.id,
                               destination__name='HD2')
        #print b.__dict__
        data = b.restore()
        self.assertIsNotNone(data)
        self.assertEquals(''.join(data), open(self.fn, 'rb').read())
        #if not data is None:
        #    print 'success'
        #    print len(data)
        #else:
        #    print 'fail'
    #@mock.patch.object(SFTPDestination, 'connect', side_effect='mock_sftp_connect')
    def test_sftpbackup(self):
        # Same assertions as the local case, but over a live sftpserver
        # subprocess; the short sleep gives the server time to bind.
        proc = create_sftp_server()
        time.sleep(0.3)
        b = None
        with mock.patch.object(SFTPDestination, 'connect', return_value=mock_sftp_connect(None)) as mocked_func:
            #b = Backup.objects.get(origin__pk=1,
            #                      destination__name='TestSFTPDestination')
            b = Backup.objects.get(user__pk=self.user.id,
                                   destination__name='TestSFTPDestination')
            contents = File(open(self.fn, 'rb'))
            b.backup(contents)
        proc.kill()
        self.assertTrue(b.success)
        self.assertFalse(b.before_restore)
        self.assertFalse(b.after_restore)
        self.assertIsNone(b.restore_dt)
        self.assertIsNone(b.related_to)
    def test_sftprestore(self):
        proc = create_sftp_server()
        time.sleep(0.3)
        data = None
        with mock.patch.object(SFTPDestination, 'connect', return_value=mock_sftp_connect(None)) as mocked_func:
            #b = Backup.objects.get(origin__pk=1,
            #                      destination__name='TestSFTPDestination')
            b = Backup.objects.get(user__pk=self.user.id,
                                   destination__name='TestSFTPDestination')
            data = b.restore()
        proc.kill()
        self.assertIsNotNone(data)
        self.assertEquals(''.join(data), open(self.fn, 'rb').read())
class APILoginTestCase(TestCase):
    """Checks API authentication via session login and via token header."""
    def setUp(self):
        # Each user's password equals its username, for convenience below.
        users = [
            { 'username': 'admin', 'email': 'a@a.com', 'is_superuser': True },
            { 'username': 'default', 'email': 'd@d.com', 'is_superuser': False }
        ]
        for u_info in users:
            user = User.objects.create(username=u_info['username'], email=u_info['email'], is_superuser=u_info['is_superuser'])
            user.set_password(u_info['username'])
            user.save()
    def test_api_authenticate_username_password(self):
        user = User.objects.get(username='admin')
        logged_in = self.client.login(username=user.username, password=user.username)
        self.client.logout()
        self.assertTrue(logged_in)
    def test_api_authenticate_token(self):
        # Token auth: /users/ must answer 200 with no error payload.
        user = User.objects.get(username='admin')
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.auth_token.key)
        response = self.client.get('/users/', format='json')
        self.client.logout()
        self.assertEquals(response.status_code, status.HTTP_200_OK)
        self.assertNotIn('error', response.data)
class APIAdminTestCase(APITestCase):
def setUp(self):
users = [
{ 'username': 'admin', 'email': 'a@a.com', 'is_superuser': True },
{ 'username': 'default', 'email': 'd@d.com', 'is_superuser': False }
]
for u_info in users:
user = User.objects.create(username=u_info['username'], email=u_info['email'], is_superuser=u_info['is_superuser'])
user.set_password(u_i |
dominicneeraj/Technex_api | bell/admin.py | Python | mit | 308 | 0.032468 | from django.contrib import admin
# FIX: repaired dataset-separator artifacts that split "bell.models" and
# "admin.ModelAdmin" and broke the syntax of this module.
# NOTE(review): star import kept because City, Restra and Deal come from it.
from bell.models import *

class CityAdmin(admin.ModelAdmin):
    """Admin list view for City: shows the city name."""
    list_display = ['name']

admin.site.register(City,CityAdmin)
admin.site.register(Restra)

class DealAdmin(admin.ModelAdmin):
    """Admin list view for Deal: shows the deal id and its restaurant name."""
    list_display = ['id','restra_name']

admin.site.register(Deal,DealAdmin)
|
OCA/l10n-italy | l10n_it_fatturapa_in/models/__init__.py | Python | agpl-3.0 | 91 | 0 | fr | om . import attachment
from . import account
from . import partner
from . import company | |
synergeticsedx/deployment-wipro | openedx/core/djangoapps/cors_csrf/views.py | Python | agpl-3.0 | 2,496 | 0.000801 | """Views for enabling cross-domain requests. """
import logging
import json
from django.conf import settings
from django.views.decorators.cache import cache_page
from django.http import HttpResponseNotFound
from edxmako.shortcuts import render_to_response
from .models import XDomainProxyConfiguration
# FIX: repaired dataset-separator artifact inside "getLogger(__name__)".
log = logging.getLogger(__name__)
# Cache lifetime (seconds) for the proxy page; overridable via Django
# settings, defaults to 15 minutes.
XDOMAIN_PROXY_CACHE_TIMEOUT = getattr(settings, 'XDOMAIN_PROXY_CACHE_TIMEOUT', 60 * 15)
@cache_page(XDOMAIN_PROXY_CACHE_TIMEOUT)
def xdomain_proxy(request):  # pylint: disable=unused-argument
    """Serve the xdomain proxy page.
    Internet Explorer 9 does not send cookie information with CORS,
    which means we can't make cross-domain POST requests that
    require authentication (for example, from the course details
    page on the marketing site to the enrollment API
    to auto-enroll a user in an "honor" track).
    The XDomain library [https://github.com/jpillora/xdomain]
    provides an alternative to using CORS.
    The library works as follows:
    1) A static HTML file ("xdomain_proxy.html") is served from courses.edx.org.
    The file includes JavaScript and a domain whitelist.
    2) The course details page (on edx.org) creates an invisible iframe
    that loads the proxy HTML file.
    3) A JS shim library on the course details page intercepts
    AJAX requests and communicates with JavaScript on the iframed page.
    The iframed page then proxies the request to the LMS.
    Since the iframed page is served from courses.edx.org,
    this is a same-domain request, so all cookies for the domain
    are sent along with the request.
    You can enable this feature and configure the domain whitelist
    using Django admin.
    """
    config = XDomainProxyConfiguration.current()
    if not config.enabled:
        return HttpResponseNotFound()
    # Idiom: build the cleaned whitelist with a comprehension instead of a
    # manual append loop; blank lines in the stored whitelist are skipped.
    allowed_domains = [
        domain.strip()
        for domain in config.whitelist.split("\n")
        if domain.strip()
    ]
    if not allowed_domains:
        log.warning(
            u"No whitelist configured for cross-domain proxy. "
            u"You can configure the whitelist in Django Admin "
            u"using the XDomainProxyConfiguration model."
        )
        return HttpResponseNotFound()
    # XDomain master map: every whitelisted domain may proxy any path ('*').
    context = {
        'xdomain_masters': json.dumps({
            domain: '*'
            for domain in allowed_domains
        })
    }
    return render_to_response('cors_csrf/xdomain_proxy.html', context)
|
apiaas/gae-search | src/api/urls.py | Python | gpl-3.0 | 350 | 0.008571 | from django.conf.urls import patterns, url
from . import views
# URL routes for the API app.
# FIX: repaired dataset-separator artifact that split "urlpatterns".
# NOTE(review): `patterns()` was removed in Django 1.10 -- on modern Django
# this should be a plain list of url()/path() entries; kept for the Django
# version this project targets.
urlpatterns = patterns('',
    url(r'^endpoints/$', views.endpoint_list, name='endpoint_list'),
    url(r'^endpoints/(?P<path>[^?#]+)/$', views.endpoint_details, name='endpoint_details'),
    url(r'^api/(?P<path>[^?#]+)/$', views.application_endpoint, name='application_endpoint'),
)
|
enigmampc/catalyst | catalyst/pipeline/factors/factor.py | Python | apache-2.0 | 58,406 | 0 | """
factor.py
"""
from functools import wraps
from operator import attrgetter
from numbers import Number
from math import ceil
from numpy import empty_like, inf, nan, where
from scipy.stats import rankdata
from catalyst.errors import BadPercentileBounds, UnknownRankMethod
from catalyst.lib.normalize import naive_grouped_rowwise_apply
from catalyst.lib.rank import masked_rankdata_2d, rankdata_1d_descending
from catalyst.pipeline.api_utils import restrict_to_dtype
from catalyst.pipeline.classifiers import Classifier, Everything, Quantiles
from catalyst.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from catalyst.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
NotNullFilter,
NullFilter,
)
from catalyst.pipeline.mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from catalyst.pipeline.sentinels import NotSpecified, NotSpecifiedType
from catalyst.pipeline.term import ComputableTerm, Term
from catalyst.utils.functional import with_doc, with_name
from catalyst.utils.input_validation import expect_types
from catalyst.utils.math_utils import nanmean, nanstd
from catalyst.utils.memoize import classlazyval
from catalyst.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
coerce_to_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
)
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def coerce_numbers_to_my_dtype(f):
    """
    Decorator for binary methods ``f(self, other)`` that converts a numeric
    ``other`` into ``self.dtype`` before calling ``f``.

    This lets comparisons and arithmetic between ``Factor`` instances and
    bare Python literals behave the same regardless of whether the user
    wrote an int or a float: e.g. in ``my_factor > 3`` the integer ``3`` is
    converted to the factor's (float64) dtype before the comparison runs.
    Non-numeric operands are passed through untouched.
    """
    @wraps(f)
    def method(self, other):
        operand = coerce_to_dtype(self.dtype, other) if isinstance(other, Number) else other
        return f(self, operand)
    return method
def binop_return_type(op):
    """Return the term type produced by binary operator `op`: comparison
    operators yield filters, arithmetic operators yield factors."""
    return NumExprFilter if is_comparison(op) else NumExprFactor
def binop_return_dtype(op, left, right):
    """
    Compute the expected return dtype for the given binary operator.

    Parameters
    ----------
    op : str
        Operator symbol, (e.g. '+', '-', ...).
    left : numpy.dtype
        Dtype of left hand side.
    right : numpy.dtype
        Dtype of right hand side.

    Returns
    -------
    outdtype : numpy.dtype
        The dtype of the result of `left <op> right`.
    """
    # FIX: repaired dataset-separator artifacts that had split the docstring
    # line above and the first `raise TypeError(` below.
    if is_comparison(op):
        # Comparisons require matching dtypes and always produce booleans.
        if left != right:
            raise TypeError(
                "Don't know how to compute {left} {op} {right}.\n"
                "Comparisons are only supported between Factors of equal "
                "dtypes.".format(left=left, op=op, right=right)
            )
        return bool_dtype
    elif left != float64_dtype or right != float64_dtype:
        # Arithmetic is only defined on float64 factors.
        raise TypeError(
            "Don't know how to compute {left} {op} {right}.\n"
            "Arithmetic operators are only supported between Factors of "
            "dtype 'float64'.".format(
                left=left.name,
                op=op,
                right=right.name,
            )
        )
    return float64_dtype
def binary_operator(op):
    """
    Factory function for making binary operator methods on a Factor subclass.
    Returns a function, "binary_operator" suitable for implementing functions
    like __add__.
    """
    # When combining a Factor with a NumericalExpression, we use this
    # attrgetter instance to defer to the commuted implementation of the
    # NumericalExpression operator.
    commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
    @with_doc("Binary Operator: '%s'" % op)
    @with_name(method_name_for_op(op))
    @coerce_numbers_to_my_dtype
    def binary_operator(self, other):
        # This can't be hoisted up a scope because the types returned by
        # binop_return_type aren't defined when the top-level function is
        # invoked in the class body of Factor.
        return_type = binop_return_type(op)
        if isinstance(self, NumExprFactor):
            # self is already a compiled expression: merge the operand into
            # its expression string and input list.
            self_expr, other_expr, new_inputs = self.build_binary_op(
                op, other,
            )
            return return_type(
                "({left}) {op} ({right})".format(
                    left=self_expr,
                    op=op,
                    right=other_expr,
                ),
                new_inputs,
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, NumExprFactor):
            # NumericalExpression overrides ops to correctly handle merging of
            # inputs. Look up and call the appropriate reflected operator with
            # ourself as the input.
            return commuted_method_getter(other)(self)
        elif isinstance(other, Term):
            if self is other:
                # Same term on both sides: bind a single input (x_0) twice
                # rather than passing the term as two separate inputs.
                return return_type(
                    "x_0 {op} x_0".format(op=op),
                    (self,),
                    dtype=binop_return_dtype(op, self.dtype, other.dtype),
                )
            return return_type(
                "x_0 {op} x_1".format(op=op),
                (self, other),
                dtype=binop_return_dtype(op, self.dtype, other.dtype),
            )
        elif isinstance(other, Number):
            # Numeric literal: embed the constant directly in the expression.
            return return_type(
                "x_0 {op} ({constant})".format(op=op, constant=other),
                binds=(self,),
                # .dtype access is safe here because coerce_numbers_to_my_dtype
                # will convert any input numbers to numpy equivalents.
                dtype=binop_return_dtype(op, self.dtype, other.dtype)
            )
        raise BadBinaryOperator(op, self, other)
    return binary_operator
def reflected_binary_operator(op):
    """
    Factory function for making binary operator methods on a Factor.
    Returns a function, "reflected_binary_operator" suitable for implementing
    functions like __radd__.
    """
    # Comparisons never dispatch through the reflected path here; their
    # reflected forms are handled by the comparison machinery instead.
    assert not is_comparison(op)
    @with_name(method_name_for_op(op, commute=True))
    @coerce_numbers_to_my_dtype
    def reflected_binary_operator(self, other):
        if isinstance(self, NumericalExpression):
            self_expr, other_expr, new_inputs = self.build_binary_op(
                op, other
            )
            # Note the swapped operand order relative to binary_operator:
            # `other` is the left-hand side of the reflected operation.
            return NumExprFactor(
                "({left}) {op} ({right})".format(
                    left=other_expr,
                    right=self_expr,
                    op=op,
                ),
                new_inputs,
                dtype=binop_return_dtype(op, other.dtype, self.dtype)
            )
        # Only have to handle the numeric case because in all other valid cases
        # the corresponding left-binding method will be called.
        elif isinstance(other, Number):
            return NumExprFactor(
                "{constant} {op} x_0".format(op=op, constant=other),
                binds=(self,),
                dtype=binop_return_dtype(op, other.dtype, self.dtype),
            )
        raise BadBinaryOperator(op, other, self)
    return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype { |
ul-fmf/projekt-tomo | web/courses/models.py | Python | agpl-3.0 | 20,320 | 0.002805 | from django.db import models
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from users.models import User
from utils.models import OrderWithRespectToMixin
from taggit.managers import TaggableManager
from attempts.models import Attempt, HistoricalAttempt
from problems.models import Part
from copy import deepcopy
class Institution(models.Model):
    """An institution (e.g. a school or university) that courses belong to."""
    # Human-readable institution name, also used as the display string.
    name = models.CharField(max_length=140)
    def __str__(self):
        return self.name
class Course(models.Model):
title = models.CharField(max_length=70)
description = models.TextField(blank=True)
students = models.ManyToManyField(User, blank=True, related_name='courses', through='StudentEnrollment')
teachers = models.ManyToManyField(User, blank=True, related_name='taught_courses')
institution = models.ForeignKey(Institution, related_name='institution')
tags = TaggableManager(blank=True)
class Meta:
ordering = ['institution', 'title']
def __str__(self):
return '{} @{{{}}}'.format(self.title, self.institution)
def get_absolute_url(self):
from django.core.urlresolvers import reverse
return reverse('course_detail', args=[str(self.pk)])
def recent_problem_sets(self, n=3):
return self.problem_sets.reverse().filter(visible=True)[:n]
    def user_attempts(self, user):
        """Collect `user`'s attempts for this course, grouped and tallied.

        Returns a list with one entry per problem set:
        (problem_set, [(problem, attempts, valid, invalid, empty), ...],
         total_valid, total_invalid, total_empty)
        where each problem's `attempts` list holds the user's Attempt for
        every part (None when the part was never attempted).
        """
        # Index the user's attempts by part id for O(1) lookup below.
        attempts = {}
        for attempt in user.attempts.filter(part__problem__problem_set__course=self):
            attempts[attempt.part_id] = attempt
        sorted_attempts = []
        for problem_set in self.problem_sets.all().prefetch_related('problems__parts'):
            problem_set_attempts = []
            prob_set_valid = prob_set_invalid = prob_set_empty = 0
            for problem in problem_set.problems.all():
                valid = invalid = empty = 0
                problem_attempts = [attempts.get(part.pk) for part in problem.parts.all()]
                for attempt in problem_attempts:
                    if attempt is None:
                        empty += 1
                    elif attempt.valid:
                        valid += 1
                    else:
                        invalid += 1
                problem_set_attempts.append((problem, problem_attempts, valid, invalid, empty))
                prob_set_valid += valid
                prob_set_invalid += invalid
                prob_set_empty += empty
            sorted_attempts.append((problem_set, problem_set_attempts, prob_set_valid, prob_set_invalid, prob_set_empty))
        return sorted_attempts
def prepare_annotated_problem_sets(self, user):
self.is_taught = user.can_edit_course(self)
self.is_favourite = user.is_favourite_course(self)
self.annotated_problem_sets = []
for problem_set in self.problem_sets.all():
if user.can_view_problem_set(problem_set):
self.annotated_problem_sets.append(problem_set)
def annotate(self, user):
if self.is_taught:
self.annotate_for_teacher()
else:
self.annotate_for_user(user)
    def annotate_for_user(self, user):
        """Attach `percentage` and a 1-5 `grade` to each annotated problem set.

        Requires prepare_annotated_problem_sets() to have been called first.
        A missing percentage counts as 0; grade = 1 + percentage // 20, capped at 5.
        """
        for problem_set in self.annotated_problem_sets:
            problem_set.percentage = problem_set.valid_percentage(user)
            if problem_set.percentage is None:
                problem_set.percentage = 0
            problem_set.grade = min(5, int(problem_set.percentage / 20) + 1)
def annotate_for_teacher(self):
students = self.observed_students()
student_count = len(students)
part_sets = Part.objects.filter(problem__problem_set__in=self.annotated_problem_sets)
parts_count = part_sets.values('problem__problem_set_id').annotate(count=Count('problem__problem_set_id')).order_by('count')
parts_dict = {}
for part in parts_count:
problem_set_id = part['problem__problem_set_id']
parts_dict[problem_set_id] = part['count']
attempts_full = Attempt.objects.filter(user__in=students,
part__problem__problem_set__in=self.annotated_problem_sets)
attempts = attempts_full.values('valid', 'part__problem__problem_set_id')
attempts_dict = {}
for attempt in attempts:
problem_set_id = attempt['part__problem__problem_set_id']
if problem_set_id in attempts_dict:
attempts_dict[problem_set_id]['submitted_count'] += 1
attempts_dict[problem_set_id]['valid_count'] += 1 if attempt['valid'] else 0
else:
attempts_dict[problem_set_id] = {
'submitted_count': 1,
'valid_count': 1 if attempt['valid'] else 0
}
for problem_set in self.annotated_problem_sets:
part_count = parts_dict[problem_set.id] if problem_set.id in parts_dict else 0
if problem_set.id in attempts_dict:
submitted_count = attempts_dict[problem_set.id]['submitted_count']
valid_count = attempts_dict[problem_set.id]['valid_count']
else:
submitted_count = 0
valid_count = 0
invalid_count = submitted_count - valid_count
total_count = student_count * part_count
if total_count:
valid_percentage = int(100.0 * valid_count / total_count)
invalid_percentage = int(100.0 * invalid_count / total_count)
else:
valid_percentage = 0
invalid_percentage = 0
empty_percentage = 100 - valid_percentage - invalid_percentage
problem_set.valid = valid_percentage
problem_set.invalid = invalid_percentage
problem_set.empty = empty_percentage
problem_set.grade = min(5, int(valid_percentage / 20) + 1)
def enroll_student(self, user):
enrollment = StudentEnrollment(course=self, user=user)
enrollment.save()
def unenroll_ | stu | dent(self, user):
enrollment = StudentEnrollment.objects.get(course=self, user=user)
enrollment.delete()
def promote_to_teacher(self, user):
self.unenroll_student(user)
self.teachers.add(user)
def demote_to_student(self, user):
self.enroll_student(user)
self.teachers.remove(user)
def toggle_observed(self, user):
enrollment = StudentEnrollment.objects.get(course=self, user=user)
enrollment.observed = not enrollment.observed
enrollment.save()
def observed_students(self):
return User.objects.filter(studentenrollment__course=self, studentenrollment__observed=True).order_by('first_name')
def student_success(self):
students = self.observed_students()
problem_sets = self.problem_sets.filter(visible=True)
part_count = Part.objects.filter(problem__problem_set__in=problem_sets).count()
attempts = Attempt.objects.filter(part__problem__problem_set__in=problem_sets)
valid_attempts = attempts.filter(valid=True).values('user').annotate(Count('user'))
all_attempts = attempts.values('user').annotate(Count('user'))
def to_dict(attempts):
attempts_dict = {}
for val in attempts:
attempts_dict[val['user']] = val['user__count']
return attempts_dict
valid_attempts_dict = to_dict(valid_attempts)
all_attempts_dict = to_dict(all_attempts)
for student in students:
student.valid = valid_attempts_dict.get(student.pk, 0)
student.invalid = all_attempts_dict.get(student.pk, 0) - student.valid
student.empty = part_count - student.valid - student.invalid
return students
def duplicate(self):
new_course = deepcopy(self)
new_course.id = None
new_course.title += ' (copy)'
new_course.save()
for problem_set in self.problem_sets.all():
problem_set.copy_to(new_course)
return new_course
def student_success_by_problem_set(self):
|
vpv11110000/pyss | setup.py | Python | mit | 533 | 0.005629 | # -*- coding: utf-8 -*-
try:
    # Prefer setuptools; fall back to distutils when it is unavailable.
    from setuptools import setup
except ImportError:
    from distutils.core import setup
from setuptools import find_packages
from os.path import join, dirname
import pyss
import unittest

setup(
    name='pyss',
    version=pyss.__version__,
    packages=find_packages(),
    long_description=open(join(dirname(__file__), 'README.rst')).read(),
    install_requires=[
        'matplotlib==2.0.2'
        #,'Flask==0.8'
    ],
    include_package_data=True,
    test_suite='discover_tests',
)
|
AngelTerrones/Algol | Core/regfile.py | Python | mit | 2,952 | 0 | #!/usr/bin/env python
# Copyright (c) 2015 Angel Terrones (<angelterrones@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from myhdl import Signal
from myhdl import always
from myhdl import always_comb
from myhdl import modbv
class RFReadPort:
    """
    IO bundle for one read port of the register file.

    :ivar ra: 5-bit read address
    :ivar rd: 32-bit read data
    """
    def __init__(self):
        """Create the address and data signals with all bits cleared."""
        self.ra = Signal(modbv(0)[5:])   # selects one of the 32 registers
        self.rd = Signal(modbv(0)[32:])  # data read from the selected register
class RFWritePort:
    """
    IO bundle for the write port of the register file.

    :ivar wa: 5-bit write address
    :ivar we: write enable flag
    :ivar wd: 32-bit write data
    """
    def __init__(self):
        """Create the address, enable and data signals with default values."""
        self.wa = Signal(modbv(0)[5:])   # selects one of the 32 registers
        self.we = Signal(False)          # write occurs only while asserted
        self.wd = Signal(modbv(0)[32:])  # data stored on the clock edge
def RegisterFile(clk,
                 portA,
                 portB,
                 writePort):
    """
    The Register File (RF) module.

    32 32-bit registers, with register 0 hardwired to zero.  Two
    asynchronous read ports and one synchronous (clocked) write port.

    :param clk: System clock
    :param portA: IO bundle (read port)
    :param portB: IO bundle (read port)
    :param writePort: IO bundle (write port)
    """
    _registers = [Signal(modbv(0)[32:]) for _ in range(32)]

    @always_comb
    def read():
        """Asynchronous reads; address 0 always yields zero."""
        portA.rd.next = _registers[portA.ra] if portA.ra != 0 else 0
        portB.rd.next = _registers[portB.ra] if portB.ra != 0 else 0

    @always(clk.posedge)
    def write():
        """Synchronous write; writes to address 0 are silently ignored."""
        if writePort.wa != 0 and writePort.we == 1:
            _registers[writePort.wa].next = writePort.wd

    return read, write
# Local Variables:
# flycheck-flake8-maximum-line-length: 120
# flycheck-flake8rc: ".flake8rc"
# End:
|
almarklein/scikit-image | skimage/util/shape.py | Python | bsd-3-clause | 7,382 | 0.000135 | __all__ = ['view_as_blocks', 'view_as_windows']
import numpy as np
from numpy.lib.stride_tricks import as_strided
def view_as_blocks(arr_in, block_shape):
    """Block view of the input n-dimensional array (using re-striding).

    Blocks are non-overlapping views of the input array.

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    block_shape: tuple
        The shape of the block. Each dimension must divide evenly into the
        corresponding dimensions of `arr_in`.

    Returns
    -------
    arr_out: ndarray
        Block view of the input array.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_blocks
    >>> A = np.arange(4*4).reshape(4,4)
    >>> A
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    >>> B[1, 0, 1, 1]
    13
    """
    # -- basic checks on arguments
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')

    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")

    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")

    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")

    # -- restride the array to build the block view
    arr_in = np.ascontiguousarray(arr_in)

    # NOTE: floor division is required here.  True division ('/') yields a
    # float array, and a float shape is rejected by as_strided under
    # Python 3 / modern NumPy.
    new_shape = tuple(arr_shape // block_shape) + tuple(block_shape)
    new_strides = tuple(arr_in.strides * block_shape) + arr_in.strides

    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)

    return arr_out
def view_as_windows(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Windows are overlapping views of the input array, with adjacent windows
    shifted by ``step`` elements along each dimension.

    Parameters
    ----------
    arr_in: ndarray
        The n-dimensional input array.
    window_shape: tuple
        Defines the shape of the elementary n-dimensional orthotope
        (better know as hyperrectangle [1]_) of the rolling window view.
    step : int or tuple of int
        Number of elements to skip when moving the window forward (by
        default, move forward by one).  A single int applies the same step
        to every axis; a tuple gives the step per axis and must have the
        same length as ``arr_in.shape``.

    Returns
    -------
    arr_out: ndarray
        (rolling) window view of the input array.

    Notes
    -----
    One should be very careful with rolling views when it comes to
    memory usage.  Indeed, although a 'view' has the same memory
    footprint as its base array, the actual array that emerges when this
    'view' is used in a computation is generally a (much) larger array
    than the original, especially for 2-dimensional arrays and above.

    For example, let us consider a 3 dimensional array of size (100,
    100, 100) of ``float64``. This array takes about 8*100**3 Bytes for
    storage which is just 8 MB. If one decides to build a rolling view
    on this array with a window of (3, 3, 3) the hypothetical size of
    the rolling view (if one was to reshape the view for example) would
    be 8*(100-3+1)**3*3**3 which is about 203 MB! The scaling becomes
    even worse as the dimension of the input array becomes larger.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Hyperrectangle

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_windows
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_windows(A, (2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[0, 1]
    array([[1, 2],
           [5, 6]])
    >>> C = view_as_windows(np.arange(10), (3,))
    >>> C.shape
    (8, 3)
    """
    # -- basic checks on arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    if not isinstance(window_shape, tuple):
        raise TypeError("`window_shape` must be a tuple")
    if not (len(window_shape) == arr_in.ndim):
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")

    # -- normalize `step`: a scalar (the original, backward-compatible form)
    #    is broadcast to every axis; a sequence gives one step per axis.
    if np.isscalar(step):
        if step < 1:
            raise ValueError("`step` must be >= 1")
        step = (int(step),) * arr_in.ndim
    if len(step) != arr_in.ndim:
        raise ValueError("`step` is incompatible with `arr_in.shape`")
    step = np.array(step)
    if (step < 1).any():
        raise ValueError("`step` must be >= 1")

    arr_shape = np.array(arr_in.shape)
    window_shape = np.array(window_shape, dtype=arr_shape.dtype)

    if ((arr_shape - window_shape) < 0).any():
        raise ValueError("`window_shape` is too large")

    if ((window_shape - 1) < 0).any():
        raise ValueError("`window_shape` is too small")

    # -- build rolling window view
    arr_in = np.ascontiguousarray(arr_in)

    new_shape = tuple((arr_shape - window_shape) // step + 1) + \
        tuple(window_shape)

    arr_strides = np.array(arr_in.strides)
    new_strides = np.concatenate((arr_strides * step, arr_strides))
    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)

    return arr_out
|
StephDC/MiniBioKit | bioChemData/protein.py | Python | gpl-3.0 | 1,344 | 0.019345 | from . import sqldb
class aminoAcid():
    """A single amino acid with its one- and three-letter abbreviations."""
    def __init__(self, abbr1, abbr3, name):
        self.abbr1 = abbr1  # one-letter code, e.g. 'A'
        self.abbr3 = abbr3  # three-letter code, e.g. 'Ala'
        self.name = name    # full name, e.g. 'Alanine'
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.name
    def getOne(self):
        """Return the one-letter abbreviation."""
        return self.abbr1
    def getThree(self):
        """Return the three-letter abbreviation."""
        return self.abbr3
class aminoAcidDB():
    """Amino-acid lookup by three-letter code, backed by the SQLite data file."""
    def __init__(self):
        self.db = sqldb.sqliteDB('bioChemData/data.sql', 'protein')
    def getAA3(self, abbr3):
        """Return the aminoAcid whose three-letter code is *abbr3*."""
        one_letter = self.db.getItem(abbr3, 'one')
        full_name = self.db.getItem(abbr3, 'name')
        return aminoAcid(one_letter, abbr3, full_name)
class translateDB():
    """Codon -> three-letter amino-acid code lookup, backed by the SQLite data file."""
    def __init__(self):
        self.db = sqldb.sqliteDB('bioChemData/data.sql', 'translate')
    def getAA3(self, codon):
        """Return the three-letter amino-acid code encoded by *codon*."""
        return self.db.getItem(codon, 'protein')
def codonTranslate(codon, codonDB, aaDB):
    """Translate *codon* into an amino acid using the two lookup databases."""
    three_letter = codonDB.getAA3(codon)
    return aaDB.getAA3(three_letter)
def nucleotideTranslation(posStrand):
    """Translate a positive-sense DNA strand into a one-letter protein string.

    Scans forward to the first 'ATG' start codon, then translates codon by
    codon until a stop codon is reached (its one-letter code appears to be
    'X' in the database — TODO confirm) or the strand runs out of complete
    codons.  Note the stop letter itself is included in the returned string.
    Returns '' when no start codon is found.
    """
    pointer = 0
    result = ''
    lastAA = 'M'
    adb = aminoAcidDB()
    cdb = translateDB()
    # Advance one base at a time until the codon at `pointer` is the start
    # codon, or fewer than 3 bases remain.
    while posStrand[pointer:pointer+3] != 'ATG' and pointer <= len(posStrand)-3:
        pointer += 1
    # Translate whole codons; stop when the last amino acid was 'X' (stop)
    # or no complete codon is left.
    while pointer <= len(posStrand)-3 and lastAA != 'X':
        lastAA = adb.getAA3(cdb.getAA3(posStrand[pointer:pointer+3])).getOne()
        result += lastAA
        pointer += 3
    return result
|
doclements/pywps-4 | pywps/storage.py | Python | mit | 2,563 | 0.002341 | from abc import ABCMeta, abstractmethod, abstractproperty
import os
class STORE_TYPE:
    """Enumeration of supported storage back-end types."""
    PATH = 0  # data stored as a file on the local file system
# TODO: cover with tests
class StorageAbstract(object):
    """Data storage abstract class
    """
    # Python 2 style ABC declaration (this file targets Python 2: see the
    # ConfigParser/urlparse usage below); inert under Python 3.
    __metaclass__ = ABCMeta

    @abstractmethod
    def store(self, output):
        """Store *output* and report where it went.

        NOTE: the abstract signature previously took no `output` argument,
        contradicting its own docstring and every concrete subclass; it is
        now aligned with the implementations.

        :param output: of type IOHandler
        :returns: (type, store, url) where
            type - is type of STORE_TYPE - number
            store - string describing storage - file name, database connection
            url - url, where the data can be downloaded
        """
        pass


class DummyStorage(StorageAbstract):
    """Dummy empty storage implementation, does nothing

    Default instance, for non-reference output request

    >>> store = DummyStorage()
    >>> assert store.store
    """

    def __init__(self, config=None):
        """
        :param config: storage configuration object (unused; kept for
            interface compatibility with other back-ends)
        """
        self.config = config

    def store(self, output):
        """Discard *output*; a dummy storage stores nothing.

        :param output: of type IOHandler (ignored; parameter renamed from
            the misspelled `ouput` to match the base class)
        """
        pass
class FileStorage(StorageAbstract):
    """File storage implementation, stores data to file system

    >>> import ConfigParser
    >>> config = ConfigParser.RawConfigParser()
    >>> config.add_section('FileStorage')
    >>> config.set('FileStorage', 'target', './')
    >>> config.add_section('server')
    >>> config.set('server', 'outputurl', 'http://foo/bar/filestorage')
    >>>
    >>> store = FileStorage(config = config)
    >>>
    >>> class FakeOutput(object):
    ...     def __init__(self):
    ...         self.file = self._get_file()
    ...     def _get_file(self):
    ...         tiff_file = open('file.tiff', 'w')
    ...         tiff_file.close()
    ...         return 'file.tiff'
    >>> fake_out = FakeOutput()
    >>> (type, path, url) = store.store(fake_out)
    >>> type == STORE_TYPE.PATH
    True
    """

    def __init__(self, config):
        """
        :param config: storage configuration object with a [FileStorage]
            ``target`` directory and a [server] ``outputurl`` base URL
        """
        self.target = config.get('FileStorage', 'target')
        self.outputurl = config.get('server', 'outputurl')

    def store(self, output):
        """Copy ``output.file`` into the target directory.

        :param output: of type IOHandler
        :returns: (STORE_TYPE.PATH, stored file path, download url)
        """
        import shutil
        import tempfile
        try:
            from urlparse import urljoin       # Python 2
        except ImportError:
            from urllib.parse import urljoin   # Python 3

        file_name = output.file
        (prefix, suffix) = os.path.splitext(file_name)
        # mkstemp returns an *open* OS-level file descriptor along with the
        # name; close it immediately so the descriptor does not leak (the
        # previous code discarded it with `mkstemp(...)[1]`).
        (fd, output_name) = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                             dir=self.target)
        os.close(fd)
        shutil.copy2(output.file, output_name)
        just_file_name = os.path.basename(output_name)
        url = urljoin(self.outputurl, just_file_name)
        return (STORE_TYPE.PATH, output_name, url)
|
NeCTAR-RC/nectar-images | community_image_tests/setup.py | Python | apache-2.0 | 1,054 | 0 | # Copyright (c) 2016, Monash e-Research Centre
# (Monash University, Australia)
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa: F401 -- imported for its side effect only
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=1.8'],
    pbr=True,
    install_requires=['Tempest'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.