code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
# Copyright (C) 2011-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""RFC 2369 List-* and related headers."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'RFC2369',
]
from email.utils import formataddr
from zope.interface import implementer
from mailman.config import config
from mailman.core.i18n import _
from mailman.handlers.cook_headers import uheader
from mailman.interfaces.archiver import ArchivePolicy
from mailman.interfaces.handler import IHandler
# Joiner used to fold an over-long header value onto continuation lines.
CONTINUATION = ',\n\t'
def process(mlist, msg, msgdata):
    """Add the RFC 2369 List-* and related headers.

    :param mlist: the mailing list whose attributes drive the header values.
    :param msg: the message being processed; headers are modified in place.
    :param msgdata: per-message metadata; the 'reduced_list_headers' key
        suppresses the List-Post and archive headers (List-Help,
        List-Subscribe and List-Unsubscribe are always added).
    """
    # Some people really hate the List-* headers.  It seems that the free
    # version of Eudora (possibly on for some platforms) does not hide these
    # headers by default, pissing off their users.  Too bad.  Fix the MUAs.
    if not mlist.include_rfc2369_headers:
        return
    list_id = '{0.list_name}.{0.mail_host}'.format(mlist)
    if mlist.description:
        # Don't wrap the header since here we just want to get it properly RFC
        # 2047 encoded.
        i18ndesc = uheader(mlist, mlist.description, 'List-Id', maxlinelen=998)
        listid_h = formataddr((str(i18ndesc), list_id))
    else:
        # Without a description, we need to ensure the MUST brackets.
        listid_h = '<{0}>'.format(list_id)
    # No other agent should add a List-ID header except Mailman.
    del msg['list-id']
    msg['List-Id'] = listid_h
    # For internally crafted messages, we also add a (nonstandard),
    # "X-List-Administrivia: yes" header.  For all others (i.e. those coming
    # from list posts), we add a bunch of other RFC 2369 headers.
    requestaddr = mlist.request_address
    subfieldfmt = '<{0}>, <mailto:{1}>'
    listinfo = mlist.script_url('listinfo')
    headers = {}
    # XXX reduced_list_headers used to suppress List-Help, List-Subject, and
    # List-Unsubscribe from UserNotification.  That doesn't seem to make sense
    # any more, so always add those three headers (others will still be
    # suppressed).
    headers.update({
        'List-Help': '<mailto:{0}?subject=help>'.format(requestaddr),
        'List-Unsubscribe': subfieldfmt.format(listinfo, mlist.leave_address),
        'List-Subscribe': subfieldfmt.format(listinfo, mlist.join_address),
        })
    if not msgdata.get('reduced_list_headers'):
        # List-Post: is controlled by a separate attribute, which is somewhat
        # misnamed.  RFC 2369 requires a value of NO if posting is not
        # allowed, i.e. for an announce-only list.
        list_post = ('<mailto:{0}>'.format(mlist.posting_address)
                     if mlist.allow_list_posts
                     else 'NO')
        headers['List-Post'] = list_post
        # Add RFC 2369 and 5064 archiving headers, if archiving is enabled.
        if mlist.archive_policy is not ArchivePolicy.never:
            for archiver in config.archivers:
                # NOTE(review): each iteration overwrites 'List-Archive' and
                # 'Archived-At', so only the last configured archiver's values
                # survive -- confirm this is intended.
                headers['List-Archive'] = '<{0}>'.format(
                    archiver.list_url(mlist))
                permalink = archiver.permalink(mlist, msg)
                if permalink is not None:
                    headers['Archived-At'] = permalink
    # XXX RFC 2369 also defines a List-Owner header which we are not currently
    # supporting, but should.
    for h, v in headers.items():
        # First we delete any pre-existing headers because the RFC permits
        # only one copy of each, and we want to be sure it's ours.
        del msg[h]
        # Wrap these lines if they are too long.  78 character width probably
        # shouldn't be hardcoded, but is at least text-MUA friendly.  The
        # adding of 2 is for the colon-space separator.
        if len(h) + 2 + len(v) > 78:
            v = CONTINUATION.join(v.split(', '))
        msg[h] = v
@implementer(IHandler)
class RFC2369:
    """Add the RFC 2369 List-* headers."""

    # Handler name used when registering this step in the pipeline.
    name = 'rfc-2369'
    description = _('Add the RFC 2369 List-* headers.')

    def process(self, mlist, msg, msgdata):
        """See `IHandler`."""
        # Delegate to the module-level implementation above.
        process(mlist, msg, msgdata)
|
hcs/mailman
|
src/mailman/handlers/rfc_2369.py
|
Python
|
gpl-3.0
| 4,802
|
import copy
from common.common_consts.post_breach_consts import POST_BREACH_COMMUNICATE_AS_BACKDOOR_USER
from monkey_island.cc.database import mongo
from monkey_island.cc.models import Monkey
from monkey_island.cc.services.telemetry.zero_trust_checks.communicate_as_backdoor_user import (
check_new_user_communication,
)
# Placeholder stored when a post-breach action produced an empty output string.
EXECUTION_WITHOUT_OUTPUT = "(PBA execution produced no output)"
def process_communicate_as_backdoor_user_telemetry(telemetry_json):
    """Feed a 'communicate as backdoor user' PBA result into the
    zero-trust check for the reporting monkey."""
    result = telemetry_json["data"]["result"]
    current_monkey = Monkey.get_single_monkey_by_guid(telemetry_json["monkey_guid"])
    # result[0] holds the output message, result[1] the success flag.
    check_new_user_communication(current_monkey, result[1], result[0])
# Dispatch table: maps a post-breach action name to the extra processing its
# telemetry needs before being normalized and stored.
POST_BREACH_TELEMETRY_PROCESSING_FUNCS = {
    POST_BREACH_COMMUNICATE_AS_BACKDOOR_USER: process_communicate_as_backdoor_user_telemetry,
}
def process_post_breach_telemetry(telemetry_json):
    """Process one post-breach telemetry message.

    Runs any action-specific processing registered in
    POST_BREACH_TELEMETRY_PROCESSING_FUNCS, normalizes the telemetry's
    "data" into a list of single-result entries, substitutes a placeholder
    for blank outputs, and pushes each entry to mongo via update_data.
    """

    def convert_telem_data_to_list(data):
        # A PBA that produced several results arrives with a list of result
        # pairs in "result"; split it so each stored entry holds exactly one.
        modified_data = [data]
        # isinstance is the idiomatic (and subclass-safe) type check here.
        if isinstance(data["result"][0], list):  # multiple results in one pba
            modified_data = separate_results_to_single_pba_telems(data)
        return modified_data

    def separate_results_to_single_pba_telems(data):
        # Deep-copy so each entry carries the full telemetry context while
        # holding just one result.
        modified_data = []
        for result in data["result"]:
            temp = copy.deepcopy(data)
            temp["result"] = result
            modified_data.append(temp)
        return modified_data

    def add_message_for_blank_outputs(data):
        # An empty output string is falsy; substitute a human-readable note.
        if not data["result"][0]:
            data["result"][0] = EXECUTION_WITHOUT_OUTPUT
        return data

    post_breach_action_name = telemetry_json["data"]["name"]
    if post_breach_action_name in POST_BREACH_TELEMETRY_PROCESSING_FUNCS:
        POST_BREACH_TELEMETRY_PROCESSING_FUNCS[post_breach_action_name](telemetry_json)
    telemetry_json["data"] = convert_telem_data_to_list(telemetry_json["data"])
    for pba_data in telemetry_json["data"]:
        pba_data = add_message_for_blank_outputs(pba_data)
        update_data(telemetry_json, pba_data)
def update_data(telemetry_json, data):
    """Append one PBA result entry to the reporting monkey's document."""
    selector = {"guid": telemetry_json["monkey_guid"]}
    mongo.db.monkey.update(selector, {"$push": {"pba_results": data}})
|
guardicore/monkey
|
monkey/monkey_island/cc/services/telemetry/processing/post_breach.py
|
Python
|
gpl-3.0
| 2,187
|
# coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Contains the Application class that shares global states to the entire app.
"""
import sys
import os
from Queue import Queue
import imp
import logging
import imp
from importlib import import_module
from imp import find_module
from PyQt4.QtCore import (QTimer, QTranslator)
from PyQt4.QtGui import QApplication
from PyQt4 import uic
from ic import messages
from ic import engine
from ic import settings
LOG = logging.getLogger(__name__)
class UILoadError(Exception):
    """Raised when the uic module fails to load an ui file.
    """
def get_app():
    """Return the unique instance of the Application class.

    Returns None when no Application has been constructed yet.
    """
    return Application.instance()
class Application(object):
    """The Main application class.

    The Application class is responsible for holding global data that will be
    shared between the whole application.  It also provides a message
    dispatching system for intercommunication and methods for loading
    resources.

    This class is a singleton.  The instance can be retrieved by the function
    get_app available in the module `application`.

    Attributes:
        settings: a dictionary containing application-wide settings.
        user_options: a dictionary containing options that can be changed by
            the user when interacting with the GUI.
    """
    # Singleton instance; set exactly once in __init__.
    _INSTANCE = None
    # Directory the main script was launched from (sys.path[0]).
    PATH = ""

    def __init__(self, argv):
        # Enforce the singleton contract: constructing a second Application
        # is a programming error.
        assert Application._INSTANCE is None
        Application._INSTANCE = self
        self._qapp = QApplication(argv)
        pjoin = os.path.join
        # The application main path (the local where the main script was
        # invoked by the interpreter)
        Application.PATH = sys.path[0]
        settings.change("resources_dir", pjoin(self.PATH, "res"))
        settings.change("lang_dir", pjoin(self.PATH, "lang"))
        # Holds all the ui files loaded by calling the method load_ui.
        self._loaded_ui_objects = {}
        # Holds all imported resources modules name
        self._loaded_resources = {}
        # Current installed QTranslator
        self._translator = None
        # Apply a previously saved language preference, if any.
        if settings.get("locale_str"):
            self.set_language(settings.get("locale_str"))
        self.import_resources()

    @classmethod
    def instance(cls):
        """Return the singleton instance of the Application class.

        Returns None when no Application has been constructed yet.
        """
        return cls._INSTANCE

    def get_ui(self, name):
        """Return the instance of a previously loaded ui file.

        The ui object will be a QObject.  To load a new ui file use the
        `load_ui` method.

        Args:
            name (str): the ui object's name/alias.

        Returns:
            QObject: the object that was created when the correspondent ui
            file was loaded.

        Raises:
            KeyError: if there is no ui object with the given name.
        """
        if name not in self._loaded_ui_objects:
            raise KeyError("No ui object loaded with that name.")
        else:
            return self._loaded_ui_objects[name]

    def import_resources(self):
        """Load the resource files contained in the gui package path.
        """
        _, path, _ = imp.find_module("gui")
        # Bugfix for OSX
        if path not in sys.path:
            sys.path.append(path)
        # Import every compiled Qt resource module (files ending in _rc.py);
        # mod[:-3] strips the ".py" suffix to get the module name.
        for mod in (f for f in os.listdir(path) if f.endswith("_rc.py")):
            try:
                imported = import_module(mod[:-3])
                self._loaded_resources[mod] = imported
            except ImportError:
                # Best-effort: log and continue with the remaining resources.
                LOG.error("Error when importing resource file %s", mod,
                          exc_info=True)

    def set_language(self, locale_str):
        """Remove the current translator and install one based on the locale_str.

        This method will look for an installed qm file with the locale str
        provided.  If one is found it will remove the current one and install
        the new.  After the installation it will call the retranslateUi for
        all the loaded ui objects.  If a loaded ui object does not have the
        retranslateUi method, it will just ignore it.

        Args:
            locale_str (str): The locale code, i.e. "pt_BR", "en_US".  It can
                also be "default" and if so, the current translator will be
                removed and the language will be set back to the default
                (en_US).

        Raises:
            ValueError: If there is no locale installed with the given code.
        """
        # Temporary solution for solving circular dependencies errors
        join = os.path.join
        isfile = os.path.isfile
        qm_file = join(settings.get("lang_dir"), "qm", locale_str+".qm")
        if isfile(qm_file) or locale_str == "default":
            # Drop the currently installed translator (if any) first.
            if self._translator is not None:
                self._qapp.removeTranslator(self._translator)
                self._translator = None
            if isfile(qm_file):
                self._translator = QTranslator()
                self._translator.load(qm_file)
                self._qapp.installTranslator(self._translator)
        else:
            raise ValueError("There is no locale installed with the given code")

    def exec_(self):
        """Wraps the QApplication instance `exec_` method.
        """
        return self._qapp.exec_()

    def release(self):
        """Releases all resources used by the application.
        """
        # NOTE(review): currently a no-op; resources are left to process
        # teardown.
        pass
|
brunoabud/ic
|
gui/application.py
|
Python
|
gpl-3.0
| 6,056
|
#!/usr/bin/env python
def odds_minus_evens(l):
    """Return the sum of the odd values of *l* minus the sum of the evens.

    Accepts any iterable of integers; returns 0 for an empty iterable.
    (The original accumulated into a local named ``sum``, shadowing the
    builtin; this version uses the builtin directly.)
    """
    return sum(x if x % 2 else -x for x in l)
# Demo: odds (3, 5) minus evens (2, 4) over range(2, 6) prints 2.
print(odds_minus_evens(range(2, 6)))
|
veltzer/demos-python
|
src/exercises/basic/odds_minus_evens/solution2.py
|
Python
|
gpl-3.0
| 210
|
#!/usr/bin/env python
import os
import re
from jinja2 import Environment, PackageLoader
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import EngFormatter
import numpy as np
class DatcomPlotter(object):
    """Render the standard set of coefficient plots from parsed DATCOM data.

    ``parser_dict`` maps breakpoint/coefficient names (e.g. 'alpha',
    'CL_Basic') to numeric arrays and must contain a 'name' entry, which is
    interpolated into every plot title.  Figures are saved as PDF files in
    ./fig, created on demand.
    """

    def __init__(self, parser_dict):
        self.d = parser_dict
        # Create the output directory relative to the working directory.
        if not os.path.isdir('fig'):
            os.mkdir('fig')
        self.figpath = os.path.abspath('fig')

    def common_plots(self):
        """Draw every standard lift/drag/side-force/moment plot."""
        ## lift plots
        self.plot2d(
            title='{name}: Basic Lift Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='CL_Basic', y_label='CL')
        self.plot2d(
            title='{name}: Flap effect on Lift Coefficient',
            x_name='flap', x_label='Flap, deg',
            y_name='dCL_Flap', y_label='dCL')
        self.plot2d(
            title='{name}: Elevator effect on Lift Coefficient',
            x_name='elev', x_label='Elevator, deg',
            y_name='dCL_Elevator', y_label='dCL')
        self.plot2d(
            title='{name}: Pitch Rate effect on Lift Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCL_PitchRate', y_label='dCL')
        self.plot2d(
            title='{name}: Alpha Dot effect on Lift Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCL_AlphaDot', y_label='dCL')
        ## drag plots
        self.plot2d(
            title='{name}: Basic Drag Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='CD_Basic', y_label='CD')
        self.plot2d(
            title='{name}: Drag Polar',
            x_name='CL_Basic', x_label='CL',
            y_name='CD_Basic', y_label='CD')
        self.plot3d(
            title='{name}: Flap effect on Drag Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='flap', y_label='Flap, deg',
            z_name='dCD_Flap', z_label='dCD')
        self.plot3d(
            title='{name}: Elevator effect on Drag Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='elev', y_label='Elevator, deg',
            z_name='dCD_Elevator', z_label='dCD')
        ## side force plots
        self.plot2d(
            title='{name}: Basic Side Force Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCY_Beta', y_label='dCY')
        self.plot2d(
            title='{name}: Roll Rate effect on Side Force Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCY_RollRate', y_label='dCY')
        ## roll moment
        self.plot2d(
            title='{name}: Aileron effect on Roll Moment Coefficient',
            x_name='alrn', x_label='Aileron, deg',
            y_name='dCl_Aileron', y_label='dCl')
        self.plot2d(
            title='{name}: Side slip effect on Roll Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCl_Beta', y_label='dCl')
        self.plot2d(
            title='{name}: RollRate effect on Roll Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCl_RollRate', y_label='dCl')
        self.plot2d(
            title='{name}: YawRate effect on Roll Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCl_YawRate', y_label='dCl')
        ## pitch moment
        self.plot2d(
            title='{name}: Basic Pitch Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='Cm_Basic', y_label='Cm')
        self.plot2d(
            title='{name}: Flap effect on Pitch Moment Coefficient',
            x_name='flap', x_label='Flap, deg',
            y_name='dCm_Flap', y_label='dCm')
        self.plot2d(
            title='{name}: Elevator effect on Pitch Moment Coefficient',
            x_name='elev', x_label='Elevator, deg',
            y_name='dCm_Elevator', y_label='dCm')
        self.plot2d(
            title='{name}: Pitch Rate effect on Pitch Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCm_PitchRate', y_label='dCm')
        self.plot2d(
            title='{name}: Alpha Dot effect on Pitch Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCm_AlphaDot', y_label='dCm')
        ## yaw moment
        self.plot3d(
            title='{name}: Aileron effect on Yaw Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='flap', y_label='Flap, deg',
            z_name='dCn_Aileron', z_label='dCn')
        self.plot2d(
            title='{name}: Side Slip effect on Yaw Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCn_Beta', y_label='dCn')
        self.plot2d(
            title='{name}: Roll Rate effect on Yaw Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCn_RollRate', y_label='dCn')
        self.plot2d(
            title='{name}: Yaw Rate effect on Yaw Moment Coefficient',
            x_name='alpha', x_label='Alpha, deg',
            y_name='dCn_YawRate', y_label='dCn')

    def _save_figure(self, fig, title):
        """Save *fig* as '<formatted title>.pdf' under self.figpath and close it.

        Fixes the original double os.path.join that nested figpath twice;
        because the inner result was already absolute, the outer join
        discarded its first argument, so the saved location is unchanged.
        """
        plt.savefig(os.path.join(self.figpath,
                                 title.format(**self.d) + '.pdf'))
        plt.close(fig)

    def plot2d(self, title,
               x_name, x_label,
               y_name, y_label):
        """Plot d[y_name] against d[x_name] and save the figure as a PDF."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        y = self.d[y_name]
        # Trim the abscissa to the ordinate's length; the parsed tables can
        # carry more breakpoints than values.
        x = self.d[x_name][:len(y)]
        ax.plot(x, y)
        ax.set_xlabel(x_label.format(**self.d))
        ax.set_ylabel(y_label.format(**self.d))
        ax.set_title(title.format(**self.d))
        ax.grid()
        self._save_figure(fig, title)

    def plot3d(self, title,
               x_name, x_label,
               y_name, y_label,
               z_name, z_label):
        """Surface-plot d[z_name] over the (x, y) grid and save it as a PDF."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # The parsed Z table is transposed relative to meshgrid's (ny, nx)
        # layout, so flip it and size x/y from the transposed shape.
        Z = np.transpose(self.d[z_name])
        x = self.d[x_name][:len(Z[0])]
        y = self.d[y_name][:len(Z)]
        ax.set_xlabel(x_label.format(**self.d))
        ax.set_ylabel(y_label.format(**self.d))
        ax.set_zlabel(z_label.format(**self.d))
        ax.set_title(title.format(**self.d))
        X, Y = np.meshgrid(x, y)
        surf = ax.plot_surface(X, Y, Z,
                               cmap=cm.jet, rstride=1, cstride=1)
        fig.colorbar(surf, shrink=0.5, aspect=5)
        ax.grid()
        self._save_figure(fig, title)

    @staticmethod
    def command_line():
        """Command-line entry point: parse a DATCOM file, render common plots."""
        import argparse
        from parser import DatcomParser
        argparser = argparse.ArgumentParser()
        argparser.add_argument("datcom_file",
                               help="the output file from datcom to parse")
        args = argparser.parse_args()
        parser = DatcomParser(args.datcom_file)
        plotter = DatcomPlotter(parser.get_common())
        plotter.common_plots()
if __name__ == "__main__":
DatcomPlotter.command_line()
|
arktools/pydatcom
|
pydatcom/plotter.py
|
Python
|
gpl-3.0
| 7,254
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def doplacement():
    """Enable pylada's multiple-MPI-programs mode for a test, then restore it.

    pytest resumes the generator after the test finishes, so the assignment
    past ``yield`` acts as the teardown step.
    """
    import pylada
    old = pylada.do_multiple_mpi_programs
    pylada.do_multiple_mpi_programs = True
    yield True
    pylada.do_multiple_mpi_programs = old
def test_mpicomm(doplacement):
    """ Test MPI Communicator.

    Exercises lend/cleanup, split/cleanup and acquire on a Communicator of
    32 processors spread over four 8-processor machines, checking that
    processors are conserved after every operation.
    """
    from pylada.process.mpi import Communicator, MPISizeError
    # Root communicator: 32 procs on machines node00..node03, 8 each.
    root = Communicator(n=32)
    for i in range(4):
        root.machines["node0{0}".format(i)] = 8

    # Lend 5 procs: comes from a single machine, leaving it 3; cleanup
    # returns everything to root.
    newcomm = root.lend(5)
    assert newcomm['n'] == 5
    assert newcomm.parent() is root
    assert len(newcomm.machines) == 1
    assert root.machines[list(newcomm.machines.keys())[0]] == 3
    assert root['n'] == 27
    newcomm.cleanup()
    assert newcomm['n'] == 0
    assert len(newcomm.machines) == 0
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Lend exactly one whole machine (8 procs): it leaves root entirely.
    newcomm = root.lend(8)
    assert newcomm['n'] == 8
    assert sum(newcomm.machines.values()) == newcomm['n']
    assert newcomm.parent() is root
    assert len(newcomm.machines) == 1
    key = list(newcomm.machines.keys())[0]
    assert key not in root.machines
    assert newcomm.machines[key] == 8
    assert root['n'] == 24
    newcomm.cleanup()
    assert newcomm['n'] == 0
    assert len(newcomm.machines) == 0
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Lend 12 procs: one whole machine plus 4 from a second.
    newcomm = root.lend(12)
    assert newcomm['n'] == 12
    assert sum(newcomm.machines.values()) == newcomm['n']
    assert newcomm.parent() is root
    assert len(newcomm.machines) == 2
    key0, key1 = newcomm.machines.keys()
    if newcomm.machines[key0] != 8:
        key0, key1 = key1, key0
    assert newcomm.machines[key0] == 8
    assert newcomm.machines[key1] == 4
    assert key0 not in root.machines
    assert root.machines[key1] == 4
    assert root['n'] == 20
    newcomm.cleanup()
    assert newcomm['n'] == 0
    assert len(newcomm.machines) == 0
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Even split into 4 communicators of 8; machines must not be shared.
    comms = root.split(4)
    assert root['n'] == 0
    assert len(root.machines) == 0
    machines = []
    for comm in comms:
        assert comm['n'] == 8
        assert sum(comm.machines.values()) == comm['n']
        assert len(comm.machines) == 1
        assert list(comm.machines.keys())[0] not in machines
        machines.append(list(comm.machines.keys())[0])
    for comm in comms:
        comm.cleanup()
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Uneven split into 5: sizes 6 or 7, all 32 procs accounted for.
    comms = root.split(5)
    assert root['n'] == 0
    assert len(root.machines) == 0
    machines = {}
    for comm in comms:
        assert comm['n'] in [6, 7]
        assert sum(comm.machines.values()) == comm['n']
        for key, value in comm.machines.items():
            if key not in machines:
                machines[key] = value
            else:
                machines[key] += value
    assert sum(machines.values()) == 32
    assert all(u == 8 for u in machines.values())
    for comm in comms:
        comm.cleanup()
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Split into 3 (sizes 10 or 11), then merge the first into the second
    # via acquire().
    comms = root.split(3)
    assert root['n'] == 0
    assert len(root.machines) == 0
    machines = {}
    for comm in comms:
        assert comm.parent() is root
        assert comm['n'] in [10, 11]
        assert sum(comm.machines.values()) == comm['n']
        for key, value in comm.machines.items():
            if key not in machines:
                machines[key] = value
            else:
                machines[key] += value
    assert sum(machines.values()) == 32
    assert all(u == 8 for u in machines.values())
    # Expected union of the first two communicators' machines.
    machines = comms[0].machines.copy()
    for key, value in comms[1].machines.items():
        if key in machines:
            machines[key] += value
        else:
            machines[key] = value
    comm = comms.pop(0)
    comms[0].acquire(comm)
    # The acquired communicator is emptied and detached from its parent.
    assert comm.parent is None
    assert comm['n'] == 0
    assert len(comm.machines) == 0
    assert comms[0].parent() is root
    assert comms[0]['n'] == sum(machines.values())
    assert comms[0]['n'] == sum(comms[0].machines.values())
    for key in machines:
        assert machines[key] == comms[0].machines[key]
    for key in comms[0].machines:
        assert machines[key] == comms[0].machines[key]
    for comm in comms:
        comm.cleanup()
    assert root['n'] == 32
    assert all(u == 8 for u in root.machines.values())

    # Requesting more processors than exist must raise MPISizeError.
    try:
        comm.lend(33)
    except MPISizeError:
        pass
    else:
        raise Exception()
    try:
        comm.split(33)
    except MPISizeError:
        pass
    else:
        raise Exception()
if __name__ == "__main__":
from sys import argv, path
from os.path import abspath
if len(argv) > 1:
path.extend(argv[1:])
test()
|
pylada/pylada-light
|
tests/process/test_mpi.py
|
Python
|
gpl-3.0
| 5,949
|
from django.conf.urls import url
import views
# URL routing table for the CMDB app: dashboard, IDC (data center)
# management, host-group management, and host management endpoints.
urlpatterns = [
    url(r'^index/$', views.index),
    # IDC views.
    url(r'^idc/$', views.idc),
    url(r'^get_add_idc_page/$', views.get_add_idc_page),
    url(r'^add_idc/$', views.add_idc),
    url(r'^modify_idc_remark/$', views.modify_idc_remark),
    # Host-group views.
    url(r'^group/$', views.group),
    url(r'^get_add_group_page/$', views.get_add_group_page),
    url(r'^add_group/$', views.add_group),
    url(r'^modify_group_remark/$', views.modify_group_remark),
    # Host views.
    url(r'^host_list/$', views.host_list),
    url(r'^modify_host_nickname/$', views.modify_host_nickname),
    url(r'^modify_host_ip/$', views.modify_host_ip),
    url(r'^modify_host_inner_ip/$', views.modify_host_inner_ip),
    url(r'^get_add_host_page/$', views.get_add_host_page),
    url(r'^sync_host/$', views.sync_host),
]
|
SnowRomance/CMDB
|
app/urls.py
|
Python
|
gpl-3.0
| 819
|
import re
class EuFileBuf:
    """Random-access, line-oriented view over a text file.

    Builds an index of the byte offset of every line start so any line can
    be fetched with a single seek instead of re-reading the file.  Line
    numbers are zero-based.
    """

    def __init__(self, filename):
        self.filename = filename
        # Byte offset of the start of each line (built by createIndex).
        self.indexes = []
        self.createIndex()
        self.fd = open(self.filename, 'r')
        # Cache the most recently read line to avoid a redundant seek when
        # the same line is requested repeatedly.
        self.lastline = {'line': 0, 'txt': self.fd.readline()}

    def __del__(self):
        # Best-effort close: fd may not exist if __init__ failed, and the
        # interpreter may already be shutting down.
        try:
            self.fd.close()
        except Exception:
            pass

    def getLen(self):
        """Return the number of lines in the file."""
        return len(self.indexes)

    def getLine(self, line):
        """Return line *line* (newline included), or "" when out of range."""
        if line >= self.getLen():
            return ""
        if self.lastline['line'] != line:
            self.fd.seek(self.indexes[line])
            self.lastline = {'line': line, 'txt': self.fd.readline()}
        return self.lastline['txt']

    def createIndex(self):
        """(Re)build the byte-offset index of line starts."""
        self.indexes = []
        offset = 0
        with open(self.filename, 'r') as infile:
            for line in infile:
                self.indexes.append(offset)
                offset += len(line)

    def search(self, regex):
        """Return the indexes of lines whose start matches *regex*.

        Uses re.match, i.e. the pattern must match at the beginning of the
        line (not anywhere inside it).
        """
        pattern = re.compile(regex)
        matches = []
        for lineno, line in enumerate(open(self.filename, 'r')):
            if pattern.match(line):
                matches.append(lineno)
        return matches
|
ceccopierangiolieugenio/EuLog
|
EuLog_mod/EuFileBuf.py
|
Python
|
gpl-3.0
| 974
|
#! /usr/bin/env python
from __future__ import print_function, division
from collections import namedtuple
"""
Copyright (C) 2016 Wesley Fraser (westhefras@gmail.com, @wtfastro)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Wesley Fraser (@wtfastro, github: fraserw <westhefras@gmail.com>), Academic email: wes.fraser@qub.ac.uk'
import imp
import os
import sys
import pylab as pyl
import scipy as sci
from scipy import optimize as opti, interpolate as interp
from scipy import signal
from . import bgFinder
# import weightedMeanSTD
try:
imp.find_module('astropy')
astropyFound = True
except ImportError:
astropyFound = False
if astropyFound:
from astropy.io import fits as pyf
else:
import pyfits as pyf
from .pill import pillPhot
from .trippy_utils import *
import time
class modelPSF:
"""
Round and moving object psf class.
The intent of this class is to provide a model PSF of both stationary and trailed point sources.
The basic profile is a moffat profile with super sampled constant look up table. Both are used
with linear convolution to calculate the trailed source PSF.
modelPSF takes as input:
-x,y are arrays of length equal to the width and height of the desired PSF image.
eg. x=numpy.arange(50), y=numpy.arange(70) would create a psf 50x70 pix
-alpha, beta are initial guesses for the moffat profile to be used. 5 and 2 are usually pretty good
-repFact is the supersampling factor. 10 is default, though for improvement in speed, 5 can be used
without much loss of PSF or photometric precision
optional arguments:
-verbose = True if you want to see a lot of unnecessary output
-restore = psf filename if you want to restore from a previously saved psf file.
The general steps for psf generation and photometry are:
-initialization
-lookup table generation
-psf generation
-line convolution
-linear aperture correction estimation
"""
def psfStore(self, fn, psfV2=False):
    """
    Store the psf into a fits file that you can view and reopen at a later
    point.  The only option is the fits file name.

    With psfV2=False the full layout is written (PSF, lookup table, line
    PSF, psf, aperture corrections); with psfV2=True only the compact
    layout (lookup table plus aperture corrections) is stored and the rest
    is regenerated on restore.
    """
    # Strip a trailing '.fits' so the extension is not doubled below.
    name = fn.split('.fits')[0]
    if not psfV2:
        # Full (V1) layout: six HDUs.
        HDU = pyf.PrimaryHDU(self.PSF)
        hdu = pyf.ImageHDU(self.psf)
        lookupHDU = pyf.ImageHDU(self.lookupTable)
        lineHDU = pyf.ImageHDU(self.longPSF)
        # Aperture corrections; [[-1], [-1]] marks "not computed".
        if self.aperCorrs is not None:
            aperCorrHDU = pyf.ImageHDU(np.array([self.aperCorrs, self.aperCorrRadii]))
        else:
            aperCorrHDU = pyf.ImageHDU(np.array([[-1], [-1]]))
        if self.lineAperCorrs is not None:
            lineAperCorrHDU = pyf.ImageHDU(np.array([self.lineAperCorrs, self.lineAperCorrRadii]))
        else:
            lineAperCorrHDU = pyf.ImageHDU(np.array([[-1], [-1]]))
        #distHDU=pyf.ImageHDU(np.array([self.rDist,self.fDist]))
        # NOTE: shadows the builtin ``list``; kept as-is.
        list = pyf.HDUList([HDU, lookupHDU, lineHDU, hdu, aperCorrHDU, lineAperCorrHDU])
    else:
        # Compact (V2) layout: lookup table + aperture corrections only.
        lookupHDU = pyf.PrimaryHDU(self.lookupTable)
        if self.aperCorrs is not None:
            aperCorrHDU = pyf.ImageHDU(np.array([self.aperCorrs, self.aperCorrRadii]))
        else:
            aperCorrHDU = pyf.ImageHDU(np.array([[-1], [-1]]))
        if self.lineAperCorrs is not None:
            lineAperCorrHDU = pyf.ImageHDU(np.array([self.lineAperCorrs, self.lineAperCorrRadii]))
        else:
            lineAperCorrHDU = pyf.ImageHDU(np.array([[-1], [-1]]))
        #distHDU=pyf.ImageHDU(np.array([self.rDist,self.fDist]))
        list = pyf.HDUList([lookupHDU, aperCorrHDU, lineAperCorrHDU])
    # Model parameters and the PSF-star positions go into the primary header.
    list[0].header.set('REPFACT', self.repFact)
    for ii in range(len(self.psfStars)):
        list[0].header.set('xSTAR%s' % (ii), self.psfStars[ii][0], 'PSF Star x value.')
        list[0].header.set('ySTAR%s' % (ii), self.psfStars[ii][1], 'PSF Star y value.')
    list[0].header['alpha'] = self.alpha
    list[0].header['beta'] = self.beta
    list[0].header['A'] = self.A
    list[0].header['rate'] = self.rate
    list[0].header['angle'] = self.angle
    list[0].header['dt'] = self.dt
    list[0].header['pixScale'] = self.pixScale
    # astropy uses overwrite=; old pyfits used clobber= -- try both.
    try:
        list.writeto(name + '.fits', overwrite=True)
    except:
        list.writeto(name + '.fits', clobber=True)
def _fitsReStore(self, fn):
    """
    Hidden convenience function to restore a psf file.

    Detects the layout by HDU count (6 => full V1 layout, otherwise the
    compact V2 layout written by psfStore(..., psfV2=True)), reloads all
    stored arrays and header parameters, then recomputes the derived
    coordinate grids.  For V2 files the PSF, psf and line PSF are
    regenerated from the restored parameters.
    """
    print('\nRestoring PSF...')
    # Strip a trailing '.fits' so the extension is not doubled below.
    name = fn.split('.fits')[0]
    with pyf.open(name + '.fits') as inHan:
        # load the psf file
        if len(inHan) == 6:
            psfV2 = False
            # load the psf file (full V1 layout).
            self.PSF = inHan[0].data
            self.lookupTable = inHan[1].data
            self.longPSF = inHan[2].data
            self.psf = inHan[3].data
            self.aperCorrs = inHan[4].data[0]
            self.aperCorrRadii = inHan[4].data[1]
            self.lineAperCorrs = inHan[5].data[0]
            self.lineAperCorrRadii = inHan[5].data[1]
        else:
            # Compact V2 layout; PSF arrays are regenerated below.
            psfV2 = True
            self.lookupTable = inHan[0].data
            self.aperCorrs = inHan[1].data[0]
            self.aperCorrRadii = inHan[1].data[1]
            self.lineAperCorrs = inHan[2].data[0]
            self.lineAperCorrRadii = inHan[2].data[1]
        # Restore the PSF-star positions and the model parameters from the
        # primary header.
        self.psfStars = []
        header = inHan[0].header
        self.repFact = header['REPFACT']
        x = header['xSTAR*']#.values()
        y = header['ySTAR*']#.values()
        for ii in range(len(x)):
            self.psfStars.append([x[ii], y[ii]])
        self.psfStars = np.array(self.psfStars)
        self.alpha = header['alpha']
        self.beta = header['beta']
        self.A = header['A']
        self.rate = header['RATE']
        self.angle = header['ANGLE']
        self.dt = header['DT']
        self.pixScale = header['PIXSCALE']
    self.boxSize = int(len(self.lookupTable)/self.repFact/2)
    # now recompute the necessary parameters
    # Aperture-correction interpolators; a single [-1] entry means "not
    # computed" and is skipped.
    if len(self.aperCorrs) != 1:
        self.aperCorrFunc = interp.interp1d(self.aperCorrRadii*1., self.aperCorrs*1.)
    if len(self.lineAperCorrs) != 1:
        self.lineAperCorrFunc = interp.interp1d(self.lineAperCorrRadii*1., self.lineAperCorrs*1.)
    # Native-resolution grid: shape, pixel-centre coordinates and radii.
    (A, B) = self.lookupTable.shape
    self.shape = [A/self.repFact, B/self.repFact]
    self.x = np.arange(self.shape[0])+0.5
    self.y = np.arange(self.shape[1])+0.5
    self.cent = np.array([len(self.y)/2., len(self.x)/2.])
    self.centx = self.cent[0]
    self.centy = self.cent[1]
    #self.psf=np.ones([len(self.y),len(self.x)]).astype('float')
    self.inds = np.zeros((len(self.y), len(self.x), 2)).astype('int')
    for ii in range(len(self.y)):
        self.inds[ii, :, 1] = np.arange(len(self.x))
    for ii in range(len(self.x)):
        self.inds[:, ii, 0] = np.arange(len(self.y))
    self.coords = self.inds+np.array([0.5, 0.5])
    self.r = np.sqrt(np.sum((self.coords-self.cent)**2, axis=2))
    # Supersampled grid (repFact times finer) and its radii.
    self.X = np.arange(len(self.x)*self.repFact)/float(self.repFact)+0.5/self.repFact
    self.Y = np.arange(len(self.y)*self.repFact)/float(self.repFact)+0.5/self.repFact
    self.Inds = np.zeros((len(self.y)*self.repFact, len(self.x)*self.repFact, 2)).astype('int')
    for ii in range(len(self.y)*self.repFact):
        self.Inds[ii, :, 1] = np.arange(len(self.x)*self.repFact)
    for ii in range(len(self.x)*self.repFact):
        self.Inds[:, ii, 0] = np.arange(len(self.y)*self.repFact)
    self.Coords = (self.Inds+np.array([0.5, 0.5]))/float(self.repFact)
    self.R = np.sqrt(np.sum((self.Coords-self.cent)**2, axis=2))
    self.genPSF()
    self.fitted = True
    if psfV2:
        ###code to generate the PSF and psf
        self.PSF = self.moffat(self.R)
        self.PSF /= np.sum(self.PSF)
        self.psf = downSample2d(self.PSF, self.repFact)
        ###code to generate the line psf
        self.longPSF = None
        if self.rate is not None:
            self.line(self.rate, self.angle, self.dt, pixScale=self.pixScale, display=False, useLookupTable=True, verbose=True)
    print(' PSF restored.\n')
def __init__(self,x=-1,y=-1,alpha=-1,beta=-1,repFact=10,verbose=False,restore=False,ignoreRepFactWarning=False):
    """
    Initialize the PSF.

    x,y are the size of the PSF (width, height) in pixels. Can either be an
    integer value or a numpy.arange(x) array.
    alpha, beta are the initial moffat parameters.
    repFact is the supersampling factor. Only 3, 5, and 10 are well tested!
    verbose to see a bunch of unnecessary, but informative output.
    restore=filename to restore a psf having the filename provided.
    ignoreRepFactWarning=True to skip the repFact sanity check.
    """
    self.nForFitting=0
    self.imData=None

    # NOTE: this deliberately raises (not warns) on untested repFact values.
    if repFact not in [3,5,10] and not ignoreRepFactWarning:
        raise Warning('This has only been robustly tested with repFact=3, 5, or 10. I encourage you to stick with those.')

    if not restore:
        # Enforce odd PSF dimensions; even widths are untested.
        if type(x)==type(np.ones(1)):
            if len(x)==1:
                # bug fix: the second clause previously re-tested x[0]
                # instead of y[0], so an even y was never caught here.
                if x[0]%2==0 or y[0]%2==0:
                    raise Exception('Please use odd width PSFs. Even has not been tested yet.')
            elif (len(x)%2==0 or len(y)%2==0):
                raise Exception('Please use odd width PSFs. Even has not been tested yet.')
        else:
            if (x%2==0 or y%2==0):
                raise Exception('Please use odd width PSFs. Even has not been tested yet.')

    if restore:
        # Everything below is loaded from the FITS file instead.
        self._fitsReStore(restore)
    else:
        self.A=None
        self.alpha=alpha
        self.beta=beta
        self.chi=None

        # TSF (trailed PSF) parameters; set later by line().
        self.rate = None
        self.angle = None
        self.dt = None
        self.pixScale = None

        # Pixel coordinate grids; +0.5 puts coordinates at pixel centres.
        if type(x)!=type(np.ones(1)):
            self.x=np.arange(x)+0.5
            self.y=np.arange(y)+0.5
        elif len(x)==1:
            self.x=np.arange(x)+0.5
            self.y=np.arange(y)+0.5
        else:
            self.x=x*1.0+0.5
            self.y=y*1.0+0.5
        self.cent=np.array([len(self.y)/2.,len(self.x)/2.])
        self.centx=self.cent[0]
        self.centy=self.cent[1]
        self.repFact=repFact

        self.psf=np.ones([len(self.y),len(self.x)]).astype('float')

        # inds/coords/r: native-resolution pixel indices, centres, and radii.
        self.inds=np.zeros((len(self.y),len(self.x),2)).astype('int')
        for ii in range(len(self.y)):
            self.inds[ii,:,1]=np.arange(len(self.x))
        for ii in range(len(self.x)):
            self.inds[:,ii,0]=np.arange(len(self.y))
        self.coords=self.inds+np.array([0.5,0.5])
        self.r=np.sqrt(np.sum((self.coords-self.cent)**2,axis=2))

        # Capitalized versions: the same grids on the supersampled raster.
        self.X=np.arange(len(self.x)*self.repFact)/float(self.repFact)+0.5/self.repFact
        self.Y=np.arange(len(self.y)*self.repFact)/float(self.repFact)+0.5/self.repFact
        self.Inds=np.zeros((len(self.y)*self.repFact,len(self.x)*self.repFact,2)).astype('int')
        for ii in range(len(self.y)*self.repFact):
            self.Inds[ii,:,1]=np.arange(len(self.x)*self.repFact)
        for ii in range(len(self.x)*self.repFact):
            self.Inds[:,ii,0]=np.arange(len(self.y)*self.repFact)
        self.Coords=(self.Inds+np.array([0.5,0.5]))/float(self.repFact)
        self.R=np.sqrt(np.sum((self.Coords-self.cent)**2,axis=2))

        self.PSF=self.moffat(self.R)
        self.PSF/=np.sum(self.PSF)
        self.psf=downSample2d(self.PSF,self.repFact)

        self.fullPSF=None
        self.fullpsf=None

        self.shape=self.psf.shape

        # Aperture-correction interpolators; filled in by the
        # computeRound/LineAperCorr* methods.
        self.aperCorrFunc=None
        self.aperCorrs=None
        self.aperCorrRadii=None
        self.lineAperCorrFunc=None
        self.lineAperCorrs=None
        self.lineAperCorrRadii=None

        self.verbose=verbose
        self.fitted=False

        self.lookupTable=None
        self.lookupF=None
        self.lookupR=None
        self.line2d=None
        self.longPSF=None
        self.longpsf=None

        self.bgNoise=None

        # from fitting a psf to a source
        self.model=None
        self.residual=None

        self.psfStars=None
def computeRoundAperCorrFromPSF(self,radii,useLookupTable=True,display=True,displayAperture=True):
    """
    Compute circular aperture corrections directly from the PSF.

    The corrections are relative to the largest aperture provided in radii
    (recommend ~4*FWHM) and are later interpolated by roundAperCorr.

    radii -- array of aperture radii (pixels) to evaluate; at least ~10
             values between 1 and 4 FWHM are recommended.
    useLookupTable -- True to measure on the full PSF (moffat + lookup
                      table), False for the pure moffat profile.
    display -- True to plot the radius/magnitude curve.
    displayAperture -- True to show the aperture at each radius.
    """
    self.aperCorrRadii=radii*1.0
    (A,B)=self.PSF.shape
    if useLookupTable:
        phot=pillPhot(self.fullPSF,repFact=1)
    else:
        phot=pillPhot(self.PSF,repFact=1)
    # All radii are measured in a single pillPhot call for efficiency
    # (the old per-radius loop was removed).
    width=int(A/2)
    phot(B / 2., A / 2., radius=radii * self.repFact, l=0., a=0., skyRadius=None, zpt=0.0, width=width,
         display=displayAperture)
    self.aperCorrs=np.array(phot.magnitude)
    self.aperCorrFunc=interp.interp1d(self.aperCorrRadii*1.,self.aperCorrs*1.)
    if display:
        fig=pyl.figure('psf')
        pyl.plot(self.aperCorrRadii,self.aperCorrs,'k-o')
        # bug fix: label previously read 'Aperture Radius (pix' (unclosed paren)
        pyl.xlabel('Aperture Radius (pix)')
        pyl.ylabel('Normalized Magnitude')
        pyl.show()
def roundAperCorr(self,r):
    """
    Return the circular aperture correction at radius r (scalar or array).

    Linear interpolation between the values computed by
    computeRoundAperCorrFromPSF is used; that routine must be run first.

    Raises Exception if computeRoundAperCorrFromPSF has not yet been run.
    """
    # bug fix: `!= None` replaced with `is not None` (identity test), and
    # the error message typo "fun" corrected to "run".
    if self.aperCorrFunc is not None:
        # Shift so the largest aperture has zero correction.
        return self.aperCorrFunc(r)-np.min(self.aperCorrs)
    else:
        raise Exception('Must first run computeRoundAperCorrFromPSF before the aperture corrections can be evaluated here.')
def computeLineAperCorrFromTSF(self,radii,l,a,display=True,displayAperture=True):
    """
    Compute pill (trailed) aperture corrections directly from the TSF.

    The corrections are relative to the largest aperture provided in radii
    (recommend ~4*FWHM) and are later interpolated by lineAperCorr.

    radii -- array of aperture radii (pixels); at least ~10 values between
             1 and 4 FWHM are recommended.
    l, a -- length (pixels) and angle of the pill aperture.
    display -- True to plot the radius/magnitude curve.
    displayAperture -- True to show the aperture at each radius.
    """
    self.lineAperCorrRadii=radii*1.0
    (A,B)=self.PSF.shape
    phot=pillPhot(self.longPSF,repFact=1)
    # All radii are measured in a single pillPhot call for efficiency
    # (the old per-radius loop was removed, along with an unused local
    # that aliased phot.sourceFlux).
    width = int(A / 2)
    phot(B / 2., A / 2., radius=radii * self.repFact, l=l * self.repFact, a=a, skyRadius=None, zpt=0.0, width=width,
         display=displayAperture)
    self.lineAperCorrs = phot.magnitude
    print(" Radius Flux Magnitude")
    for ii in range(len(self.lineAperCorrRadii)):
        print(' {:6.2f} {:10.3f} {:8.3f}'.format(radii[ii],phot.sourceFlux[ii],phot.magnitude[ii]))
    self.lineAperCorrs=np.array(self.lineAperCorrs)
    self.lineAperCorrFunc=interp.interp1d(self.lineAperCorrRadii,self.lineAperCorrs)
    if display:
        fig=pyl.figure('psf')
        pyl.plot(self.lineAperCorrRadii,self.lineAperCorrs,'k-o')
        # bug fix: label previously read 'Aperture Radius (pix' (unclosed paren)
        pyl.xlabel('Aperture Radius (pix)')
        pyl.ylabel('Normalized Magnitude')
        pyl.show()
def lineAperCorr(self,r):
    """
    Return the pill aperture correction at radius r (scalar or array).

    Linear interpolation between the values computed by
    computeLineAperCorrFromTSF is used; that routine must be run first.

    Raises Exception if computeLineAperCorrFromTSF has not yet been run.
    """
    # bug fix: `!= None` replaced with `is not None`; the error message
    # previously named a non-existent method ("computeLineAperCorrFromMoffat")
    # and contained the typo "fun" for "run".
    if self.lineAperCorrFunc is not None:
        # Shift so the largest aperture has zero correction.
        return self.lineAperCorrFunc(r)-np.min(self.lineAperCorrs)
    else:
        raise Exception('Must first run computeLineAperCorrFromTSF before the aperture corrections can be evaluated here.')
def moffat(self,rad):
    """
    Evaluate a moffat profile at the radii in the input array (or scalar).

    Uses the instance's current alpha and beta parameters.
    NOTE(review): the (beta-1)*pi*alpha^2 prefactor multiplies rather than
    divides, so this is not the unit-integral normalization; downstream code
    always rescales (np.sum normalization or a fitted amplitude), so the
    constant is absorbed -- preserved as-is.
    """
    alpha_sq = self.alpha*self.alpha
    norm = (self.beta-1)*(np.pi*alpha_sq)
    return norm*(1.+(rad/self.alpha)**2)**(-self.beta)
def FWHM(self, fromMoffatProfile=False, fromImData = False, method = 'median',frac = 0.5):
    """
    Return the full width at (by default) half maximum, in pixels.

    - fromMoffatProfile=True: FWHM of the pure analytic moffat profile.
    - fromImData=True: FWHM estimated from the image data passed to
      fitMoffat (fitMoffat must have been run first).
    - otherwise: FWHM of the full PSF (moffat + lookup table).

    For the image/PSF estimates, pixels are sorted by radius from the centre
    and a running window of 3*repFact pixels is stepped outward until its
    mean/median drops to frac of the peak. method selects 'median' or 'mean'
    for that running statistic; frac sets the profile level (0.5 = half max).
    Returns None if the moffat-profile half-max point falls off the grid.
    """
    if fromMoffatProfile:
        # Evaluate the analytic profile on a fine radial grid and find the
        # first radius at which it drops to frac of the peak.
        r=np.arange(0,(2*max(self.x.shape[0]/2.,self.y.shape[0]/2.)**2)**0.5,0.005)
        m=self.moffat(r)
        m/=np.max(m)
        k=np.sum(np.greater(m,frac))
        if k<0 or k>=len(m): return None
        return r[k]*2.
    else:
        if fromImData:
            # Background-subtracted supersampled cutout stored by fitMoffat.
            im = self.repSubsec-self.bg/(self.repFact*self.repFact)
            repRads = self.repRads
        else:
            im = self.fullPSF
            # Radius of each supersampled pixel from the PSF centre.
            a = self.y.shape[0]/2.
            b = self.x.shape[0]/2.
            rangeY = np.arange(-a*self.repFact,a*self.repFact)/float(self.repFact)
            rangeX = np.arange(-b*self.repFact,b*self.repFact)/float(self.repFact)
            dx2 = (0.5/self.repFact-rangeX)**2
            repRads = []
            for ii in range(len(rangeY)):
                repRads.append((0.5/self.repFact-rangeY[ii])**2+dx2)
            repRads = np.array(repRads)**0.5

        if method not in ['median','mean']:
            raise TypeError('Method must be either median or mean.')

        numMedPix = self.repFact*3
        # Step outward through the radius-sorted pixels, numMedPix at a time.
        (A,B) = repRads.shape
        rr = repRads.reshape(A*B)
        rim = im.reshape(A*B)
        s = np.max(im)
        args = np.argsort(rr)
        for ii in range(len(args)-numMedPix):
            if method == 'median':
                med_i = np.median(rim[args[ii:ii+numMedPix]])
                med_r = np.median(rr[args[ii:ii+numMedPix]])
            else:
                med_i = np.mean(rim[args[ii:ii+numMedPix]])
                med_r = np.mean(rr[args[ii:ii+numMedPix]])
            if med_i<=frac*s:
                return med_r*2.0
        # Fallback: the profile never dropped below frac*s inside the grid;
        # return twice the radius of the outermost window.
        # bug fix: this branch previously referenced an undefined name `r`
        # (NameError) and had the median/mean statistics swapped.
        if method == 'median':
            return np.median(rr[args[-numMedPix:]])*2.0
        else:
            return np.mean(rr[args[-numMedPix:]])*2.0
def __getitem__(self,key):
    """Index directly into the downsampled (native-resolution) PSF array."""
    pixel_grid = self.psf
    return pixel_grid[key]
def line(self,rate,angle,dt,pixScale=0.2,display=False,useLookupTable=True, verbose=True):
    """
    Compute the TSF (trailed PSF) given an input rate of motion, angle of
    motion, length of exposure, and pixel scale.

    Unit choice is irrelevant as long as they are consistent, e.g. rate in
    "/hr and dt in hr with pixScale in "/pix. Angle is in degrees, +-90 from
    horizontal.

    display=True to see the TSF.
    useLookupTable=True to convolve the full PSF (moffat + lookup table);
    otherwise the pure moffat profile is used.

    Side effects: stores rate/angle/dt/pixScale, line2d (the trail kernel),
    longPSF (supersampled TSF) and longpsf (downsampled TSF) on the instance.
    """
    self.rate=rate
    self.angle=angle
    self.dt=dt
    self.pixScale=pixScale
    angr=angle*np.pi/180.

    # Build the trail kernel: a 1-valued line of the trail's length/angle
    # painted onto the supersampled grid, then cropped to its bounding box.
    self.line2d=self.PSF*0.0
    # Supersampled x columns within half the trail's horizontal extent.
    w=np.where(( np.abs(self.X-self.centx)<np.cos(angr)*rate*dt/pixScale/2.))
    if len(w[0])>0:
        x=self.X[w]*1.0
        y=np.tan(angr)*(x-self.centx)+self.centy
        X=(x*self.repFact).astype('int')
        Y=(y*self.repFact).astype('int')
        self.line2d[Y,X]=1.0

        # Crop to the non-zero bounding box of the trail.
        w=np.where(self.line2d>0)
        yl,yh=np.min(w[0]),np.max(w[0])
        xl,xh=np.min(w[1]),np.max(w[1])
        self.line2d=self.line2d[yl:yh+1,xl:xh+1]
    else:
        # Trail shorter than one supersampled pixel: identity kernel.
        self.line2d=np.array([[1.0]])

    if useLookupTable:
        if verbose:
            print('Using the lookup table when generating the line PSF.')
        # fftconvolve is used instead of convolve2d for speed; flux is
        # renormalized to match the (untrailed) full PSF afterwards.
        self.longPSF=signal.fftconvolve(self.moffProf+self.lookupTable*self.repFact*self.repFact, self.line2d,mode='same')
        self.longPSF*=np.sum(self.fullPSF)/np.sum(self.longPSF)
    else:
        if verbose:
            print('Not using the lookup table when generating the line PSF')
        self.longPSF=signal.fftconvolve(self.moffProf,self.line2d,mode='same')
        self.longPSF*=np.sum(self.moffProf)/np.sum(self.longPSF)
    self.longpsf=downSample2d(self.longPSF,self.repFact)
    if display:
        fig=pyl.figure('Line PSF')
        pyl.imshow(self.longPSF,interpolation='nearest',origin='lower')
        pyl.show()
def plant_old(self, x, y, amp, indata,
              useLinePSF=False, returnModel=False, verbose=False,
              addNoise=True, plantIntegerValues=False, gain=None, plantBoxWidth = None):
    """
    Plant a single source at (x, y) with amplitude amp into indata.

    Kept for testing purposes only -- use plant() instead.

    - indata: 2D image array (modified in place unless returnModel=True).
    - useLinePSF=True to plant the TSF instead of the circular PSF.
    - returnModel=True returns the noiseless model image instead of
      modifying indata.
    - addNoise=True adds gaussian noise (gain must be set).
    - plantIntegerValues=True rounds the model before adding.
    - plantBoxWidth limits the modified region to a box of width
      2*plantBoxWidth+1 centred on the source.
    """
    rf2 = float(self.repFact*self.repFact)
    self.boxSize = int(len(self.R[0])/self.repFact/2)

    # Work on a zero-padded copy so sources near the edge don't wrap/clip.
    (A,B) = indata.shape
    bigIn = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
    bigOut = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
    bigIn[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize] = indata

    # Integer pixel and sub-pixel (supersampled) offsets of the source.
    xint,yint = int(x)-self.boxSize,int(y)-self.boxSize
    cx,cy = x-int(x)+self.boxSize,y-int(y)+self.boxSize
    sx,sy = int(round((x-int(x))*self.repFact)),int(round((y-int(y))*self.repFact))
    cut = np.copy(bigIn[self.boxSize+yint:yint+3*self.boxSize+1,self.boxSize+xint:self.boxSize+xint+3*self.boxSize+1])

    # _flatRadial clobbers self.imData; preserve and restore it.
    if self.imData is not None:
        origData = np.copy(self.imData)
    else: origData = None
    self.imData = cut
    if type(cx)==type(1.0):
        self._flatRadial(np.array([cx]),np.array([cy]))
    else:
        self._flatRadial(cx,cy)
    if origData is not None:
        self.imData = origData

    if not useLinePSF:
        if self.lookupTable is not None:
            # Shift the lookup table right/up to account for the sub-pixel
            # centroid (the moffat part is centred by _flatRadial itself).
            slu = np.copy(self.lookupTable)
            (a,b) = slu.shape
            if sx>0:
                sec = slu[:,b-sx:]
                slu[:,sx:] = slu[:,:b-sx]
                slu[:,:sx] = sec
            if sy>0:
                sec = slu[a-sy:,:]
                slu[sy:,:] = slu[:a-sy,:]
                slu[:sy,:] = sec
            # Merged moffat + lookup-table downsampling (single pass) for a
            # significant performance boost over the original two-step code.
            psf = downSample2d(slu+self.moffat(self.repRads)/rf2,self.repFact)*amp*rf2
        else:
            # bug fix: this branch previously read `psf = moff`, but the
            # line defining `moff` had been commented out -> NameError.
            psf = downSample2d(self.moffat(self.repRads),self.repFact)*amp
            if verbose: print("Lookup table is none. Just using Moffat profile.")
    else:
        lpsf = np.copy(self.longPSF)
        (a,b) = lpsf.shape
        # Shift the TSF right/up for the sub-pixel centroid; the x10
        # supersampled shift outperforms cubic/linear interpolation here.
        if sx>0:
            sec = lpsf[:,b-sx:]
            lpsf[:,sx:] = lpsf[:,:b-sx]
            lpsf[:,:sx] = sec
        if sy>0:
            sec = lpsf[a-sy:,:]
            lpsf[sy:,:] = lpsf[:a-sy,:]
            lpsf[:sy,:] = sec
        psf=downSample2d(lpsf,self.repFact)*amp

        # Cheat to handle the outer edges that can go negative after
        # convolution.
        w=np.where(psf<0)
        psf[w]=0.0

    self.fitFluxCorr=1. #HACK! Could get rid of this in the future...

    (a,b) = psf.shape
    if addNoise:
        if gain is not None:
            psf+=sci.randn(a,b)*np.sqrt(np.abs(psf)/float(gain) )
        else:
            print()
            print("Please set the gain variable before trying to plant with Poisson noise.")
            print()
            raise TypeError
    if plantIntegerValues:
        psf = np.round(psf)

    bigOut[yint+self.boxSize:yint+3*self.boxSize+1,xint+self.boxSize:xint+3*self.boxSize+1]+=psf
    if returnModel:
        return bigOut[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize]

    if plantBoxWidth is not None:
        a = max(0,int(y)-plantBoxWidth)
        b = min(A,int(y)+plantBoxWidth+1)
        c = max(0,int(x)-plantBoxWidth)
        d = min(B,int(x)+plantBoxWidth+1)
        indata[a:b,c:d] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][a:b,c:d]
    else:
        indata+=bigOut[self.boxSize:A+self.boxSize, self.boxSize:B+self.boxSize]
    return indata
def plant(self, x_in, y_in, amp_in, indata,
          useLinePSF=False, returnModel=False, verbose=False,
          addNoise=True, plantIntegerValues=False, gain=None, plantBoxWidth = None):
    """
    Plant star(s) at coordinates x_in,y_in with amplitude(s) amp_in.

    - x_in, y_in, amp_in can be individual values, or 1D arrays of values.
    - indata is the array in which to plant; recommend passing np.copy(indata).
    - addNoise=True to add gaussian noise; gain must then be set manually.
    - useLinePSF=True to use the TSF rather than the circular PSF.
    - returnModel=True to not actually plant in the data, but return an array
      of the same size containing the TSF or PSF without noise added.
    - plantBoxWidth: width of the planting region in pixels centred on the
      source; pixels are only modified within a box of width
      2*plantBoxWidth+1. Only applies to a single plant location -- with
      multiple locations the entire image is altered.
    - plantIntegerValues=True rounds all values before adding to the data.
    - verbose will do this all verbosely.
    """
    # Normalize scalars to length-1 arrays so one code path handles both.
    if not hasattr(x_in,'__len__'):
        x_in,y_in,amp_in = np.array([x_in]),np.array([y_in]),np.array([amp_in])

    rf2 = float(self.repFact*self.repFact)
    self.boxSize = int(len(self.R[0])/self.repFact/2)

    # Work on a zero-padded copy so sources near the edge don't wrap/clip.
    (A,B) = indata.shape
    bigIn = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
    bigOut = np.zeros((A+2*self.boxSize,B+2*self.boxSize),dtype=indata.dtype)
    bigIn[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize] = indata

    # _flatRadial clobbers self.imData; preserve and restore it per source.
    if self.imData is not None:
        origData = np.copy(self.imData)
    else: origData = None

    for ii in range(len(x_in)):
        x,y,amp = x_in[ii],y_in[ii],amp_in[ii]
        # Integer pixel and sub-pixel (supersampled) offsets of the source.
        xint,yint = int(x)-self.boxSize,int(y)-self.boxSize
        cx,cy = x-int(x)+self.boxSize,y-int(y)+self.boxSize
        sx,sy = int(round((x-int(x))*self.repFact)),int(round((y-int(y))*self.repFact))
        cut = np.copy(bigIn[self.boxSize+yint:yint+3*self.boxSize+1,self.boxSize+xint:self.boxSize+xint+3*self.boxSize+1])
        self.imData = cut
        if type(cx)==type(1.0):
            self._flatRadial(np.array([cx]),np.array([cy]))
        else:
            self._flatRadial(cx,cy)
        if origData is not None:
            self.imData = origData

        if not useLinePSF:
            if self.lookupTable is not None:
                # Shift the lookup table right/up to account for the
                # sub-pixel centroid (the moffat part is centred by
                # _flatRadial itself).
                slu = np.copy(self.lookupTable)
                (a,b) = slu.shape
                if sx>0:
                    sec = slu[:,b-sx:]
                    slu[:,sx:] = slu[:,:b-sx]
                    slu[:,:sx] = sec
                if sy>0:
                    sec = slu[a-sy:,:]
                    slu[sy:,:] = slu[:a-sy,:]
                    slu[:sy,:] = sec
                # Merged moffat + lookup-table downsampling (single pass)
                # for a significant performance boost.
                psf = downSample2d(slu+self.moffat(self.repRads)/rf2,self.repFact)*amp*rf2
            else:
                # bug fix: this branch previously read `psf = moff`, but the
                # line defining `moff` had been commented out -> NameError.
                psf = downSample2d(self.moffat(self.repRads),self.repFact)*amp
                if verbose: print("Lookup table is none. Just using Moffat profile.")
        else:
            lpsf = np.copy(self.longPSF)
            (a,b) = lpsf.shape
            # Shift the TSF right/up for the sub-pixel centroid; the x10
            # supersampled shift outperforms cubic/linear interpolation.
            if sx>0:
                sec = lpsf[:,b-sx:]
                lpsf[:,sx:] = lpsf[:,:b-sx]
                lpsf[:,:sx] = sec
            if sy>0:
                sec = lpsf[a-sy:,:]
                lpsf[sy:,:] = lpsf[:a-sy,:]
                lpsf[:sy,:] = sec
            psf=downSample2d(lpsf,self.repFact)*amp

            # Cheat to handle the outer edges that can go negative after
            # convolution.
            w=np.where(psf<0)
            psf[w]=0.0

        bigOut[yint+self.boxSize:yint+3*self.boxSize+1,xint+self.boxSize:xint+3*self.boxSize+1]+=psf

    self.fitFluxCorr=1. #HACK! Could get rid of this in the future...

    (a,b) = psf.shape
    if addNoise:
        if gain is not None:
            # Noise is applied once over the whole padded model image.
            bigOut+=sci.randn(bigOut.shape[0],bigOut.shape[1])*np.sqrt(np.abs(bigOut)/float(gain) )
        else:
            print("Please set the gain variable before trying to plant with Poisson noise.")
            raise TypeError
    if plantIntegerValues:
        bigOut = np.round(bigOut)

    if returnModel:
        return bigOut[self.boxSize:A+self.boxSize,self.boxSize:B+self.boxSize]

    if plantBoxWidth is not None and len(x_in) == 1:
        # Extract scalars explicitly rather than calling int() on the
        # length-1 arrays (same values; avoids deprecated array->int cast).
        x, y = x_in[0], y_in[0]
        a = max(0,int(y)-plantBoxWidth)
        b = min(A,int(y)+plantBoxWidth+1)
        c = max(0,int(x)-plantBoxWidth)
        d = min(B,int(x)+plantBoxWidth+1)
        indata[a:b,c:d] += bigOut[self.boxSize:A + self.boxSize, self.boxSize:B + self.boxSize][a:b,c:d]
    else:
        indata+=bigOut[self.boxSize:A+self.boxSize, self.boxSize:B+self.boxSize]
    return indata
def remove(self,x,y,amp,data,useLinePSF=False):
    """
    The opposite of plant: subtract a noiseless model source from data.

    Stores the model image on self.model and returns data minus that model.
    """
    model_image = self.plant(x, y, amp, data,
                             addNoise=False, returnModel=True,
                             useLinePSF=useLinePSF)
    self.model = model_image
    return data - model_image
def writeto(self,name):
    """
    Save the round (downsampled) PSF to a FITS file called name.

    Any pre-existing file of the same name is removed first.
    """
    try:
        os.remove(name)
    except OSError:
        # bug fix: was a bare `except: pass`. Only a missing/unremovable
        # file is expected here; anything else will surface in writeto below.
        pass
    HDU=pyf.PrimaryHDU(self.psf)
    List=pyf.HDUList([HDU])
    List.writeto(name)
def fitMoffat(self, imData, centX, centY,
              boxSize=25, bgRadius=20,
              verbose=False, mode='smart',
              quickFit = False, fixAB=False,
              fitXY=False, fitMaxRadius=None, logRadPlot=False,
              ftol = 1.49012e-8, maxfev = 250):
    """
    Fit a moffat profile to the input data, imData, at point centX,centY.

    - boxSize is the width around the centre used in the fitting.
    - bgRadius is the radius beyond which the background is estimated.
      This must be smaller than the PSF width itself used when initializing
      the modelPSF object (parameters x and y).
    - verbose=True to see a lot of fitting output and a radial plot of each fit.
    - logRadPlot=True to see the plot in log radius.
    - mode='smart' is the background determination method used. See bgFinder
      for details. If mode is set to a number, the background is fixed to
      that value during the fitting procedure.
    - quickFit=True fits at native (non-supersampled) resolution.
    - fixAB=True to fit only the amplitude (alpha/beta held fixed).
    - fitXY=False *** this is currently not implemented / hacky ***
    - fitMaxRadius restricts the residuals to radii below this value.
    - ftol, maxfev are passed through to scipy.optimize.leastsq.

    Side effects: sets self.A, self.alpha, self.beta, self.bg, self.chi,
    self.chiFluxNorm, self.fitted, and regenerates self.PSF/self.psf.
    Returns the final residual vector (or (dx,dy) in the fitXY path).
    """
    self.verbose = verbose
    self.imData = np.copy(imData)
    self.boxSize = boxSize

    # -0.5 converts pixel-centre coordinates to the grid convention used
    # by _flatRadial.
    self._flatRadial(centX-0.5,centY-0.5)#set the radial distribution pixels

    # Background from pixels beyond bgRadius, unless a fixed number was given.
    w = np.where(self.rads>bgRadius)
    if not (isinstance(mode, float) or isinstance(mode, int)):
        bgf = bgFinder.bgFinder(self.subSec[w])
        self.bg = bgf(method=mode)
    else:
        self.bg = float(mode) # doing this for safety

    # Two amplitude guesses: peak-based and flux-based; use the peak-based
    # one unless they disagree badly (>50%).
    peakGuess_1 = (np.max(self.subSec)-self.bg)/(np.max(self.moffat(self.rads)))
    peakGuess_2 = (np.sum(self.subSec)-self.bg*self.subSec.size)/(np.sum(self.moffat(self.rads)))
    if (abs(peakGuess_1-peakGuess_2)/peakGuess_1)<0.5:
        peakGuess = peakGuess_1
    else:
        peakGuess = peakGuess_2

    if fitXY:
        # Brute-force grid search over sub-pixel centroid offsets, fitting
        # only the amplitude at each trial position.
        print('This is hacky and really slow. Not yet meant for production.')
        self.verbose = False
        best = [1.e8,-1.,-1.,-1.]
        print('Fitting XYA')
        deltaX = np.arange(-0.3,0.3+1./float(self.repFact),1./float(self.repFact)/2.)
        deltaY = np.arange(-0.3,0.3+1./float(self.repFact),1./float(self.repFact)/2.)
        for ii in range(len(deltaX)):
            for jj in range(len(deltaY)):
                self._flatRadial(centX+deltaX[ii],centY+deltaY[jj])
                lsqf = opti.leastsq(self._residFAB,(peakGuess),args=(self.alpha,self.beta,fitMaxRadius),maxfev=maxfev)
                res = np.sum(self._residFAB((lsqf[0][0]),self.alpha,self.beta,fitMaxRadius)**2)
                if best[0]>= res:
                    best = [res,lsqf[0],deltaX[ii],deltaY[jj]]
        return (best[2],best[3])
    elif fixAB:
        lsqf = opti.leastsq(self._residFAB,(peakGuess),args=(self.alpha,self.beta,fitMaxRadius),maxfev=maxfev)
    elif quickFit:
        # NOTE(review): args=(fitMaxRadius) is not a tuple; scipy.optimize
        # wraps non-tuple args itself, so this works -- confirm before
        # "fixing" to args=(fitMaxRadius,).
        lsqf = opti.leastsq(self._residNoRep,(peakGuess,self.alpha,self.beta),args=(fitMaxRadius),maxfev=maxfev,ftol=ftol)
    else:
        lsqf = opti.leastsq(self._resid,(peakGuess,self.alpha,self.beta),args=(fitMaxRadius),maxfev=maxfev,ftol=ftol)
    if self.verbose: print(lsqf)

    self.A = lsqf[0][0]
    if not fixAB:
        self.alpha = lsqf[0][1]
        self.beta = lsqf[0][2]

    # Final residuals and reduced-chi-like statistics.
    if fixAB:
        res=self._residFAB((self.A),self.alpha,self.beta,fitMaxRadius)
    else:
        res=self._resid((self.A,self.alpha,self.beta),fitMaxRadius)
    self.chi = np.sqrt(np.sum(res**2)/float(len(res)-1))
    self.chiFluxNorm = np.sqrt(np.sum((res/self.A)**2)/float(len(res)-1))
    self.fitted = True

    # Regenerate the (normalized) supersampled and native PSFs with the
    # fitted parameters.
    self.PSF = self.moffat(self.R)
    self.PSF /= np.sum(self.PSF)
    self.psf = downSample2d(self.PSF,self.repFact)

    if self.verbose:
        print(' A:%s, alpha:%s, beta:%s'%(self.A,self.alpha,self.beta))
        fig = pyl.figure('Radial Profile')
        ax = fig.add_subplot(111)
        pyl.scatter(downSample2d(self.repRads,self.repFact),self.subSec)
        r = np.linspace(0,np.max(self.rads),100)
        pyl.plot(r,self.A*self.moffat(r)+self.bg,'r--')
        fw = self.FWHM(fromMoffatProfile=True)
        print('FWHM: {}'.format(fw))
        pyl.title('FWHM: {:.3f} alpha: {:.3f} beta: {:.3f}'.format(fw,self.alpha,self.beta))
        if logRadPlot: ax.set_xscale('log')
        pyl.show()

    return res
def genLookupTable(self, imData, centXs, centYs,
                   verbose=False, bpMask=None, threeSigCut=True,
                   bgRadius=20., returnAmpsCutouts = False,
                   bgMode = 'smart'):
    """
    Generate the lookup table from input imData and x/y star coordinates in
    the numpy arrays centXs,centYs.

    verbose=True to see a lot of fitting output.
    bpMask=array to provide a bad pixel mask (0 marks bad pixels).
    threeSigCut=True to apply a 3-sigma cut before taking the mean lookup
    table. Only useful for ~5 or more stars.
    bgRadius, bgMode are passed through to fitMoffat for each star.
    returnAmpsCutouts=True returns (fluxes, cutouts, cxs, cys, bgs): the
    fitted amplitudes, image cutouts, centroid x/y in each cutout, and
    backgrounds.

    NOTE(review): bad pixels are patched in the CALLER's imData array
    (in-place mutation) -- confirm callers expect this.
    """
    adjCentXs=centXs-0.5
    adjCentYs=centYs-0.5

    self.verbose=verbose
    self.imData=imData*1.0
    self.boxSize=int(len(self.R[0])/self.repFact/2)

    self.psfStars=[]

    # bug fix: was `if bpMask!=None:`, which on a numpy array performs an
    # elementwise comparison and raises "truth value is ambiguous".
    if bpMask is not None:
        w=np.where(bpMask==0)
        imData[w]=np.median(imData)

    shiftIms=[]
    fluxes=[]
    cutouts = []
    cxs = []
    cys = []
    bgs = []
    for ii in range(len(centXs)):
        # store the psf star location
        self.psfStars.append([centXs[ii],centYs[ii]])

        xint,yint=int(adjCentXs[ii])-self.boxSize-2,int(adjCentYs[ii])-self.boxSize-2
        cx,cy=adjCentXs[ii]-int(adjCentXs[ii])+self.boxSize+2,adjCentYs[ii]-int(adjCentYs[ii])+self.boxSize+2
        cx+=0.5
        cy+=0.5
        cut=imData[yint:yint+2*self.boxSize+5,xint:xint+2*self.boxSize+5]

        # Skip stars whose cutout is clipped by the image edge.
        (cA,cB) = cut.shape
        if cA!=2*self.boxSize+5 or cB!=2*self.boxSize+5: continue

        # Amplitude-only fit (alpha/beta fixed) to this star.
        self.fitMoffat(cut,np.array([cx]),np.array([cy]),self.boxSize,verbose=verbose,fixAB=True,fitXY=False,fitMaxRadius=3.,bgRadius=bgRadius,mode=bgMode)
        self.imData=np.copy(imData) #this is necessary because the imdata gets set to the shifted image subsection

        moff=downSample2d(self.moffat(self.repRads),self.repFact)*self.A
        if returnAmpsCutouts:
            cutouts.append(np.copy(cut))
            cxs.append(cx)
            cys.append(cy)
            bgs.append(self.bg)

        # Residual = star minus background minus fitted moffat; this is what
        # the lookup table accumulates.
        diff=cut-self.bg
        diff[2:-2,2:-2]-=moff

        fluxes.append(self.A)
        self.psfStars[ii].append(self.A)

        # Supersample and re-centre the residual on the star's centroid.
        repCut=expand2d(diff,self.repFact)
        cx,cy=adjCentXs[ii]-int(adjCentXs[ii])+self.boxSize+2,adjCentYs[ii]-int(adjCentYs[ii])+self.boxSize+2
        kx,ky=int(round(cx*self.repFact)),int(round(cy*self.repFact))
        shiftedImage=repCut[ky-self.repFact*self.boxSize:ky+self.repFact*self.boxSize+self.repFact,
                            kx-self.repFact*self.boxSize:kx+self.repFact*self.boxSize+self.repFact]
        shiftIms.append(shiftedImage)
    shiftIms=np.array(shiftIms)
    fluxes=np.array(fluxes)

    # Normalize each star's residual by its fitted flux.
    self.maxFlux=1.0
    invFluxes=self.maxFlux/fluxes
    for ii in range(len(shiftIms)):
        shiftIms[ii]*=invFluxes[ii]

    if threeSigCut:
        # Mask >3-sigma outliers (relative to the per-pixel median across
        # stars) before averaging.
        meanLUT=np.median(shiftIms,axis=0)
        stdLUT=np.std(shiftIms,axis=0)
        bigMean=np.repeat(np.array([meanLUT]),len(shiftIms),axis=0)
        w=np.where( np.abs(bigMean-shiftIms)>3*stdLUT)
        shiftIms[w]=np.nan
        self.lookupTable=np.nanmean(shiftIms,axis=0)/self.maxFlux
    else:
        self.lookupTable=np.nanmean(shiftIms,axis=0)/self.maxFlux

    # NOTE(review): assigns `psfStar` (singular) -- possibly a typo for
    # psfStars, but left as-is in case external code reads psfStar.
    self.psfStar=np.array(self.psfStars)

    self.genPSF()

    if returnAmpsCutouts:
        return (fluxes,cutouts,cxs,cys,bgs)
    return None
def genPSF(self,A=1.0):
    """
    Regenerate the full supersampled PSF (moffat + lookup table), scaled
    by amplitude A, along with its downsampled counterpart.
    Convenience function only.
    """
    # Moffat profile with the grid's minimum radius shifted to zero.
    self.moffProf = self.moffat(self.R - np.min(self.R))
    # Lookup-table residuals carry a repFact**2 supersampling factor.
    lut_term = self.lookupTable * self.repFact * self.repFact
    self.fullPSF = (self.moffProf + lut_term) * A
    self.fullpsf = downSample2d(self.fullPSF, self.repFact)
def _flatRadial(self,centX,centY):
    """
    Convenience function for the fitMoffat routines.

    Cuts a boxSize-wide subsection of self.imData centred on (centX,centY)
    and precomputes radial-distance grids at both native and supersampled
    resolution.

    Side effects: sets self.subSec, self.repSubsec, self.repRads, self.rads,
    and the offset arrays self.dX/self.dY (supersampled) and
    self.dx/self.dy (native).
    """
    # Accept length-1 arrays as well as plain floats.
    if type(centX)!=type(1.) and type(centX)!=type(np.float64(1.)):
        centX=centX[0]
        centY=centY[0]

    # Clip the cutout window to the image bounds.
    (A,B)=self.imData.shape
    a=int(max(0,centY-self.boxSize))
    b=int(min(A,centY+self.boxSize+1))
    c=int(max(0,centX-self.boxSize))
    d=int(min(B,centX+self.boxSize+1))
    self.subSec=self.imData[a:b,c:d]
    self.repSubsec=expand2d(self.subSec,self.repFact)

    # Supersampled coordinate ranges across the cutout.
    rangeY=np.arange(a*self.repFact,b*self.repFact)/float(self.repFact)
    rangeX=np.arange(c*self.repFact,d*self.repFact)/float(self.repFact)
    dx2=(centX-rangeX)**2

    # Distance of every supersampled pixel from the centroid. This is the
    # vectorized equivalent of the obvious per-row loop.
    # NOTE(review): np.repeat(dy2,len(rangeY)) then reshape(lenY,lenX) only
    # has the right number of elements when the cutout is square
    # (lenY == lenX) -- confirm non-square cutouts never reach here.
    dy2 = (centY-rangeY)**2
    self.repRads = (np.repeat(dy2,len(rangeY)).reshape(len(rangeY),len(rangeX)) + np.repeat(np.array([dx2]),len(rangeY),axis = 0).reshape(len(rangeY),len(rangeX)))**0.5

    # Signed offsets from the centroid, supersampled and native resolution.
    self.dX=centX-rangeX
    self.dY=centY-rangeY
    self.dx=centX-np.arange(c,d)
    self.dy=centY-np.arange(a,b)

    # Native-resolution radial distances, built the same vectorized way.
    arrR = []
    dy2 = (centY - np.arange(a, b)) ** 2
    dx2 = (centX - np.arange(c, d)) ** 2
    for ii in range(len(dy2)):
        arrR.append(dy2[ii] + dx2)
    arrR = np.array(arrR) ** 0.5
    self.rads=np.copy(arrR)
def _resid(self,p,maxRad):
    """
    Least-squares residuals for a full (amplitude, alpha, beta) moffat fit,
    evaluated on the supersampled grid and downsampled to pixel resolution.

    Side effect: stores the trial alpha and beta on the instance.
    maxRad, when given, restricts the residuals to radii below it.
    """
    (amp, alpha, beta) = p
    self.alpha = alpha
    self.beta = beta

    model = self.bg + amp * downSample2d(self.moffat(self.repRads), self.repFact)
    err = (self.subSec - model).reshape(self.subSec.size)

    # Non-physical parameters: inflate the full-frame residuals so the
    # optimizer steps away. (The radius-restricted branch is returned
    # un-inflated, matching the original behaviour.)
    if self.alpha <= 0 or self.beta <= 0:
        if maxRad is not None:
            keep = np.where(self.rads.reshape(self.subSec.size) < maxRad)
            return err[keep]
        return err * np.inf

    if self.verbose: print(amp, alpha, beta, np.sqrt(np.sum(err ** 2) / (self.subSec.size - 1.)))

    if maxRad is not None:
        keep = np.where(self.rads.reshape(self.subSec.size) < maxRad)
        return err[keep]
    return err
def _residNoRep(self,p,maxRad):
    """
    Quick-fit residuals: same as _resid but evaluated directly at native
    pixel resolution (no supersampling/downsampling round trip).

    Side effect: stores the trial alpha and beta on the instance.
    maxRad, when given, restricts the residuals to radii below it.
    """
    (amp, alpha, beta) = p
    self.alpha = alpha
    self.beta = beta

    model = self.bg + amp * self.moffat(self.rads)
    err = (self.subSec - model).reshape(self.subSec.size)

    # Non-physical parameters: inflate the full-frame residuals. (Strictly
    # negative test here, matching the original behaviour.)
    if self.alpha < 0 or self.beta < 0:
        if maxRad is not None:
            keep = np.where(self.rads.reshape(self.subSec.size) < maxRad)
            return err[keep]
        return err * np.inf

    if self.verbose: print(amp, alpha, beta, np.sqrt(np.sum(err ** 2) / (self.subSec.size - 1.)))

    if maxRad is not None:
        keep = np.where(self.rads.reshape(self.subSec.size) < maxRad)
        return err[keep]
    return err
    def _residFAB(self,p,alpha,beta,maxRad):
        """Residuals with Fixed Alpha/Beta: only the amplitude A varies.

        p carries the amplitude alone; alpha and beta are held at the given
        values (and stored on self). maxRad is accepted for interface
        compatibility with _resid but is not used here — the old masking
        code is kept below, commented out.
        """
        (A)=p
        self.alpha=alpha
        self.beta=beta
        err=(self.subSec-(self.bg+A*downSample2d(self.moffat(self.repRads),self.repFact))).reshape(self.subSec.size)
        #if maxRad>0:
        #    w=np.where(self.rDist<=maxRad)
        #else:
        #    w=np.arange(len(self.rDist))
        #err=self.fDist[w]-(self.bg+A*self.moffat(self.rDist[w]))
        if self.verbose: print(A,alpha,beta,np.sqrt(np.sum(err**2)/(self.subSec.size-1.)))
        return err
"""
#much too slow compared to fitting each star individually
def _residMultiStarTest(self,p,maxRad):
#print p
alpha = p[-2]
beta = p[-1]
#(A,alpha,beta)=p
self.alpha=alpha
self.beta=beta
errs = []
n = 0
for ii in range(len(self.repRadsArr)):
A = p[ii]
err=(self.subSecs[ii]-(self.bgs[ii]+A*downSample2d(self.moffat(self.repRadsArr[ii]),self.repFact))).reshape(self.subSecs[ii].size)
errs.append(np.copy(err))
n+=len(err)
errs = np.array(errs).reshape(n)
if self.alpha<0 or self.beta<0: return np.inf
if self.verbose: print p,np.sqrt(np.sum(errs**2)/(n-1.))
return err
"""
if __name__=="__main__":
import pylab as pyl
psfNoLine=modelPSF(np.arange(25),np.arange(25),alpha=1.5,beta=2.0,repFact=10)
psfNoLine.writeto('noline.fits')
print()
psfLine=modelPSF(np.arange(25),np.arange(25),alpha=1.5,beta=2.0,repFact=10)
psfLine.line(4.0,32.,0.45)
psfLine.writeto('line.fits')
sys.exit()
(A,B)=psf.shape
for i in range(int(A/2),int(A/2+1)):
pyl.plot(psf.x,psf.psf[i,:])
for i in range(int(A*10/2),int(A*10/2+1)):
pyl.plot(psf.X,psf.PSF[i,:],linestyle=':')
pyl.show()
|
fraserw/trippy
|
trippy/psf.py
|
Python
|
gpl-3.0
| 52,236
|
# encoding= utf-8
##
# Playback/Selection test.
# <p>
# Description of the test.
#
##
from qtaste import *
import time
# update in order to cope with the javaGUI extension declared in your testbed configuration.
javaguiMI = testAPI.getJavaGUI(INSTANCE_ID=testData.getValue("JAVAGUI_INSTANCE_NAME"))
subtitler = testAPI.getSubtitler()
importTestScript("TabbedPaneSelection")
def step1():
    """
    @step Description of the actions done for this step
    @expected Description of the expected result
    """
    # First switch to the tab under test, then select a row in its table.
    doSubSteps(TabbedPaneSelection.changeTabById)
    subtitler.setSubtitle(testData.getValue("COMMENT"))
    component = testData.getValue("COMPONENT_NAME")
    occurence = testData.getIntValue("OCCURENCE")
    columnName = testData.getValue("COLUMN_NAME")
    columnValue = testData.getValue("COLUMN_VALUE")
    # Fail fast if the table does not contain the requested occurrence.
    # NOTE(review): when OCCURENCE is -1 (select first match, see below)
    # this count check can never fail — presumably intended; confirm.
    if javaguiMI.countTableRows(component, columnName, columnValue) < occurence:
        testAPI.stopTest(Status.FAIL, "Not enough occurences in the table")
    if occurence == -1:
        # -1 means "no specific occurrence": let the API pick the match.
        javaguiMI.selectInTable(component, columnName, columnValue)
    else:
        javaguiMI.selectInTable(component, columnName, columnValue, occurence)
doStep(step1)
|
remybaranx/qtaste
|
demo/TestSuites/PlayBack/SelectionInTable/TestScript.py
|
Python
|
gpl-3.0
| 1,188
|
"""Load a pickled layer snapshot and print it."""
from pickle import load

# Use a context manager so the file handle is closed even if load() raises
# (the original left the file object open for the life of the process).
with open('data/snapshot2.dat', 'rb') as snapshot_file:
    layers = load(snapshot_file)
print(layers)
|
bdvllrs/Pyanissimo
|
ex/test.py
|
Python
|
gpl-3.0
| 87
|
# coding: utf-8
"""
Copyright 2013 Oliver Schnabel
This file is part of ADPY.
ADPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
ADPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ADPY. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import numpy
import string
#import utils
# override numpy definitions
# Elementary-function names to override: each gets a module-level wrapper
# (generated below) that dispatches either to numpy or to an argument's own
# class method of the same name (e.g. an AD type providing its own 'sin').
numpy_function_names = [
    'exp', 'log', 'log10', 'sqrt', 'pow',
    'sin', 'cos', 'tan',
    'arcsin', 'arccos', 'arctan',
    'sinh', 'cosh', 'tanh',
    'sign']

# Source-code template for one generated wrapper; $function_name and
# $namespace are substituted before exec'ing.
function_template = string.Template('''
def $function_name(*args, **kwargs):
    """
    generic implementation of $function_name
    this function calls, depending on the input arguments,
    either

    * numpy.$function_name
    * numpy.linalg.$function_name
    * args[i].__class__
    """
    case,arg = 0,0
    for na,a in enumerate(args):
        if hasattr(a.__class__, '$function_name'):
            case = 1
            arg = na
            break
    if case==1:
        return getattr(args[arg].__class__, '$function_name')(*args, **kwargs)
    elif case==0:
        return $namespace.__getattribute__('$function_name')(*args, **kwargs)
    else:
        return $namespace.__getattribute__('$function_name')(*args, **kwargs)
''')

# Generate the wrappers in this module's namespace.
# NOTE: Python 2 exec-statement syntax; the template's final else branch is
# unreachable (case is always 0 or 1) and duplicates the elif branch.
for function_name in numpy_function_names:
    exec function_template.substitute(function_name=function_name,
                                      namespace='numpy')
|
zwenson/ADPY
|
ADPY/ADFUN/globalfuncs.py
|
Python
|
gpl-3.0
| 1,896
|
"""
A simple API to retrieve user info
"""
from athena.classes.api import Api
from athena.classes.input_field import InputField
class UserApi(Api):
    """API exposing stored user-profile fields (username, names, contacts)."""

    def __init__(self):
        # Persisted fields for this API; only the username is mandatory.
        self.save_data = [
            InputField('username', require=True),
            InputField('full_name'),
            InputField('nickname'),
            InputField('phone'),
            InputField('email'),
        ]
        super().__init__('user_api')

    def name(self):
        """Return the preferred display name: nickname, else full name, else None."""
        for candidate in ('nickname', 'full_name'):
            if hasattr(self, candidate):
                return getattr(self, candidate)
        return None
|
athena-voice/athena-voice-client
|
athena/api_library/user_api.py
|
Python
|
gpl-3.0
| 677
|
import serial
import pynmea2
import time
filename = time.strftime("surface_data_%Y%m%d.sfc")
print ("Opening new file: ",filename)
file = open(filename,'a')
wxt = serial.Serial('COM1', 19200, timeout=1.0) # open wxt536 serial port
gps = serial.Serial('COM18', 19200, timeout=1.0) # open arduino combo device
#print(wxt.name) # check which port was really used
while True:
newFilename = time.strftime("surface_data_%Y%m%d.sfc")
if newFilename != filename:
file.close()
filename = newFilename
file = open(filename, 'a')
print ("Opening new file: ",filename)
while (wxt.in_waiting):
x = wxt.readline()
print (x.decode("utf-8"),end="")
file.write(x.decode("utf-8"))
while (gps.in_waiting):
x = gps.readline()
print (x.decode("utf-8"),end="")
file.write(x.decode("utf-8"))
# sadly we never get here :(
file.close()
wxt.close()
gps.close()
|
dwkennedy/multi-sensor
|
serialReader.py
|
Python
|
gpl-3.0
| 984
|
import re
import urllib
import urllib2
from urlparse import urlsplit
import argparse
import os
import errno
import unicodedata
import getpass
import netrc
import mechanize
import cookielib
from bs4 import BeautifulSoup
import tempfile
from os import path
import platform
import _version
class CourseraDownloader(object):
    """
    Class to download content (videos, lecture notes, ...) from coursera.org for
    use offline.

    https://github.com/dgorissen/coursera-dl

    :param username: username
    :param password: password
    :keyword proxy: http proxy, eg: foo.bar.com:1234
    :keyword parser: xml parser (defaults to lxml)
    :keyword ignorefiles: comma separated list of file extensions to skip (e.g., "ppt,srt")
    """

    # URL templates; %s is the short course name (e.g. algo2012-p2).
    BASE_URL =    'https://class.coursera.org/%s'
    HOME_URL =    BASE_URL + '/class/index'
    LECTURE_URL = BASE_URL + '/lecture/index'
    QUIZ_URL =    BASE_URL + '/quiz/index'
    AUTH_URL =    BASE_URL + "/auth/auth_redirector?type=login&subtype=normal"
    LOGIN_URL =   "https://www.coursera.org/maestro/api/user/login"

    #see http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser
    DEFAULT_PARSER = "lxml"

    # how long to try to open a URL before timing out
    TIMEOUT=60.0

    def __init__(self,username,password,proxy=None,parser=DEFAULT_PARSER,ignorefiles=None):
        self.username = username
        self.password = password
        self.parser = parser

        # Split "ignorefiles" argument on commas, strip, remove prefixing dot
        # if there is one, and filter out empty tokens.
        # NOTE(review): assumes ignorefiles is a string; the default None
        # would raise AttributeError here — callers always pass a string.
        self.ignorefiles = [x.strip()[1:] if x[0]=='.' else x.strip()
                            for x in ignorefiles.split(',') if len(x)]

        self.browser = None
        self.proxy = proxy

    def login(self,className):
        """
        Automatically generate a cookie file for the coursera site.
        """
        #TODO: use proxy here
        # Temp file that will hold the Mozilla-format cookie jar.
        hn,fn = tempfile.mkstemp()
        cookies = cookielib.LWPCookieJar()
        handlers = [
            urllib2.HTTPHandler(),
            urllib2.HTTPSHandler(),
            urllib2.HTTPCookieProcessor(cookies)
        ]
        opener = urllib2.build_opener(*handlers)

        # Hit the lecture page once just to collect the csrf_token cookie.
        url = self.lecture_url_from_name(className)
        req = urllib2.Request(url)
        try:
            res = opener.open(req)
        except urllib2.HTTPError as e:
            if e.code == 404:
                raise Exception("Unknown class %s" % className)
            # NOTE(review): any non-404 HTTPError is silently swallowed here
            # (and `res` is never used); failure then surfaces indirectly at
            # the csrf-cookie check below.

        # get the csrf token
        csrfcookie = [c for c in cookies if c.name == "csrf_token"]
        if not csrfcookie: raise Exception("Failed to find csrf cookie")
        csrftoken = csrfcookie[0].value
        opener.close()

        # call the authenticator url:
        cj = cookielib.MozillaCookieJar(fn)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),
                                      urllib2.HTTPHandler(),
                                      urllib2.HTTPSHandler())

        # The login endpoint requires the csrf token both as a cookie and as
        # an X-CSRFToken header, plus a coursera.org Referer.
        opener.addheaders.append(('Cookie', 'csrftoken=%s' % csrftoken))
        opener.addheaders.append(('Referer', 'https://www.coursera.org'))
        opener.addheaders.append(('X-CSRFToken', csrftoken))

        req = urllib2.Request(self.LOGIN_URL)
        data = urllib.urlencode({'email_address': self.username,'password': self.password})
        req.add_data(data)

        try:
            opener.open(req)
        except urllib2.HTTPError as e:
            if e.code == 401:
                raise Exception("Invalid username or password")
            # NOTE(review): other HTTP errors fall through to the session
            # check below.

        # check if we managed to login
        sessionid = [c.name for c in cj if c.name == "sessionid"]
        if not sessionid:
            raise Exception("Failed to authenticate as %s" % self.username)

        # all should be ok now, mechanize can handle the rest if we give it the
        # cookies
        br = mechanize.Browser()
        #br.set_debug_http(True)
        #br.set_debug_responses(False)
        #br.set_debug_redirects(True)
        br.set_handle_robots(False)
        br.set_cookiejar(cj)
        if self.proxy:
            br.set_proxies({"http":self.proxy})

        self.browser = br

        # also use this cookiejar for other mechanize operations (e.g., urlopen)
        opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
        mechanize.install_opener(opener)

    def course_name_from_url(self,course_url):
        """Given the course URL, return the name, e.g., algo2012-p2"""
        return course_url.split('/')[3]

    def lecture_url_from_name(self,course_name):
        """Given the name of a course, return the video lecture url"""
        return self.LECTURE_URL % course_name

    def get_downloadable_content(self,course_url):
        """Given the video lecture URL of the course, return a list of all
        downloadable resources.

        Returns (weeklyTopics, allClasses):
          weeklyTopics - sanitised section headers in page order;
          allClasses   - {section -> {class name -> [(url, filename-or-None)],
                          'classNames': [names in page order]}}.
        """
        cname = self.course_name_from_url(course_url)

        print "* Collecting downloadable content from " + course_url

        # get the course name, and redirect to the course lecture page
        vidpage = self.browser.open(course_url,timeout=self.TIMEOUT)

        # extract the weekly classes
        soup = BeautifulSoup(vidpage,self.parser)

        # extract the weekly classes
        weeks = soup.findAll("div", { "class" : "course-item-list-header" })

        weeklyTopics = []
        allClasses = {}

        # for each weekly class
        for week in weeks:
            h3 = week.findNext('h3')
            sanitisedHeaderName = sanitise_filename(h3.text)
            weeklyTopics.append(sanitisedHeaderName)

            # The list of lectures is the sibling <ul> of the header div.
            ul = week.next_sibling
            lis = ul.findAll('li')
            weekClasses = {}

            # for each lecture in a weekly class
            classNames = []
            for li in lis:
                # the name of this lecture/class
                className = li.a.text.strip()

                # Many class names have the following format:
                # "Something really cool (12:34)"
                # If the class name has this format, replace the colon in the
                # time with a hyphen (colons are not filesystem-safe).
                if re.match(".+\(\d?\d:\d\d\)$",className):
                    head,sep,tail = className.rpartition(":")
                    className = head + "-" + tail

                className = sanitise_filename(className)
                classNames.append(className)

                classResources = li.find('div', {'class':'course-lecture-item-resource'})

                if not classResources:
                    print " Warning: No resources found"
                else:
                    hrefs = classResources.findAll('a')

                    # collect the resources for a particular lecture (slides, pdf,
                    # links,...)
                    resourceLinks = []

                    for a in hrefs:
                        # get the hyperlink itself
                        h = a.get('href')
                        if not h: continue

                        # Sometimes the raw, uncompresed source videos are available as
                        # well. Don't download them as they are huge and available in
                        # compressed form anyway.
                        # NOTE(review): `.find(...) > 0` misses a match at
                        # index 0 — presumably never happens for full URLs.
                        if h.find('source_videos') > 0:
                            print " - will skip raw source video " + h
                        else:
                            # Dont set a filename here, that will be inferred from the week
                            # titles
                            resourceLinks.append( (h,None) )

                    # check if the video is included in the resources, if not, try
                    # do download it directly
                    hasvid = [x for x,_ in resourceLinks if x.find('.mp4') > 0]
                    if not hasvid:
                        ll = li.find('a',{'class':'lecture-link'})
                        lurl = ll['data-modal-iframe']
                        try:
                            pg = self.browser.open(lurl,timeout=self.TIMEOUT)
                            bb = BeautifulSoup(pg,self.parser)
                            vobj = bb.find('source',type="video/mp4")
                            if not vobj:
                                print " Warning: Failed to find video for %s" % className
                            else:
                                vurl = vobj['src']
                                # build the matching filename
                                fn = className + ".mp4"
                                resourceLinks.append( (vurl,fn) )
                        except urllib2.HTTPError as e:
                            # sometimes there is a lecture without a vidio (e.g.,
                            # genes-001) so this can happen.
                            print " Warning: failed to download video directly url %s: %s" % (lurl,e)

                    weekClasses[className] = resourceLinks

            # keep track of the list of classNames in the order they appear in the html
            weekClasses['classNames'] = classNames

            allClasses[sanitisedHeaderName] = weekClasses

        return (weeklyTopics, allClasses)

    def get_headers(self,url):
        """
        Get the response headers for *url* (opens the url to do so).
        """
        r = self.browser.open(url,timeout=self.TIMEOUT)
        return r.info()

    def download(self, url, target_dir=".", target_fname=None):
        """
        Download the url to the given filename.

        The filename is taken from target_fname, else the
        Content-Disposition header, else the url itself. Files whose
        extension is in self.ignorefiles are skipped, as are files that
        already exist on disk with (roughly) the advertised size.
        """
        # get the headers
        headers = self.get_headers(url)

        # get the content length (if present)
        clen = int(headers.get('Content-Length',-1))

        # build the absolute path we are going to write to
        fname = target_fname or filename_from_header(headers) or filename_from_url(url)

        # split off the extension
        _,ext = path.splitext(fname)

        # check if we should skip it (remember to remove the leading .)
        if ext and ext[1:] in self.ignorefiles:
            print ' - skipping "%s" (extension ignored)' % fname
            return

        filepath = path.join(target_dir,fname)

        dl = True
        if path.exists(filepath):
            if clen > 0:
                fs = path.getsize(filepath)
                delta = clen - fs

                # all we know is that the current filesize may be shorter than it should be and the content length may be incorrect
                # overwrite the file if the reported content length is bigger than what we have already by at least k bytes (arbitrary)

                # TODO this is still not foolproof as the fundamental problem is that the content length cannot be trusted
                # so this really needs to be avoided and replaced by something else, eg., explicitly storing what downloaded correctly
                if delta > 2:
                    print ' - "%s" seems incomplete, downloading again' % fname
                else:
                    print ' - "%s" already exists, skipping' % fname
                    dl = False
            else:
                # missing or invalid content length
                # assume all is ok...
                dl = False

        try:
            if dl:
                self.browser.retrieve(url,filepath,timeout=self.TIMEOUT)
        except Exception as e:
            print "Failed to download url %s to %s: %s" % (url,filepath,e)

    def download_course(self,cname,dest_dir=".",reverse_sections=False):
        """
        Download all the contents (quizzes, videos, lecture notes, ...) of the course to the given destination directory (defaults to .)
        """
        # open the main class page (authenticates the session for this class)
        self.browser.open(self.AUTH_URL % cname,timeout=self.TIMEOUT)

        # get the lecture url
        course_url = self.lecture_url_from_name(cname)

        (weeklyTopics, allClasses) = self.get_downloadable_content(course_url)
        if not weeklyTopics:
            print " Warning: no downloadable content found for %s, did you accept the honour code?" % cname
            return
        else:
            print '* Got all downloadable content for ' + cname

        if reverse_sections:
            weeklyTopics.reverse()
            print "* Sections reversed"

        course_dir = path.abspath(path.join(dest_dir,cname))

        # ensure the target dir exists
        if not path.exists(course_dir):
            os.mkdir(course_dir)

        print "* " + cname + " will be downloaded to " + course_dir

        # ensure the course directory exists
        # NOTE(review): redundant — the mkdir above already guarantees this.
        if not path.exists(course_dir):
            os.makedirs(course_dir)

        # download the standard pages
        print " - Downloading lecture/syllabus pages"
        self.download(self.HOME_URL % cname,target_dir=course_dir,target_fname="index.html")
        self.download(course_url,target_dir=course_dir,target_fname="lectures.html")

        # now download the actual content (video's, lecture notes, ...)
        for j,weeklyTopic in enumerate(weeklyTopics,start=1):
            if weeklyTopic not in allClasses:
                #TODO: refactor
                print 'Warning: Weekly topic not in all classes:', weeklyTopic
                continue

            # ensure the week dir exists
            # add a numeric prefix to the week directory name to ensure chronological ordering
            wkdirname = str(j).zfill(2) + " - " + weeklyTopic
            wkdir = path.join(course_dir,wkdirname)
            if not path.exists(wkdir):
                os.makedirs(wkdir)

            weekClasses = allClasses[weeklyTopic]
            classNames = weekClasses['classNames']

            print " - " + weeklyTopic

            for i,className in enumerate(classNames,start=1):
                if className not in weekClasses:
                    #TODO: refactor
                    print "Warning:",className,"not in",weekClasses.keys()
                    continue

                classResources = weekClasses[className]

                # ensure the class dir exists
                clsdirname = str(i).zfill(2) + " - " + className
                clsdir = path.join(wkdir,clsdirname)
                if not path.exists(clsdir):
                    os.makedirs(clsdir)

                print " - Downloading resources for " + className

                for classResource,tfname in classResources:
                    if not isValidURL(classResource):
                        # Relative link: resolve it against the course url.
                        absoluteURLGen = AbsoluteURLGen(course_url)
                        classResource = absoluteURLGen.get_absolute(classResource)
                        # NOTE(review): this prints "is not a valid url" even
                        # when the conversion above succeeded — misleading.
                        print " -" + classResource, ' - is not a valid url'

                        if not isValidURL(classResource):
                            print " -" + classResource, ' - is not a valid url'
                            continue

                    try:
                        #print ' - Downloading ', classResource
                        self.download(classResource,target_dir=clsdir,target_fname=tfname)
                    except Exception as e:
                        print " - failed: ",classResource,e
def filename_from_header(header):
    """Extract and sanitise the filename advertised in a Content-Disposition
    header; return '' when the header is absent or malformed."""
    try:
        disposition = header['Content-Disposition']
        match = re.search('attachment; filename="(.*?)"', disposition)
        return sanitise_filename(match.group(1))
    except Exception:
        # No header, no match (m is None), or a non-dict argument: treat all
        # failures as "no usable filename".
        return ''
def filename_from_url(url):
    """Derive a sanitised filename from a URL's path component.

    Falls back to host_path with slashes replaced by underscores when the
    url ends in a trailing slash, and appends ".html" when no plausible
    extension is present.
    """
    # parse the url into its components
    u = urlsplit(url)

    # split the path into parts and unquote
    parts = [urllib2.unquote(x).strip() for x in u.path.split('/')]

    # take the last component as filename
    fname = parts[-1]

    # if empty, url ended with a trailing slash
    # so join up the hostnam/path and use that as a filename
    if len(fname) < 1:
        s = u.netloc + u.path[:-1]
        fname = s.replace('/','_')
    else:
        # unquoting could have cuased slashes to appear again
        # split and take the last element if so
        fname = fname.split('/')[-1]

    # add an extension if none (a "suffix" longer than 5 chars is assumed
    # to be part of the name, not a real extension)
    ext = path.splitext(fname)[1]
    if len(ext) < 1 or len(ext) > 5: fname += ".html"

    # remove any illegal chars and return
    return sanitise_filename(fname)
def sanitise_filename(fileName):
    """Return a clean, filesystem-safe ascii filename derived from *fileName*.

    Accepts both str and unicode (Python 2); strips characters outside a
    conservative whitelist and caps the length at 250 chars, preserving the
    extension.
    """
    # ensure a clean, valid filename (arg may be both str and unicode)

    # ensure a unicode string, problematic ascii chars will get removed
    if isinstance(fileName,str):
        fn = unicode(fileName,errors='ignore')
    else:
        fn = fileName

    # normalize it
    fn = unicodedata.normalize('NFKD',fn)

    # encode it into ascii, again ignoring problematic chars
    s = fn.encode('ascii','ignore')

    # remove any characters not in the whitelist
    s = re.sub('[^\w\-\(\)\[\]\., ]','',s).strip()

    # ensure it is within a sane maximum
    # (renamed from `max`, which shadowed the builtin of the same name)
    max_len = 250

    # split off extension, trim, and re-add the extension
    fn,ext = path.splitext(s)
    s = fn[:max_len-len(ext)] + ext

    return s
# TODO: simplistic
def isValidURL(url):
    """Return True when *url* looks like an absolute http(s) URL.

    The previous test accepted any string starting with "http" (making the
    separate "https" clause dead code, since "https..." also starts with
    "http"); require the full scheme prefix instead.
    """
    return url.startswith(('http://', 'https://'))
# TODO: is this really still needed
class AbsoluteURLGen(object):
"""
Generate absolute URLs from relative ones
Source: AbsoluteURLGen copy pasted from http://www.python-forum.org/pythonforum/viewtopic.php?f=5&t=12515
"""
def __init__(self, base='', replace_base=False):
self.replace_base = replace_base
self.base_regex = re.compile('^(https?://)(.*)$')
self.base = self.normalize_base(base)
def normalize_base(self, url):
base = url
if self.base_regex.search(base):
# rid thyself of 'http(s)://'
base = self.base_regex.search(url).group(2)
if not base.rfind('/') == -1:
# keep only the directory, not the filename
base = base[:base.rfind('/')+1]
base = self.base_regex.search(url).group(1) + base
return base
def get_absolute(self, url=''):
if not self.base or (
self.replace_base and self.base_regex.search(url)):
self.base = self.normalize_base(url)
return url
elif self.base_regex.search(url):
# it's an absolute url, but we don't want to keep it's base
return url
else:
# now, it's time to do some converting.
if url.startswith("../"):
# they want the parent dir
if not self.base[:-2].rfind("/") == -1:
base = self.base[:self.base[:-2].rfind("/")+1]
return base + url[3:]
else:
# there are no subdirs... broken link?
return url
elif url.startswith("/"):
# file is in the root dir
protocol, base = self.base_regex.search(self.base).groups()
# remove subdirs until we're left with the root
while not base[:-2].rfind("/") == -1:
base = base[:base[:-2].rfind('/')]
return protocol + base + url
else:
if url.startswith("./"):
url = url[2:]
return self.base + url
def get_netrc_creds():
    """
    Read username/password from the users' netrc file. Returns None if no
    coursera credentials can be found.
    """
    # inspired by https://github.com/jplehmann/coursera

    if platform.system() == 'Windows':
        # where could the netrc file be hiding, try a number of places
        env_vars = ["HOME","HOMEDRIVE", "HOMEPATH","USERPROFILE","SYSTEMDRIVE"]
        env_dirs = [os.environ[e] for e in env_vars if os.environ.get(e,None)]

        # also try the root/cur dirs
        env_dirs += ["C:", ""]

        # possible filenames
        file_names = [".netrc", "_netrc"]

        # all possible paths
        # (NOTE(review): the loop variable `dir` shadows the builtin)
        paths = [path.join(dir,fn) for dir in env_dirs for fn in file_names]
    else:
        # on *nix just put None, and the correct default will be used
        paths = [None]

    # try the paths one by one and return the first one that works
    # (machine entry looked up is 'coursera-dl'; login and password fields
    # are returned, the account field is ignored)
    creds = None
    for p in paths:
        try:
            auths = netrc.netrc(p).authenticators('coursera-dl')
            creds = (auths[0], auths[2])
            print "Credentials found in .netrc file"
            break
        except (IOError, TypeError, netrc.NetrcParseError) as e:
            # missing file / no matching machine entry / parse error:
            # just try the next candidate path
            pass

    return creds
# is lxml available?
def haslxml():
try:
import lxml
return True
except:
return False
def main():
    """Command-line entry point: parse args, resolve credentials, log in
    once, then download each requested course."""
    # parse the commandline arguments
    parser = argparse.ArgumentParser(description='Download Coursera.org course videos/docs for offline use.')
    parser.add_argument("-u", dest='username', type=str, help='coursera username (.netrc used if omitted)')
    parser.add_argument("-p", dest='password', type=str, help='coursera password')
    parser.add_argument("-d", dest='dest_dir', type=str, default=".", help='destination directory where everything will be saved')
    parser.add_argument("-n", dest='ignorefiles', type=str, default="", help='comma-separated list of file extensions to skip, e.g., "ppt,srt,pdf"')
    parser.add_argument("-q", dest='parser', type=str, default=CourseraDownloader.DEFAULT_PARSER,
                        help="the html parser to use, see http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser")
    parser.add_argument("-x", dest='proxy', type=str, default=None, help="proxy to use, e.g., foo.bar.com:3125")
    parser.add_argument("--reverse-sections", dest='reverse', action="store_true",
                        default=False, help="download and save the sections in reverse order")
    parser.add_argument('course_names', nargs="+", metavar='<course name>',
                        type=str, help='one or more course names from the url (e.g., comnets-2012-001)')
    args = parser.parse_args()

    # check the parser; fall back to the stdlib parser if lxml is missing
    html_parser = args.parser
    if html_parser == 'lxml' and not haslxml():
        print " Warning: lxml not available, falling back to built-in 'html.parser' (see -q option), this may cause problems on Python < 2.7.3"
        html_parser = 'html.parser'
    else:
        pass

    print "Coursera-dl v%s (%s)" % (_version.__version__,html_parser)

    # search for login credentials in .netrc file if username hasn't been provided in command-line args
    username, password = args.username, args.password
    if not username:
        creds = get_netrc_creds()
        if not creds:
            raise Exception("No username passed and no .netrc credentials found, unable to login")
        else:
            username, password = creds
    else:
        # prompt the user for his password if not specified
        if not password:
            password = getpass.getpass()

    # instantiate the downloader class
    d = CourseraDownloader(username,password,proxy=args.proxy,parser=html_parser,ignorefiles=args.ignorefiles)

    # authenticate, only need to do this once but need a classaname to get hold
    # of the csrf token, so simply pass the first one
    print "Logging in as '%s'..." % username
    d.login(args.course_names[0])

    # download the content
    for i,cn in enumerate(args.course_names,start=1):
        print
        print "Course %s of %s" % (i,len(args.course_names))
        d.download_course(cn,dest_dir=args.dest_dir,reverse_sections=args.reverse)

if __name__ == '__main__':
    main()
|
rydnr/coursera-dl
|
courseradownloader/courseradownloader.py
|
Python
|
gpl-3.0
| 23,257
|
import random
import math
import util
# Reaction table for each job: per-hour inputs consumed and outputs produced.
# The job's own name appears in its Inputs as the labour requirement.
joblist = {
    'CookJob':{'Inputs':{'Food':9.92, 'CookJob': 1.0},'Outputs':{'Meals':9.92,'SanitationJob':0.1}}, #fresh food, dirty kitchen
    'SanitationJob':{'Inputs':{'General Consumables':0.55, 'SanitationJob': 1.0},'Outputs':{'Solid Waste':0.55}},
    'MaintenanceJob':{'Inputs':{'Parts':1.0, 'MaintenanceJob': 1.0},'Outputs':{'Scrap':0.50,'SanitationJob':0.1}}, #working stuff, dirty guy
    'HydroJob':{'Inputs':{'HydroJob':1.0},'Outputs':{'Hydro':1,'SanitationJob':0.03}},
    'EVAJob':{'Inputs':{'General Consumables':0.55, 'Oxygen':1.0, 'EVAJob': 1.0},'Outputs':{'EVA':1.0,'SanitationJob':0.1 }} #TODO find real numbers
    }

def get_job_from_priority_skillset_prefs(station,skills,prefs=None):
    """Return the best job name, ranked by station demand * skill * preference.

    station must provide get_item(job_name); skills maps job name -> skill
    level; prefs (optional) maps job name -> preference weight.

    Fix: the original called .sort() on dict.keys(), which fails on
    Python 3 (keys() is a view there); max() with the same key function is
    equivalent (and avoids the full sort just to take the first element).
    """
    return max(joblist,
               key=lambda j: station.get_item(j)*skills[j]*(prefs[j] if prefs else 1.0))
def generate_random_skillset():
    """Return a dict mapping every job name to a random skill in [0.1, 0.6)."""
    return {name: .1 + .5*random.random() for name in joblist.keys()}
def run(job,actor,station,duration):
    """Have *actor* perform *job* at *station* for *duration* seconds.

    Returns the reaction efficiency from the station (0.0 for an unknown
    job). Side effect: increases actor.skill[job] along an exponential
    learning curve with a one-year time constant.
    """
    #print job, actor.name, station.name, duration
    if job not in joblist.keys(): return 0.0
    # effective work time scales with the actor's current skill level
    new_dur = duration * actor.skill[job]
    _eff = station.satisfy_reaction(joblist[job],new_dur)
    #increase skill of actor
    # Invert the learning curve to recover the accumulated practice time,
    # add this session's duration, then map back to a skill in [0, 1):
    # skill = 1 - exp(-t/tc).
    tc = util.seconds(1,'year')
    score = -1.0 * math.log(1 - actor.skill[job]) * tc
    score += duration
    actor.skill[job] = 1 - math.exp(-score/tc)
    return _eff
|
facepalm/personal-space
|
src/job.py
|
Python
|
gpl-3.0
| 1,622
|
# encoding= utf-8
# Copyright 2007-2009 QSpin - www.qspin.be
#
# This file is part of QTaste framework.
#
# QTaste is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QTaste is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with QTaste. If not, see <http://www.gnu.org/licenses/>.
##
# QTaste Data driven test: QTaste double with precision data equality test.
# <p>
# The goal of this test is to check the equals method of DoubleWithPrecision.
# @preparation None
# @data DOUBLE_WITH_PRECISION_DATA [DoubleWithPrecision] double with precision data
##
from qtaste import *
def Step1():
    """
    @step In the CSV, define DOUBLE_WITH_PRECISION_DATA to 10(0.5)
    @expected None
    """
    # Intentionally empty: the data under test comes from the CSV row, so
    # this step only documents the precondition.
    pass
def Step2():
    """
    @step Check the equals() method of DoubleWithPrecision with differents values
    @expected Check is successful
    """
    # 10(0.5) means value 10 with tolerance 0.5, so equals() must accept
    # everything in [9.5, 10.5] and reject values just outside that range.
    doubleWithPrecisionValue = testData.getDoubleWithPrecisionValue('DOUBLE_WITH_PRECISION_DATA')
    if not doubleWithPrecisionValue.equals(10):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should be equal to 10")
    if not doubleWithPrecisionValue.equals(10.2):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should be equal to 10.2")
    if not doubleWithPrecisionValue.equals(10.5):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should be equal to 10.5")
    if not doubleWithPrecisionValue.equals(9.5):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should be equal to 9.5")
    # Boundary rejection: just outside the +/-0.5 tolerance.
    if doubleWithPrecisionValue.equals(9.49):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should not be equal to 9.49")
    if doubleWithPrecisionValue.equals(10.51):
        testAPI.stopTest(Status.FAIL, "Double with precision value 10(0.5) should not be equal to 10.51")
doStep(Step1)
doStep(Step2)
|
remybaranx/qtaste
|
TestSuites/TestSuite_QTaste/EngineSuite/QTASTE_DATA/QTASTE_DATA_05/TestScript.py
|
Python
|
gpl-3.0
| 2,302
|
from django import forms
from django.forms import formset_factory
from django.forms.extras.widgets import SelectDateWidget
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
import datetime
from .models import Resource_Type, Resource_Inst, Meal, Dish, Standard_Inst, \
Unit
class Html5DateInput(forms.DateInput):
    # Render as <input type="date"> so browsers show their native date picker.
    input_type = 'date'
class MealForm(forms.ModelForm):
    """Form for recording a meal: when it was eaten and which meal it was."""
    cons_time = forms.DateTimeField(
        label = 'Meal date',
        initial = datetime.date.today,
        widget = Html5DateInput(format='%Y-%m-%d'), )
    meal_type = forms.ChoiceField(
        label = "Which meal",
        choices = Meal.MEAL_TYPES,
        initial = "Lunch", )
    # Focus the date field first when the form renders.
    cons_time.widget.attrs.update({'autofocus': 'autofocus'})

    class Meta:
        model = Meal
        fields = [ 'cons_time', 'meal_type' ]
class DishForm(forms.ModelForm):
    """Form for adding a dish to a meal; only the cooking style is asked."""
    cooking_style = forms.ChoiceField(
        label = "Dish style",
        choices = Dish.COOKING_STYLES,
        initial = 'frying', )
    # Focus the style field first when the form renders.
    cooking_style.widget.attrs.update({'autofocus': 'autofocus'})

    class Meta:
        model = Dish
        fields = [ 'cooking_style' ]
class NewInstForm(forms.ModelForm):
    """Form for registering a new resource instance (a purchased item) with
    all details entered manually — name, type, price, quantity and dates."""
    res_name = forms.CharField(
        label = "Item name",
        max_length = 40, )
    res_type = forms.ModelChoiceField(
        label = "Type",
        queryset = Resource_Type.objects.all(),
        to_field_name = 'r_name', )
    # Price is stored in integer pence to avoid float rounding.
    price = forms.IntegerField(
        label = "Price (pence)",
        min_value = 0, )
    orig_unit = forms.ModelChoiceField(
        label = "Units",
        queryset = Unit.objects.all(), )
    amt_original = forms.FloatField(
        label = "Quantity",
        min_value = 0, )
    best_bef_date = forms.DateTimeField(
        label = "Best before",
        widget = Html5DateInput(), )
    # True = "best before" date, False = hard expiry date.
    best_before = forms.BooleanField(
        label = "Expiry type",
        widget = forms.RadioSelect(choices=
                    [ (True, 'Best Before'),
                      (False, 'Expiry') ] ),
        initial = True, )
    purchase_date = forms.DateTimeField(
        label = "Purchase date",
        widget = Html5DateInput(format='%Y-%m-%d'),
        initial = datetime.date.today, )

    class Meta:
        model = Resource_Inst
        # Bookkeeping fields are filled in by the view, not the user.
        exclude = [ 'inst_owner',
                    'last_mod',
                    'unit_use_formal',
                    'used_so_far',
                    'exhausted' ]
class NewInstStdForm(forms.ModelForm):
    """Form for registering a resource instance from a predefined standard
    instance template; only price, amount and dates need entering."""
    std_inst = forms.ModelChoiceField(
        label = "Standard instance",
        queryset = Standard_Inst.objects.all(),
        )
    # Price is stored in integer pence to avoid float rounding.
    price = forms.IntegerField(
        label = "Price (pence)",
        min_value = 0, )
    # Placeholder amount field; not required since the template may fix it.
    amt_dummy = forms.FloatField(
        label = "Amount",
        required = False, )
    best_bef_date = forms.DateTimeField(
        label = "Best before",
        widget = Html5DateInput(), )
    purchase_date = forms.DateTimeField(
        label = "Purchase date",
        widget = Html5DateInput(format='%Y-%m-%d'),
        initial = datetime.date.today, )
    # Focus the template selector first when the form renders.
    std_inst.widget.attrs.update({'autofocus': 'autofocus'})

    class Meta:
        model = Resource_Inst
        fields = [ 'std_inst',
                   'price',
                   'amt_dummy',
                   'best_bef_date',
                   'purchase_date' ]
class NewStdInstForm(forms.ModelForm):
    """Form for defining a new standard instance template that future
    purchases can be based on."""
    # New standard instances
    inst_name = forms.CharField(
        label = "Instance name",
        max_length = 40, )
    inst_type = forms.ModelChoiceField(
        label = "Type",
        queryset = Resource_Type.objects.all(),
        to_field_name = 'r_name', )
    # Price is stored in integer pence to avoid float rounding.
    usual_price = forms.IntegerField(
        label = "Usual price (pence)",
        min_value = 0, )
    use_formal = forms.BooleanField(
        label = "Use formal units",
        required = False, )
    # True = "best before" date, False = hard expiry date.
    use_bestbef = forms.BooleanField(
        label = "Expiry type",
        widget = forms.RadioSelect(choices=
                    [ (True, 'Best Before'),
                      (False, 'Expiry') ] ),
        initial = True, )
    orig_unit = forms.ModelChoiceField(
        label = "Units",
        queryset = Unit.objects.all(), )
    orig_amt = forms.FloatField(
        label = "Quantity",
        min_value = 0, )

    class Meta:
        model = Standard_Inst
        fields = [ 'inst_name',
                   'inst_type',
                   'usual_price',
                   'use_formal',
                   'use_bestbef',
                   'is_relative',
                   'orig_unit',
                   'orig_amt' ]
class TicketForm(forms.Form):
    """Record usage against one of the user's unexhausted resource instances.

    The constructor takes the owning ``user`` first and restricts the
    instance choices to that user's non-exhausted instances.
    """
    resource_inst = forms.ModelChoiceField(
        queryset = Resource_Inst.objects.all())
    resource_inst.widget.attrs.update({'autofocus': 'autofocus'})
    units_used = forms.FloatField(min_value=0)
    exhausted = forms.BooleanField(required=False)
    def __init__(self, user, *args, **kwargs):
        # Narrow the class-level queryset to this user's live instances.
        super(TicketForm, self).__init__(*args, **kwargs)
        self.fields['resource_inst'].queryset \
            = Resource_Inst.objects.filter(exhausted = False, inst_owner=user) \
            .order_by('id')
# Formset wrapper around TicketForm showing two extra blank forms.
TicketFormSet = formset_factory(TicketForm, extra=2)
class InstPriceForm(forms.Form):
    """Standalone form holding a single non-negative integer price."""
    price = forms.IntegerField(min_value=0)
class InstAmtForm(forms.Form):
    """Standalone form holding a single non-negative original amount."""
    orig_amt = forms.FloatField(min_value=0)
|
nw0/mealy
|
forms.py
|
Python
|
gpl-3.0
| 7,338
|
#!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity / support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# ansible-doc metadata.  Fixes: polling_timeout/polling_interval descriptions
# were copy-pasted from the idempotency option; "URL Is" typo corrected.
DOCUMENTATION = '''
---
module: azure_rm_resource
version_added: "2.6"
short_description: Create any Azure resource.
description:
  - Create, update or delete any Azure resource using Azure REST API.
  - This module gives access to resources that are not supported via Ansible modules.
  - Refer to https://docs.microsoft.com/en-us/rest/api/ regarding details related to specific resource REST API.

options:
  url:
    description:
      - Azure RM Resource URL.
  api_version:
    description:
      - Specific API version to be used.
    required: yes
  provider:
    description:
      - Provider type.
      - Required if URL is not specified.
  resource_group:
    description:
      - Resource group to be used.
      - Required if URL is not specified.
  resource_type:
    description:
      - Resource type.
      - Required if URL is not specified.
  resource_name:
    description:
      - Resource name.
      - Required if URL is not specified.
  subresource:
    description:
      - List of subresources
    suboptions:
      namespace:
        description:
          - Subresource namespace
      type:
        description:
          - Subresource type
      name:
        description:
          - Subresource name
  body:
    description:
      - The body of the http request/response to the web service.
  method:
    description:
      - The HTTP method of the request or response. It MUST be uppercase.
    choices: [ "GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE" ]
    default: "PUT"
  status_code:
    description:
      - A valid, numeric, HTTP status code that signifies success of the
        request. Can also be comma separated list of status codes.
    default: [ 200, 201, 202 ]
  idempotency:
    description:
      - If enabled, idempotency check will be done by using GET method first and then comparing with I(body)
    default: no
    type: bool
  polling_timeout:
    description:
      - Timeout (in seconds) to wait for the completion of a long running operation when polling.
    default: 0
    type: int
    version_added: "2.8"
  polling_interval:
    description:
      - Interval (in seconds) between polls for the completion of a long running operation.
    default: 60
    type: int
    version_added: "2.8"
  state:
    description:
      - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
    default: present
    choices:
      - absent
      - present

extends_documentation_fragment:
  - azure

author:
  - "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Update scaleset info using azure_rm_resource
azure_rm_resource:
resource_group: "{{ resource_group }}"
provider: compute
resource_type: virtualmachinescalesets
resource_name: "{{ scaleset_name }}"
api_version: "2017-12-01"
body: "{{ body }}"
'''
RETURN = '''
response:
description: Response specific to resource type.
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from ansible.module_utils.common.dict_transformations import dict_merge
try:
from msrestazure.azure_exceptions import CloudError
from msrest.service_client import ServiceClient
from msrestazure.tools import resource_id, is_valid_resource_id
import json
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMResource(AzureRMModuleBase):
    """Issue an arbitrary Azure RM REST call as an Ansible module.

    The target URL is either taken verbatim from I(url) or assembled from
    provider / resource_group / resource_type / resource_name plus any
    subresource triplets.  When I(idempotency) is enabled, a GET is issued
    first and the request is skipped if merging I(body) into the current
    state would change nothing.
    """

    def __init__(self):
        # User-facing argument specification (interface must stay stable).
        self.module_arg_spec = dict(
            url=dict(
                type='str',
                required=False
            ),
            provider=dict(
                type='str',
            ),
            resource_group=dict(
                type='str',
            ),
            resource_type=dict(
                type='str',
            ),
            resource_name=dict(
                type='str',
            ),
            subresource=dict(
                type='list',
                default=[]
            ),
            api_version=dict(
                type='str',
                required=True
            ),
            method=dict(
                type='str',
                default='PUT',
                choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
            ),
            body=dict(
                type='raw'
            ),
            status_code=dict(
                type='list',
                default=[200, 201, 202]
            ),
            idempotency=dict(
                type='bool',
                default=False
            ),
            polling_timeout=dict(
                type='int',
                default=0
            ),
            polling_interval=dict(
                type='int',
                default=60
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # Result skeleton returned to Ansible from exec_module().
        self.results = dict(
            changed=False,
            response=None
        )
        # Attribute slots; exec_module() copies the parsed params onto them.
        self.mgmt_client = None
        self.url = None
        self.api_version = None
        self.provider = None
        self.resource_group = None
        self.resource_type = None
        self.resource_name = None
        self.subresource_type = None
        self.subresource_name = None
        self.subresource = []
        self.method = None
        self.status_code = []
        self.idempotency = False
        self.polling_timeout = None
        self.polling_interval = None
        self.state = None
        self.body = None
        super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Build the URL if needed, send the request, return the result dict."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.state == 'absent':
            self.method = 'DELETE'
            # 204 No Content is also a successful outcome for DELETE.
            self.status_code.append(204)

        if self.url is None:
            # Assemble the URL from its parts via msrestazure's resource_id().
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # BUGFIX: the original tested startswith('.microsoft'), which can
            # never match a real namespace such as 'Microsoft.Compute', so an
            # already-qualified provider was double-prefixed
            # ('Microsoft.Microsoft.Compute').
            if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        # A type without a name is appended to the URL below.
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)
            if orphan is not None:
                self.url += '/' + orphan

        query_parameters = {}
        query_parameters['api-version'] = self.api_version
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        needs_update = True
        response = None
        if self.idempotency:
            # Probe current state; 404 simply means the resource is absent.
            original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0)
            if original.status_code == 404:
                if self.state == 'absent':
                    needs_update = False
            else:
                try:
                    response = json.loads(original.text)
                    # No-op when merging the desired body changes nothing.
                    needs_update = (dict_merge(response, self.body) != response)
                except Exception:
                    # Unparseable current state: fall through and send anyway.
                    pass

        if needs_update:
            response = self.mgmt_client.query(self.url,
                                              self.method,
                                              query_parameters,
                                              header_parameters,
                                              self.body,
                                              self.status_code,
                                              self.polling_timeout,
                                              self.polling_interval)
            if self.state == 'present':
                try:
                    response = json.loads(response.text)
                except Exception:
                    # Non-JSON body (or empty): return raw text.
                    response = response.text
            else:
                response = None

        self.results['response'] = response
        self.results['changed'] = needs_update
        return self.results
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMResource()
if __name__ == '__main__':
    main()
|
orgito/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_resource.py
|
Python
|
gpl-3.0
| 9,861
|
from django.db.models import CharField
import uuid
# Prefix for generated ids; only referenced from the commented-out block in
# UUIDField.create_uuid(), so currently unused at runtime.
UUID_BASE_ID = 'fff0'
# Default UUID version used by UUIDField.
UUID_BASE_VERSION = 1
# Name and namespace fed to uuid.uuid5() for version-5 ids.
UUID_BASE_NAME = 'anorg.net'
UUID_BASE_NAMESPACE = uuid.NAMESPACE_DNS
class UUIDVersionError(Exception):
    """Raised when an unsupported or invalid UUID version is requested."""
class UUIDField(CharField):
    """ UUIDField
    By default uses UUID version 1 (generate from host ID, sequence number and current time)
    The field support all uuid versions which are natively supported by the uuid python module.
    For more information see: http://docs.python.org/lib/module-uuid.html
    """
    def __init__(self, verbose_name=None, name=None, auto=True, version=UUID_BASE_VERSION, node=None, clock_seq=None, namespace=None, **kwargs):
        # Canonical UUID text form: 32 hex digits + 4 dashes = 36 chars.
        kwargs['max_length'] = 36
        if auto:
            # Auto-generated values: allow blank and hide from forms/admin.
            kwargs['blank'] = True
            kwargs.setdefault('editable', False)
        self.auto = auto
        self.version = version
        if version == 1:
            self.node, self.clock_seq = node, clock_seq
        elif version == 3 or version == 5:
            # NOTE(review): reuses `name` (the Django field name argument) as
            # the uuid3/uuid5 name -- confirm this is intentional.
            self.namespace, self.name = namespace, name
        CharField.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        # Stored as a plain CharField at the database level.
        return CharField.__name__
    def contribute_to_class(self, cls, name):
        if self.primary_key:
            # Register as the model's auto field so Django adds no own `id`.
            assert not cls._meta.has_auto_field, \
                "A model can't have more than one AutoField: %s %s %s; have %s" % \
                (self, cls, name, cls._meta.auto_field)
            super(UUIDField, self).contribute_to_class(cls, name)
            cls._meta.has_auto_field = True
            cls._meta.auto_field = self
        else:
            super(UUIDField, self).contribute_to_class(cls, name)
    def create_uuid(self):
        # NOTE(review): leftover debug print (Python 2 statement syntax);
        # consider replacing with logging.
        print 'UUID - Version: %s' % self.version
        if not self.version or self.version == 4:
            res = uuid.uuid4()
        elif self.version == 1:
            res = uuid.uuid1(self.node, self.clock_seq)
        elif self.version == 2:
            raise UUIDVersionError("UUID version 2 is not supported.")
        elif self.version == 3:
            res = uuid.uuid3(self.namespace, self.name)
        elif self.version == 5:
            # NOTE(review): unlike version 3 this ignores self.namespace and
            # self.name and always uses the module-level defaults -- confirm.
            res = uuid.uuid5(UUID_BASE_NAMESPACE, UUID_BASE_NAME)
        else:
            raise UUIDVersionError("UUID version %s is not valid." % self.version)
        #if UUID_BASE_ID:
        #    res = "%s%s" % (UUID_BASE_ID, str(res)[4:])
        return res
    def pre_save(self, model_instance, add):
        # Generate a fresh UUID on first save, or whenever the value is empty.
        value = super(UUIDField, self).pre_save(model_instance, add)
        if self.auto and add and value is None:
            value = unicode(self.create_uuid())
            setattr(model_instance, self.attname, value)
            return value
        else:
            if self.auto and not value:
                value = unicode(self.create_uuid())
                setattr(model_instance, self.attname, value)
        return value
    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # We'll just introspect the _actual_ field.
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.CharField"
        args, kwargs = introspector(self)
        # That's our definition!
        return (field_class, args, kwargs)
|
hzlf/openbroadcast
|
website/lib/fields/uuidfield.py
|
Python
|
gpl-3.0
| 3,309
|
#!/usr/local/bin/python
__author__ = 'kalcho'
# Standard multi-threaded TCP server
import socket
import threading
bind_ip = '0.0.0.0'
bind_port = 9999
# create a floating socket object
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind socket to address
server.bind((bind_ip, bind_port))
# make server listen for incomming connections
server.listen(5)
print("[*] listening on {:s}:{:d}".format(bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
    """Serve one client: log its first message, reply with ACK!, close."""
    data = client_socket.recv(1024)
    decoded = data.decode('utf-8')
    print("[*] received: {:s}".format(decoded))
    client_socket.send(b'ACK!')
    client_socket.close()
# Accept loop: one non-daemon thread per incoming connection.
while True:
    client, addr = server.accept()
    print("[*] accepted connection from: {:s}:{:d}".format(addr[0], addr[1]))
    # spin up our client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
|
kalcho83/black-hat-python
|
tcp_server.py
|
Python
|
gpl-3.0
| 1,025
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: create the Category table (auto id + 40-char name)."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=40)),
            ],
        ),
    ]
|
cpscanarias/ssfinder-back-end
|
ssfinder_back_end/social_service/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 483
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class CF3I(MEoS):
    """Multiparameter equation of state for trifluoroiodomethane
    >>> trifluoroiodometano=CF3I(T=300, P=0.1)
    >>> print "%0.1f %0.5f %0.2f %0.3f %0.5f %0.4f %0.4f %0.2f" % (trifluoroiodometano.T, trifluoroiodometano.rho, trifluoroiodometano.u.kJkg, trifluoroiodometano.h.kJkg, trifluoroiodometano.s.kJkgK, trifluoroiodometano.cv.kJkgK, trifluoroiodometano.cp.kJkgK, trifluoroiodometano.w)
    300.0 8.02849 7.17 19.624 0.09457 0.3043 0.3514 118.60
    """
    # Substance identification and critical/triple-point constants.
    name = "trifluoroiodomethane"
    CASNumber = "2314-97-8"
    formula = "CF3I"
    synonym = ""
    rhoc = unidades.Density(868.)
    Tc = unidades.Temperature(396.44)
    Pc = unidades.Pressure(3953., "kPa")
    M = 195.9103796 # g/mol
    Tt = unidades.Temperature(120.)
    Tb = unidades.Temperature(251.3)
    f_acent = 0.18
    momentoDipolar = unidades.DipoleMoment(0.92, "Debye")
    id = 645
    # Ideal-gas heat-capacity coefficients.
    CP1 = {"ao": 4.,
           "an": [], "pow": [],
           "ao_exp": [6.2641], "exp": [694.1467],
           "ao_hyp": [], "hyp": []}
    # Helmholtz-form equation-of-state coefficients (McLinden/Lemmon 2013).
    helmholtz1 = {
        "__type__": "Helmholtz",
        "__name__": "short Helmholtz equation of state for CF3I of McLinden and Lemmon (2013)",
        "__doi__": {"autor": "McLinden, M.O. and Lemmon, E.W.",
                    "title": "Thermodynamic Properties of R-227ea, R-365mfc, R-115, and R-13I1",
                    "ref": "to be submitted to J. Chem. Eng. Data, 2013.",
                    "doi": ""},
        "R": 8.314472,
        "cp": CP1,
        "ref": "IIR",
        "Tmin": Tt, "Tmax": 420., "Pmax": 20000.0, "rhomax": 14.1,
        "Pmin": 0.0004623, "rhomin": 14.05,
        "nr1": [0.112191e1, -0.308087e1, 0.111307e1, -0.184885, 0.110971,
                0.325005e-3],
        "d1": [1, 1, 1, 2, 3, 7],
        "t1": [0.23, 1.125, 1.5, 1.375, 0.25, 0.875],
        "nr2": [0.333357, -0.288288e-1, -0.371554, -0.997985e-1, -0.333205e-1,
                0.207882e-1],
        "d2": [2, 5, 1, 4, 3, 4],
        "t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.],
        "c2": [1, 1, 2, 2, 3, 3],
        "gamma2": [1]*6}
    eq = helmholtz1,
    # Ancillary equations: surface tension, saturation pressure and densities.
    _surface = {"sigma": [0.05767], "exp": [1.298]}
    _vapor_Pressure = {
        "eq": 5,
        "ao": [-6.8642, 1.7877, -1.0619, -2.1677],
        "exp": [1.0, 1.5, 1.9, 3.8]}
    _liquid_Density = {
        "eq": 1,
        "ao": [2.0711, 1.562, -2.599, 1.7177],
        "exp": [0.38, 1.3, 1.9, 2.5]}
    _vapor_Density = {
        "eq": 3,
        "ao": [-3.0987, -6.8771, -19.701, -46.86, -100.02],
        "exp": [0.41, 1.33, 3.5, 7.4, 16.0]}
|
edusegzy/pychemqt
|
lib/mEoS/CF3I.py
|
Python
|
gpl-3.0
| 2,646
|
#-----------------------------------------------------------------------------
# Copyright (c) 2014-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import get_qmake_path
import PyInstaller.compat as compat
# Modules QtWebEngineWidgets pulls in at runtime that static analysis misses.
hiddenimports = ["sip",
                 "PyQt5.QtCore",
                 "PyQt5.QtGui",
                 "PyQt5.QtNetwork",
                 "PyQt5.QtWebChannel",
                 ]
# Find the additional files necessary for QtWebEngine.
# Currently only implemented for OSX.
# Note that for QtWebEngineProcess to be able to find icudtl.dat the bundle_identifier
# must be set to 'org.qt-project.Qt.QtWebEngineCore'. This can be done by passing
# bundle_identifier='org.qt-project.Qt.QtWebEngineCore' to the BUNDLE command in
# the .spec file. FIXME: This is not ideal and a better solution is required.
# NOTE(review): `binaries`/`datas` are only defined when qmake is found AND on
# darwin; on other platforms PyInstaller treats the absent names as empty.
qmake = get_qmake_path('5')
if qmake:
    libdir = compat.exec_command(qmake, "-query", "QT_INSTALL_LIBS").strip()
    if compat.is_darwin:
        # Bundle the out-of-process renderer helper app.
        binaries = [
            (os.path.join(libdir, 'QtWebEngineCore.framework', 'Versions', '5',\
                'Helpers', 'QtWebEngineProcess.app', 'Contents', 'MacOS', 'QtWebEngineProcess'),
             os.path.join('QtWebEngineProcess.app', 'Contents', 'MacOS'))
        ]
        resources_dir = os.path.join(libdir, 'QtWebEngineCore.framework', 'Versions', '5', 'Resources')
        datas = [
            (os.path.join(resources_dir, 'icudtl.dat'),''),
            (os.path.join(resources_dir, 'qtwebengine_resources.pak'), ''),
            # The distributed Info.plist has LSUIElement set to true, which prevents the
            # icon from appearing in the dock.
            (os.path.join(libdir, 'QtWebEngineCore.framework', 'Versions', '5',\
                'Helpers', 'QtWebEngineProcess.app', 'Contents', 'Info.plist'),
             os.path.join('QtWebEngineProcess.app', 'Contents'))
        ]
|
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-PyQt5.QtWebEngineWidgets.py
|
Python
|
gpl-3.0
| 2,207
|
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file FElementH5PYTest.py
# unittests for field Tags running Tango Server
#
import unittest
import os
import sys
import random
import struct
import binascii
import time
from nxswriter.Element import Element
from nxswriter.FElement import FElement
from nxswriter.Errors import XMLSettingSyntaxError
from nxstools import filewriter as FileWriter
from nxstools import h5pywriter as H5PYWriter
try:
from TstDataSource import TstDataSource
except Exception:
from .TstDataSource import TstDataSource
# True on a 64-bit machine (pointer size is 8 bytes).
IS64BIT = (struct.calcsize("P") == 8)
# Python 3 has no `long`; alias it so the py2-era code below keeps working.
if sys.version_info > (3,):
    long = int
# test fixture
class FElementH5PYTest(unittest.TestCase):
    # constructor
    # \param methodName name of the test method
    def __init__(self, methodName):
        """Initialise test constants and a logged, reproducibly seeded RNG."""
        unittest.TestCase.__init__(self, methodName)
        self._fname = "test.h5"
        self._nxFile = None
        self._eFile = None
        self._tfname = "field"
        # NOTE(review): the assignment above is dead -- overwritten here.
        self._tfname = "group"
        self._fattrs = {"short_name": "test", "units": "m"}
        self._gname = "testGroup"
        self._gtype = "NXentry"
        self._fdname = "testField"
        self._fdtype = "int64"
        self._bint = "int64" if IS64BIT else "int32"
        self._buint = "uint64" if IS64BIT else "uint32"
        self._bfloat = "float64" if IS64BIT else "float32"
        try:
            # Prefer a strong OS-provided random seed.
            self.__seed = long(binascii.hexlify(os.urandom(16)), 16)
        except NotImplementedError:
            self.__seed = long(time.time() * 256)  # use fractional seconds
        self.__rnd = random.Random(self.__seed)
    # test starter
    # \brief Common set up
    def setUp(self):
        """Create a fresh H5PY-backed file with one group and one field."""
        # file handle
        FileWriter.writer = H5PYWriter
        self._nxFile = FileWriter.create_file(
            self._fname, overwrite=True).root()
        # element file objects
        self._group = self._nxFile.create_group(self._gname, self._gtype)
        self._field = self._group.create_field(self._fdname, self._fdtype)
        print("\nsetting up...")
        print("SEED = %s" % self.__seed)
    # test closer
    # \brief Common tear down
    def tearDown(self):
        """Close and remove the scratch HDF5 file."""
        print("tearing down ...")
        self._nxFile.close()
        os.remove(self._fname)
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
except Exception:
error = True
self.assertEqual(error, True)
    # default constructor test
    # \brief It tests default settings
    def test_default_constructor(self):
        """FElement built without a parent h5 object has empty defaults."""
        print("Run: %s.test_default_constructor() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None)
        self.assertTrue(isinstance(el, Element))
        self.assertTrue(isinstance(el, FElement))
        self.assertEqual(el.tagName, self._tfname)
        self.assertEqual(el.content, [])
        self.assertEqual(el.doc, "")
        self.assertEqual(el.source, None)
        self.assertEqual(el.error, None)
        self.assertEqual(el.h5Object, None)
    # constructor test
    # \brief It tests default settings
    def test_constructor(self):
        """An FElement built with a parent exposes it through h5Object."""
        print("Run: %s.test_constructor() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None)
        el2 = FElement(self._tfname, self._fattrs, el, self._group)
        self.assertTrue(isinstance(el2, Element))
        self.assertTrue(isinstance(el2, FElement))
        self.assertEqual(el2.tagName, self._tfname)
        self.assertEqual(el2.content, [])
        self.assertEqual(el2.doc, "")
        self.assertEqual(el.source, None)
        self.assertEqual(el.error, None)
        self.assertEqual(el.h5Object, None)
        self.assertEqual(el2.h5Object, self._group)
    # store method test
    # \brief It tests default settings
    def test_store(self):
        """store() is a no-op returning None with or without an argument."""
        print("Run: %s.test_store() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None, self._group)
        self.assertEqual(el.tagName, self._tfname)
        self.assertEqual(el.content, [])
        self.assertEqual(el.doc, "")
        self.assertEqual(el.store(), None)
        self.assertEqual(el.store("<tag/>"), None)
    # run method test
    # \brief It tests run method
    def test_run(self):
        """run() pulls data from the attached data source exactly when set."""
        print("Run: %s.test_run() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None, self._group)
        self.assertEqual(el.tagName, self._tfname)
        self.assertEqual(el.content, [])
        self.assertEqual(el.doc, "")
        self.assertEqual(el.run(), None)
        self.assertEqual(el.source, None)
        ds = TstDataSource()
        el.source = ds
        self.assertEqual(el.source, ds)
        self.assertTrue(hasattr(el.source, "getData"))
        self.assertTrue(not ds.dataTaken)
        self.assertEqual(el.run(), None)
        self.assertTrue(ds.dataTaken)
    # run _findShape test
    # \brief It tests _findShape method
    def test_findShape_lengths_1d(self):
        """_findShape with explicit 1-d lengths: valid, zero, negative sizes."""
        print("Run: %s.test_findShape_lengths_1d() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None)
        self.myAssertRaise(ValueError, el._findShape, "")
        self.assertEqual(el._findShape("0"), [])
        self.assertEqual(el._findShape("0", None, extraD=True), [0])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("0", None, extraD=True, grows=i), [0])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("0", None, extraD=False, grows=i), [])
        self.myAssertRaise(XMLSettingSyntaxError, el._findShape, "1")
        mlen = self.__rnd.randint(1, 10000)
        lens = {'1': str(mlen)}
        self.assertEqual(
            el._findShape("1", lengths=lens, extraD=False), [mlen])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("1", lengths=lens, extraD=False, grows=i),
                [mlen])
        self.assertEqual(
            el._findShape("1", lengths=lens, extraD=True), [0, mlen])
        for i in range(-2, 2):
            self.assertEqual(
                el._findShape("1", lengths=lens, extraD=True, grows=i),
                [0, mlen])
        for i in range(2, 5):
            self.assertEqual(
                el._findShape("1", lengths=lens, extraD=True, grows=i),
                [mlen, 0])
        lens = {'1': str(0)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
            extraD=False)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
            extraD=True)
        mlen = self.__rnd.randint(-10000, 0)
        lens = {'1': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
            extraD=False)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
            extraD=True)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
                extraD=True, grows=i)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", lengths=lens,
            extraD=True)
    # run _findShape test
    # \brief It tests _findShape method
    def test_findShape_lengths_2d(self):
        """_findShape with explicit 2-d lengths and all grows placements."""
        print("Run: %s.test_findShape_lengths_2d() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None)
        self.myAssertRaise(XMLSettingSyntaxError, el._findShape, "2")
        mlen = [self.__rnd.randint(1, 1000), self.__rnd.randint(1, 1000)]
        lens = {'1': str(mlen[0]), '2': str(mlen[1])}
        self.assertEqual(
            el._findShape("2", lengths=lens, extraD=False), mlen)
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("2", lengths=lens, extraD=False, grows=i),
                mlen)
        self.assertEqual(
            el._findShape("2", lengths=lens, extraD=True), [0] + mlen)
        for i in range(-2, 2):
            self.assertEqual(
                el._findShape("2", lengths=lens, extraD=True, grows=i),
                [0] + mlen)
        self.assertEqual(
            el._findShape("2", lengths=lens, extraD=True, grows=2),
            [mlen[0], 0, mlen[1]])
        for i in range(3, 5):
            self.assertEqual(
                el._findShape("2", lengths=lens, extraD=True, grows=i),
                mlen + [0])
        lens = {'1': '0', '2': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=False)
        lens = {'2': '0', '1': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=False)
        lens = {'2': '0', '1': '0'}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=False)
        lens = {'1': '0', '2': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=True)
        lens = {'2': '0', '1': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=True)
        lens = {'1': '0', '2': '0'}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=True)
        nlen = [self.__rnd.randint(-1000, 0), self.__rnd.randint(-1000, 0)]
        lens = {'1': str(mlen[0]), '2': str(nlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=False)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=True)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
                extraD=True, grows=i)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen), '3': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", lengths=lens,
            extraD=True)
    # run _findShape test
    # \brief It tests _findShape method
    def test_findShape_lengths_3d(self):
        """_findShape with explicit 3-d lengths and all grows placements."""
        print("Run: %s.test_findShape_lengths_3d() " % self.__class__.__name__)
        el = FElement(self._tfname, self._fattrs, None)
        self.myAssertRaise(XMLSettingSyntaxError, el._findShape, "3")
        mlen = [self.__rnd.randint(1, 100), self.__rnd.randint(
            1, 100), self.__rnd.randint(1, 100)]
        lens = {'1': str(mlen[0]), '2': str(mlen[1]), '3': str(mlen[2])}
        self.assertEqual(
            el._findShape("3", lengths=lens, extraD=False), mlen)
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("3", lengths=lens, extraD=False, grows=i), mlen)
        self.assertEqual(
            el._findShape("3", lengths=lens, extraD=True), [0] + mlen)
        for i in range(-2, 2):
            self.assertEqual(
                el._findShape("3", lengths=lens, extraD=True, grows=i),
                [0] + mlen)
        self.assertEqual(
            el._findShape("3", lengths=lens, extraD=True, grows=2),
            [mlen[0], 0, mlen[1], mlen[2]])
        self.assertEqual(
            el._findShape("3", lengths=lens, extraD=True, grows=3),
            [mlen[0], mlen[1], 0, mlen[2]])
        for i in range(4, 5):
            self.assertEqual(
                el._findShape("3", lengths=lens, extraD=True, grows=i),
                mlen + [0])
        lens = {'1': '0', '2': str(mlen[0]), '3': str(mlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        lens = {'2': '0', '1': str(mlen[0]), '3': str(mlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        lens = {'1': '0', '2': '0', '3': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        lens = {'2': '0', '3': '0', '1': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        lens = {'3': '0', '1': '0', '2': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        lens = {'1': '0', '2': str(mlen[0]), '3': str(mlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        lens = {'2': '0', '1': str(mlen[0]), '3': str(mlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        lens = {'1': '0', '2': '0', '3': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        lens = {'2': '0', '3': '0', '1': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        lens = {'3': '0', '1': '0', '2': str(mlen[0])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        nlen = [self.__rnd.randint(-100, 0), self.__rnd.randint(-100, 0)]
        lens = {'1': str(mlen[0]), '2': str(nlen[1]), '3': str(mlen[1])}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=False)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
                extraD=True, grows=i)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen), '3': str(mlen), '4': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens)
        mlen = self.__rnd.randint(1, 1000)
        lens = {'2': str(mlen)}
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "3", lengths=lens,
            extraD=True)
    # run _findShape test
    # \brief It tests _findShape method
    def test_findShape_ds_1d(self):
        """_findShape taking 1-d sizes from the data source (numpy and not)."""
        print("Run: %s.test_findShape_ds_1d() " % self.__class__.__name__)
        ds = TstDataSource()
        el = FElement(self._tfname, self._fattrs, None)
        el.source = ds
        self.assertEqual(el._findShape("0"), [])
        self.assertEqual(el._findShape("0", None, extraD=True), [0])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("0", None, extraD=True, grows=i), [0])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("0", None, extraD=False, grows=i), [])
        mlen = self.__rnd.randint(1, 10000)
        el.source.dims = [mlen]
        self.assertEqual(
            el._findShape("1", extraD=False, checkData=True), [mlen])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("1", extraD=False, grows=i, checkData=True),
                [mlen])
        self.assertEqual(
            el._findShape("1", extraD=True, checkData=True), [0, mlen])
        for i in range(-2, 2):
            self.assertEqual(
                el._findShape("1", extraD=True, grows=i, checkData=True),
                [0, mlen])
        for i in range(2, 5):
            self.assertEqual(
                el._findShape("1", extraD=True, grows=i, checkData=True),
                [mlen, 0])
        mlen = self.__rnd.randint(1, 10000)
        el.source.dims = [mlen]
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", extraD=False)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=False,
                grows=i)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=True)
        for i in range(-2, 2):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=True,
                grows=i)
        for i in range(2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=True,
                grows=i)
        el.source.dims = [0]
        self.assertEqual(el._findShape("1", checkData=True), [])
        el.source.dims = [0]
        self.myAssertRaise(XMLSettingSyntaxError, el._findShape, "1")
        el.source.numpy = False
        mlen = self.__rnd.randint(1, 10000)
        el.source.dims = [mlen]
        self.assertEqual(
            el._findShape("1", extraD=False, checkData=True), [mlen])
        for i in range(-2, 5):
            self.assertEqual(
                el._findShape("1", extraD=False, grows=i, checkData=True),
                [mlen])
        self.assertEqual(
            el._findShape("1", extraD=True, checkData=True), [0, mlen])
        for i in range(-2, 2):
            self.assertEqual(
                el._findShape("1", extraD=True, grows=i, checkData=True),
                [0, mlen])
        for i in range(2, 5):
            self.assertEqual(
                el._findShape("1", extraD=True, grows=i, checkData=True),
                [mlen, 0])
        el.source.dims = [0]
        self.assertEqual(el._findShape("1", checkData=True), [])
        mlen = self.__rnd.randint(1, 10000)
        el.source.dims = [mlen]
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", extraD=False)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=False,
                grows=i)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "1", extraD=True)
        for i in range(-2, 2):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=True,
                grows=i)
        for i in range(2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "1", extraD=True,
                grows=i)
        el.source.dims = [0]
        self.myAssertRaise(XMLSettingSyntaxError, el._findShape, "1")
    # run _findShape test
    # \brief It tests _findShape method
    def test_findShape_ds_2d(self):
        """_findShape 2-d via data source without checkData always raises."""
        print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
        ds = TstDataSource()
        el = FElement(self._tfname, self._fattrs, None)
        el.source = ds
        el.source.numpy = True
        mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
        el.source.dims = mlen
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=False)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=False,
                grows=i)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=True)
        for i in range(-2, 2):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=True,
                grows=i)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
        for i in range(3, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=True,
                grows=i)
        el.source.numpy = False
        mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
        el.source.dims = mlen
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=False)
        for i in range(-2, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=False,
                grows=i)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=True)
        for i in range(-2, 2):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=True,
                grows=i)
        self.myAssertRaise(
            XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
        for i in range(3, 5):
            self.myAssertRaise(
                XMLSettingSyntaxError, el._findShape, "2", extraD=True,
                grows=i)
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_checkData(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), mlen)
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
mlen)
self.assertEqual(
el._findShape("2", extraD=True, checkData=True), [0] + mlen)
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + mlen)
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[mlen[0], 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
mlen + [0])
el.source.numpy = False
mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), mlen)
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
mlen)
self.assertEqual(
el._findShape("2", extraD=True, checkData=True), [0] + mlen)
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + mlen)
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[mlen[0], 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
mlen + [0])
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_ext_checkData(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [self.__rnd.randint(1, 2), self.__rnd.randint(1, 2)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, extends=True, checkData=True),
mlen)
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, extends=True,
checkData=True), mlen)
self.assertEqual(
el._findShape("2", extraD=True, extends=True, checkData=True),
[0] + mlen)
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, extends=True,
checkData=True), [0] + mlen)
self.assertEqual(
el._findShape("2", extraD=True, grows=2, extends=True,
checkData=True), [mlen[0], 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, extends=True,
checkData=True), mlen + [0])
el.source.numpy = False
mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, extends=True, checkData=True),
mlen)
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, extends=True,
checkData=True), mlen)
self.assertEqual(
el._findShape("2", extraD=True, extends=True, checkData=True),
[0] + mlen)
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, extends=True,
checkData=True), [0] + mlen)
self.assertEqual(
el._findShape("2", extraD=True, grows=2, extends=True,
checkData=True), [mlen[0], 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, extends=True,
checkData=True), mlen + [0])
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends_checkData(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [self.__rnd.randint(2, 1000), 1]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), [mlen[0], 1])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[mlen[0], 1])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True),
[0] + [mlen[0]] + [1])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + [mlen[0]] + [1])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[mlen[0], 0, 1])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[mlen[0], 1, 0])
el.source.numpy = False
mlen = [self.__rnd.randint(2, 1000), 1]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), [mlen[0], 1])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[mlen[0], 1])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True),
[0] + [mlen[0]] + [1])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + [mlen[0]] + [1])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[mlen[0], 0, 1])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[mlen[0]] + [1, 0])
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends2_checkData(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [1, self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True),
[1, mlen[1]])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[1, mlen[1]])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True),
[0, 1] + [mlen[1]])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0, 1] + [mlen[1]])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[1, 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[1] + [mlen[1]] + [0])
el.source.numpy = False
mlen = [1, self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), [1, mlen[1]])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[1, mlen[1]])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True),
[0, 1] + [mlen[1]])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0, 1] + [mlen[1]])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[1, 0, mlen[1]])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[1] + [mlen[1]] + [0])
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends3_checkData(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [1, 1]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True), [1, 1])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[1, 1])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True), [0] + [1, 1])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + [1, 1])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[1, 0, 1])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[1, 1] + [0])
el.source.numpy = False
mlen = [1, 1]
el.source.dims = mlen
self.assertEqual(
el._findShape("2", extraD=False, checkData=True),
[1, 1])
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i, checkData=True),
[1, 1])
self.assertEqual(
el._findShape("2", extraD=True, checkData=True),
[0] + [1, 1])
for i in range(-2, 2):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[0] + [1, 1])
self.assertEqual(
el._findShape("2", extraD=True, grows=2, checkData=True),
[1, 0, 1])
for i in range(3, 5):
self.assertEqual(
el._findShape("2", extraD=True, grows=i, checkData=True),
[1, 1] + [0])
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_ext(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [self.__rnd.randint(1, 2), self.__rnd.randint(1, 2)]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
extends=True)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i, extends=True)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
extends=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i, extends=True)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2,
extends=True)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i, extends=True)
el.source.numpy = False
mlen = [self.__rnd.randint(2, 1000), self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
extends=True)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i, extends=True)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
extends=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i, extends=True)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2,
extends=True)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i, extends=True)
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [self.__rnd.randint(2, 1000), 1]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
el.source.numpy = False
mlen = [self.__rnd.randint(2, 1000), 1]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends2(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [1, self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
el.source.numpy = False
mlen = [1, self.__rnd.randint(2, 1000)]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
# run _findShape test
# \brief It tests _findShape method
def test_findShape_ds_2d_extends3(self):
print("Run: %s.test_findShape_ds_2d() " % self.__class__.__name__)
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
el.source = ds
el.source.numpy = True
mlen = [1, 1]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
el.source.numpy = False
mlen = [1, 1]
el.source.dims = mlen
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False)
for i in range(-2, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=False,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True)
for i in range(-2, 2):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True, grows=2)
for i in range(3, 5):
self.myAssertRaise(
XMLSettingSyntaxError, el._findShape, "2", extraD=True,
grows=i)
# run _findShape test
# \brief It tests _findShape method
def test_findShape_xml(self):
print("Run: %s.test_findShape_xml() " % self.__class__.__name__)
el = FElement(self._tfname, self._fattrs, None)
el.content = ["123"]
self.assertEqual(el._findShape("0"), [])
self.assertEqual(el._findShape("0", None, extraD=True), [0])
for i in range(-2, 5):
self.assertEqual(
el._findShape("0", None, extraD=True, grows=i), [0])
for i in range(-2, 5):
self.assertEqual(
el._findShape("0", None, extraD=False, grows=i), [])
mlen = self.__rnd.randint(1, 10000)
el.content = ["123 " * mlen]
self.assertEqual(el._findShape("1", extraD=False), [mlen])
for i in range(-2, 5):
self.assertEqual(
el._findShape("1", extraD=False, grows=i), [mlen])
self.assertEqual(el._findShape("1", extraD=True), [mlen])
for i in range(-2, 5):
self.assertEqual(
el._findShape("1", extraD=True, grows=i), [mlen])
mlen = [self.__rnd.randint(1, 1000), self.__rnd.randint(1, 1000)]
el.content = ["123 " * mlen[1] + "\n "] * mlen[0]
self.assertEqual(el._findShape("2", extraD=False), mlen)
for i in range(-2, 5):
self.assertEqual(
el._findShape("2", extraD=False, grows=i), mlen)
self.assertEqual(el._findShape("2", extraD=True), mlen)
for i in range(-2, 5):
self.assertEqual(el._findShape("2", extraD=True, grows=i), mlen)
# run setMessage test
# \brief It tests setMessage method
def test_setMessage(self):
print("Run: %s.test_setMessage() " % self.__class__.__name__)
message = "My Exception"
text = "Data for %s not found. DATASOURCE:%s"
uob = "unnamed object"
uds = "unknown datasource"
ds = TstDataSource()
el = FElement(self._tfname, self._fattrs, None)
self.assertEqual(
el.setMessage(), (text % (uob, uds), None))
self.assertEqual(
el.setMessage(message), (text % (uob, uds), message))
el.source = ds
self.assertEqual(
el.setMessage(), (text % (uob, str(ds)), None))
self.assertEqual(
el.setMessage(message), (text % (uob, str(ds)), message))
el2 = FElement(self._tfname, self._fattrs, el, self._group)
self.assertEqual(
el2.setMessage(),
(text % ("/" + self._group.name + ":NXentry", uds),
None))
self.assertEqual(
el2.setMessage(message),
(text % ("/" + self._group.name + ":NXentry", uds),
message))
el2.source = ds
self.assertEqual(
el2.setMessage(),
(text % ("/" + self._group.name + ":NXentry", str(ds)),
None))
self.assertEqual(
el2.setMessage(message),
(text % ("/" + self._group.name + ":NXentry", str(ds)),
message))
if __name__ == '__main__':
    # Run this module's tests directly from the command line.
    unittest.main()
|
nexdatas/writer
|
test/FElementH5PY_test.py
|
Python
|
gpl-3.0
| 45,164
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        """Prepare the file paths and comparison settings."""
        self.maxDiff = None
        test_dir = 'xlsxwriter/test/comparison/'
        target = 'image13.xlsx'
        self.image_dir = test_dir + 'images/'
        self.got_filename = test_dir + '_test_' + target
        self.exp_filename = test_dir + 'xlsx_files/' + target
        self.ignore_files = []
        self.ignore_elements = {}
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        worksheet.set_row(1, 75)
        worksheet.set_column('C:C', 32)
        # insert the logo with a small pixel offset inside cell C2
        worksheet.insert_image(
            'C2', self.image_dir + 'logo.png',
            {'x_offset': 8, 'y_offset': 5})
        workbook.close()
        got, exp = _compare_xlsx_files(
            self.got_filename, self.exp_filename,
            self.ignore_files, self.ignore_elements)
        self.assertEqual(got, exp)
    def tearDown(self):
        # Cleanup: remove the generated workbook, if any.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
    # Run this module's tests directly from the command line.
    unittest.main()
|
ivmech/iviny-scope
|
lib/xlsxwriter/test/comparison/test_image13.py
|
Python
|
gpl-3.0
| 1,787
|
from django.db import models
from django.contrib.auth.models import User
class Website(models.Model):
    """A user-owned website; saving it rewrites all child page urls."""
    # NOTE(review): ForeignKey without on_delete is only valid on
    # Django < 2.0 - confirm the targeted Django version.
    user = models.ForeignKey(User, related_name='websites')
    name = models.CharField(null=True, max_length=100, unique=True)
    url = models.CharField(unique=True, max_length=200, db_index=True)
    embed_src = models.CharField(max_length=200, blank=True)
    def save(self, *args, **kwargs):
        """Re-base every child page url on this website's url, then save."""
        pages = self.pages.all()
        for page in pages:
            # NOTE(review): find('/') locates the FIRST slash; if page.url
            # contains a scheme ('http://...') this slices inside '//'.
            # Presumably urls are stored scheme-less - confirm with callers.
            relative_url = page.url[page.url.find('/'):]
            page.url = self.url + relative_url
            # child pages are saved BEFORE the website itself is persisted
            page.save()
        super(Website, self).save(*args, **kwargs)
class Page(models.Model):
    """A page belonging to a Website; its url is re-based on save."""
    name = models.CharField(null=True, max_length=100)
    url = models.CharField(max_length=200, db_index=True)
    # NOTE(review): ForeignKey without on_delete is only valid on
    # Django < 2.0 - confirm the targeted Django version.
    website = models.ForeignKey(Website, related_name='pages')
    extractions_file = models.CharField(max_length=200, blank=True)
    def save(self, *args, **kwargs):
        """Prefix the page's relative url with the parent website url."""
        # NOTE(review): same first-slash slicing caveat as Website.save -
        # confirm urls are stored without a scheme.
        relative_url = self.url[self.url.find('/'):]
        self.url = self.website.url + relative_url
        super(Page, self).save(*args, **kwargs)
|
dash1291/major
|
webserver/conceptual/webapp/models.py
|
Python
|
gpl-3.0
| 1,105
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import abc
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.atoms.atom import Atom
import canonInterface
import scipy.sparse as sp
if sys.version_info >= (3, 0):
from functools import reduce
class AffAtom(Atom):
    """ Abstract base class for affine atoms. """
    __metaclass__ = abc.ABCMeta
    def sign_from_args(self):
        """By default, the sign is the most general of all the argument signs.
        """
        return u.sign.sum_signs([arg for arg in self.args])
    def is_atom_convex(self):
        """Is the atom convex?
        """
        # An affine atom is reported both convex and concave.
        return True
    def is_atom_concave(self):
        """Is the atom concave?
        """
        return True
    def is_incr(self, idx):
        """Is the composition non-decreasing in argument idx?
        """
        # Defaults to increasing.
        return True
    def is_decr(self, idx):
        """Is the composition non-increasing in argument idx?
        """
        # Defaults to increasing.
        return False
    def is_quadratic(self):
        """The atom is quadratic iff every argument is quadratic."""
        return all([arg.is_quadratic() for arg in self.args])
    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.
        Matrix expressions are vectorized, so the gradient is a matrix.
        Args:
            values: A list of numeric values for the arguments.
        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        # TODO should be a simple function in CVXcanon for this.
        # Make a fake lin op tree for the function.
        # Constant arguments become constants; variable arguments become
        # fresh variables indexed by their argument position, and
        # var_offsets records where each one starts in the stacked vector.
        fake_args = []
        var_offsets = {}
        offset = 0
        for idx, arg in enumerate(self.args):
            if arg.is_constant():
                fake_args += [lu.create_const(arg.value, arg.size)]
            else:
                fake_args += [lu.create_var(arg.size, idx)]
                var_offsets[idx] = offset
                offset += arg.size[0]*arg.size[1]
        fake_expr, _ = self.graph_implementation(fake_args, self.size,
                                                 self.get_data())
        # Get the matrix representation of the function.
        V, I, J, _ = canonInterface.get_problem_matrix(
            [lu.create_eq(fake_expr)],
            var_offsets,
            None
        )
        shape = (offset, self.size[0]*self.size[1])
        # (J, I) are swapped, so the matrix is transposed relative to the
        # problem matrix: rows index variable entries, columns atom entries.
        stacked_grad = sp.coo_matrix((V, (J, I)), shape=shape).tocsc()
        # Break up into per argument matrices.
        grad_list = []
        start = 0
        for arg in self.args:
            if arg.is_constant():
                # Constant arguments get an all-zero gradient (scalar 0
                # for the 1x1 case, an empty sparse matrix otherwise).
                grad_shape = (arg.size[0]*arg.size[1], shape[1])
                if grad_shape == (1, 1):
                    grad_list += [0]
                else:
                    grad_list += [sp.coo_matrix(grad_shape, dtype='float64')]
            else:
                # Variable arguments take consecutive row slices of the
                # stacked gradient, in the same order as self.args.
                stop = start + arg.size[0]*arg.size[1]
                grad_list += [stacked_grad[start:stop,:]]
                start = stop
        return grad_list
|
mwytock/cvxpy
|
cvxpy/atoms/affine/affine_atom.py
|
Python
|
gpl-3.0
| 3,669
|
# import vim
def normal(s):
    """Execute a Vim :normal command with the given key sequence."""
    # PEP 8 (E731): use def instead of assigning a lambda to a name.
    vim().command('normal %s' % s)


def normal_silent(s):
    """Execute :normal, silently suppressing errors and messages."""
    vim().command('silent! normal %s' % s)
def vim():
    """ call Vim.
    This is wrapped so that it can easily be mocked.
    """
    import vim as _vim
    return _vim
def _goto_window_for_buffer(expr):
""" Moves the cursor to the first window associated with buffer b in the
current tab page (only).
Arguments
---------
expr : int or str
The target buffer - either a buffer number (int) or a file-pattern
(str). See :h bufwinnr for a more detailed description.
"""
if not isinstance(expr, int) and not isinstance(expr, str):
raise TypeError('b has invalid type, str or int expected.')
if isinstance(expr, str):
expr = "'{0}'".format(expr)
winnr = int(vim().eval('bufwinnr({0})'.format(expr)))
assert winnr != -1
vim().command('%dwincmd w' % int(winnr))
# Rendering utility functions
def _output_preview_text(lines):
    """ Output a list of lines to the mundo preview window. """
    _goto_window_for_buffer('__Mundo_Preview__')
    editor = vim()
    editor.command('setlocal modifiable')
    editor.current.buffer[:] = [text.rstrip() for text in lines]
    editor.command('setlocal nomodifiable')
def _undo_to(n):
    """Jump to undo state *n*; 0 rewinds past the oldest change."""
    n = int(n)
    if n:
        vim().command('silent undo %d' % n)
    else:
        vim().command('silent earlier %s' % (int(vim().eval('&undolevels')) + 1))
|
SpaceVim/SpaceVim
|
bundle/vim-mundo/autoload/mundo/util.py
|
Python
|
gpl-3.0
| 1,462
|
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"eventengine daemon interface"
import sys
import os
import logging
from optparse import OptionParser
import signal
from nav import buildconf
import nav.daemon
from nav.eventengine.engine import EventEngine
import nav.logs
from nav.config import NAV_CONFIG
# pid file name handed to the nav.daemon helpers below
PIDFILE = 'eventengine.pid'
# daemon log file, placed in the configured NAV log directory
LOGFILE = os.path.join(NAV_CONFIG['LOG_DIR'], 'eventengine.log')
_logger = logging.getLogger(__name__)
def main():
    "main execution entry"
    opts, _ = parse_options()
    nav.logs.init_stderr_logging()
    exit_if_already_running()
    if opts.foreground:
        # stay attached to the terminal; just record our pid
        nav.daemon.writepidfile(PIDFILE)
    else:
        daemonize()
    start_engine()
def parse_options():
    "Parses the program's command line options"
    # parse_args() already returns the (options, args) pair
    return make_option_parser().parse_args()
def make_option_parser():
    "Makes an OptionParser instance for the program"
    parser = OptionParser(
        version="NAV " + buildconf.VERSION,
        epilog=(
            "This program monitors NAV's event queue and decides which "
            "actions to take when events occur"
        ),
    )
    # the only supported switch: keep the process in the foreground
    parser.add_option(
        "-f",
        "--foreground",
        action="store_true",
        dest="foreground",
        help="run in foreground instead of daemonizing",
    )
    return parser
def exit_if_already_running():
    "Exits the process if another eventengine process is already running"
    try:
        nav.daemon.justme(PIDFILE)
    except nav.daemon.DaemonError as err:
        # another instance holds the pid file; log and bail out
        _logger.error(err)
        sys.exit(1)
def daemonize():
    "Daemonizes the program"
    try:
        nav.daemon.daemonize(PIDFILE, stderr=open(LOGFILE, "a"))
    except nav.daemon.DaemonError as err:
        _logger.fatal(err)
        sys.exit(1)
    else:
        # only install handlers once we are successfully detached
        install_signal_handlers()
def install_signal_handlers():
    """Installs signal handlers"""
    handlers = {
        signal.SIGTERM: sigterm_handler,
        signal.SIGHUP: sighup_handler,
    }
    for signum, handler in handlers.items():
        signal.signal(signum, handler)
def sigterm_handler(signum, _frame):
    """Logs the imminent shutdown"""
    name = nav.daemon.signame(signum)
    _logger.info("--- %s received: shutting down eventengine ---", name)
    sys.exit(0)
def sighup_handler(_signum, _frame):
    """Reopens log files."""
    _logger.info("SIGHUP received; reopening log files")
    # reopen first, so following messages land in fresh file handles
    nav.logs.reopen_log_files()
    # re-attach stderr to the daemon log file
    nav.daemon.redirect_std_fds(stderr=open(LOGFILE, "a"))
    # reload log levels and logging configuration
    nav.logs.reset_log_levels()
    nav.logs.set_log_config()
    _logger.info("Log files reopened, log levels reloaded.")
def start_engine():
    "Starts event queue processing"
    EventEngine().start()
if __name__ == '__main__':
    # Run the daemon when invoked directly.
    main()
|
hmpf/nav
|
python/nav/eventengine/daemon.py
|
Python
|
gpl-3.0
| 3,336
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#This file is part of pydsl.
#
#pydsl is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#pydsl is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pydsl. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Nestor Arocha"
__copyright__ = "Copyright 2008-2014, Nestor Arocha"
__email__ = "nesaro@gmail.com"
import unittest
from pydsl.grammar import String
from pydsl.grammar.parsley import ParsleyGrammar
from pydsl.grammar.PEG import OneOrMore, Choice
from pydsl.translator import ParsleyTranslator
class TestBinaryAlphabet(unittest.TestCase):
    """Functional test of a parsley-based addition translator."""
    def test_binaryAlphabet(self):
        """Translates '01+10' to 1 + 10 = 11 via a parsley grammar."""
        # BUG FIX: removed the unused binary_alphabet/binary_number PEG
        # objects; only the parsley grammar below is exercised.
        parsley_grammar = ParsleyGrammar("""digit = anything:x ?(x in '01')
number = <digit+>:ds -> int(ds)
expr = number:left ( '+' number:right -> left + right
                     | -> left)""", "expr")
        binary_addition = ParsleyTranslator(parsley_grammar)
        self.assertEqual(binary_addition('01+10'), 11)
|
nesaro/pydsl
|
tests/functional/test_Binary.py
|
Python
|
gpl-3.0
| 1,500
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from twosampyle.plot_utils import plot_hist
class ChiSquaredTest():
def __init__(self, observed=None, expected=None):
self.observed = observed
self.expected = expected
@staticmethod
def chi_squared_statistic(o,e):
""""""
o = np.array(o)
e = np.array(e) + 1e-6 # add so we don't get 0-division error
return np.sum(1.0*(o - e)**2 / e)
def plot_dsn(self, k=100):
simulated_stats = self.simPermDsn(k=k)
tit = "Chi-Squared Test Statistic Distribution for k={} Simulations".format(k)
plot_hist(simulated_stats, title=tit, xlabel="Test Statistic Value", ylabel="Frequency")
def testStat(self, input_observed=None, input_expected=None):
if input_observed or input_expected:
chisq_teststat = ChiSquaredTest.chi_squared_statistic(input_observed, input_expected)
else:
chisq_teststat = ChiSquaredTest.chi_squared_statistic(self.observed, self.expected)
return chisq_teststat
def simPermDsn(self, input_observed=None, input_expected=None, k=100):
if input_observed or input_expected:
test_stat = self.testStat(input_observed, input_expected)
n = len(input_observed)
else:
test_stat = self.testStat()
n = len(self.observed)
# create sampling distribution
chisqrd_vals = []
for i in range(k):
values = np.random.random((n,))
ex = 1.0*n/2
values[values<.5]=0
values[values>=.5]=1
diff1 = ChiSquaredTest.chi_squared_statistic(sum(values==0),ex)
diff2 = ChiSquaredTest.chi_squared_statistic(sum(values==1),ex)
chisqrd_vals.append(diff1+diff2)
return chisqrd_vals
def pvalue(self, input_observed=None, input_expected=None):
current_stat = self.testStat(input_observed, input_expected)
simulated_stats = self.simPermDsn(input_observed, input_expected)
# p-value = proportion of test stats greater than ours
p_value = 1.0*sum(simulated_stats >= current_stat) / len(simulated_stats)
return p_value
|
jwilber/twosampyle
|
twosampyle/permchisquare.py
|
Python
|
gpl-3.0
| 2,315
|
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import wx
import picross
import piw
import random
import optparse
import sys
from pi import atom,action,node,domain,logic,bundles,resource,version,agent
from pisession import gui
from pi.logic.shortcuts import T
from pigui import fonts, language
from app_commander import mainframe,history,command,dictionary,upgrade
class ScrollerDelegate(piw.scrolldelegate):
    """piw scroll delegate forwarding events to optional callbacks.

    Either callback may be None (or otherwise falsy), in which case the
    corresponding event is simply dropped.
    """

    def __init__(self, scrollfunc, tapfunc):
        self.__on_scroll = scrollfunc
        self.__on_tap = tapfunc
        piw.scrolldelegate.__init__(self)

    def scroll(self, h, v):
        """Forward a (horizontal, vertical) scroll event."""
        callback = self.__on_scroll
        if callback:
            callback(h, v)

    def tap(self):
        """Forward a tap event."""
        callback = self.__on_tap
        if callback:
            callback()
class Scroller(piw.scroller):
    """piw.scroller preconfigured with a ScrollerDelegate and fixed speeds."""

    def __init__(self, func1, func2):
        delegate = ScrollerDelegate(func1, func2)
        # Keep a reference so the delegate outlives this constructor.
        self.__delegate = delegate
        piw.scroller.__init__(self, delegate, 0.5, 0.5, 100)
class ViewManager(agent.Agent):
    """Top-level EigenD agent behind the eigencommander application.

    Builds the language/history/command models, three scroll controllers
    (dictionary pane, history pane and a spare), exposes nudge/tap atoms so
    the instrument can drive the panes, and creates the main wx window.
    """

    def __init__(self,name):
        agent.Agent.__init__(self,signature=upgrade.Signature(),volatile=True,names='eigencommander',ordinal=1)
        self.name = name
        # Random 48-bit node identifier with a fixed high bit forced on.
        self.node = random.randrange(0, 1<<48L) | 0x010000000000L
        # The language model is created synchronously on the background thread.
        self.langModel=gui.call_bg_sync(self.__create_lang)
        self.historyModel=history.HistoryModel(self.langModel)
        self.commandModel=command.CommandModel(self.langModel)
        self.cdomain = piw.clockdomain_ctl()
        self.cdomain.set_source(piw.makestring('*',0))
        self.scroller1 = Scroller(self.__scroll1,self.__tap1) # scrolls the dictionary pane
        self.scroller2 = Scroller(self.__scroll2,self.__tap2) # scrolls the history pane
        self.scroller3 = Scroller(self.__scroll3,self.__tap3) # empty
        # Each scroller receives a 3-signal vector: 1=horizontal, 2=vertical, 3=tap.
        self.scroller_in1 = bundles.VectorInput(self.scroller1.cookie(),self.cdomain,signals=(1,2,3))
        self.scroller_in2 = bundles.VectorInput(self.scroller2.cookie(),self.cdomain,signals=(1,2,3))
        self.scroller_in3 = bundles.VectorInput(self.scroller3.cookie(),self.cdomain,signals=(1,2,3))
        nudge=(T('inc',0.1),)
        # Atoms 1-4: nudger controls for the dictionary (1,2) and history (3,4) scrollers.
        self[1] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=0, names='sideways nudger', ordinal=1, policy=self.scroller_in1.vector_policy(1,False))
        #self[2] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=0, names='vertical nudger', ordinal=1, policy=self.scroller_in1.vector_policy(2,False),rtransient=True)
        self[2] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=0, names='vertical nudger', ordinal=1, policy=self.scroller_in1.vector_policy(2,False))
        self[3] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=0, names='sideways nudger', ordinal=2, policy=self.scroller_in2.vector_policy(1,False))
        self[4] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=0, names='vertical nudger', ordinal=2, policy=self.scroller_in2.vector_policy(2,False))
        #self[8]=browse.BrowseAgent(self,browse.getName())
        self[9]=dictionary.DictionaryAgent(self,dictionary.getName())
        # self.add_verb2(1,'browse([],None,role(None,[proto(browse),singular]))',callback=self.__do_browse)
        self.add_verb2(2,'minimise([],None)',callback=self.__minimise)
        self.add_verb2(3,'maximise([],None)',callback=self.__maximise)
        self.font=wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self.font.SetPointSize(fonts.DEFAULT_PTS)
        # Atom 12: text size control (not shown on stage).
        self[12]=atom.Atom(domain=domain.BoundedInt(5,20,rest=11),names='text',protocols='nostage',policy=atom.default_policy(self.__set_fontsize))
        self[15] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=-1, names='sideways nudger', ordinal=3, policy=self.scroller_in3.vector_policy(1,False))
        self[16] = atom.Atom(domain=domain.BoundedFloat(-1,1,hints=nudge), init=-1, names='vertical nudger', ordinal=3, policy=self.scroller_in3.vector_policy(2,False))
        self[17] = atom.Atom(domain=domain.BoundedFloat(0,1), init=0, names='tapper', ordinal=1, policy=self.scroller_in1.vector_policy(3,False))
        self[18] = atom.Atom(domain=domain.BoundedFloat(0,1), init=0, names='tapper', ordinal=2, policy=self.scroller_in2.vector_policy(3,False))
        self[19] = atom.Atom(domain=domain.BoundedFloat(0,1), init=0, names='tapper', ordinal=3, policy=self.scroller_in3.vector_policy(3,False))
        # Window size (percentages of the screen) persisted in private agent state.
        self.__size=node.Server(change=self.__size_changed)
        self.set_private(node.Server())
        self.get_private()[1]=self.__size
        self.__x=100
        self.__y=100
        self.__setup_size()
        # Bounds used by the minimise/maximise verbs, in screen percent.
        self.__minX=60
        self.__minY=80
        self.__maxX=100
        self.__maxY=100
        self.__rootFrame=None
        self.__createRootFrame()
        self.scroller1.reset(1,-1)
        # self.scroller3.reset(-1,-1)
        self.scroller2.reset(-1,1)
        # self.scroller1.set_scheme(1)
        self.scroller1.enable()
        self.scroller2.enable()
        self.scroller3.enable()

    # def __do_browse(self,subject,*args):
    #     print '__do_browse',args
    #     #self[8].model.changeTargetArgs(args)

    def __size_changed(self,d):
        # State-server callback: restore the window size from a rendered (x,y) term.
        print '__size_changed'
        if not d.is_string():
            return False
        l = logic.parse_clause(d.as_string())
        self.__x=l[0]
        self.__y=l[1]
        self.__setup_size()
        if self.__rootFrame:
            self.__doSetSize()

    def __setup_size(self):
        # Persist the current size percentages back into the state server.
        print '__setup_size',self.__x,self.__y
        self.__size.set_data(piw.makestring(logic.render_term((self.__x,self.__y)),0))

    def __create_lang(self):
        # Runs on the background thread via gui.call_bg_sync.
        return language.LanguageModel()

    def reset1(self,h=None,v=None):
        """Reset the dictionary-pane scroller (vertical axis inverted)."""
        #print 'reset1: v=',v
        self.scroller1.reset(h,v*-1)

    def reset2(self,h=None,v=None):
        """Reset the history-pane scroller (vertical axis inverted)."""
        self.scroller2.reset(h,v*-1)

    def reset3(self,h=None,v=None):
        """Reset the spare scroller (vertical axis inverted)."""
        self.scroller3.reset(h,v*-1)

    def getTitlePanel(self):
        # Returns None before the root frame exists.
        if self.__rootFrame:
            return self.__rootFrame.getTitlePanel()

    def updateStatus(self,text):
        """Show *text* in the main frame's status area."""
        print 'commander_cli:updateStatus',text
        self.__rootFrame.updateStatus(text)

    def doSize(self):
        # Record the frame's current size as percentages of the screen and persist it.
        print 'doSize'
        if self.__rootFrame:
            size=self.__rootFrame.GetSize()
            screenSize=wx.DisplaySize()
            self.__x=int(100*(float(size[0])/float(screenSize[0])))
            self.__y=int(100*(float(size[1])/float(screenSize[1])))
            print 'doSize',size,screenSize,self.__x,self.__y
            self.__setup_size()

    def __set_fontsize(self, value):
        # Policy callback for atom 12; rebuilds the UI font at the new point size.
        print '__set_fontsize',value
        self.font=wx.Font(value,wx.FONTFAMILY_SWISS,wx.FONTSTYLE_NORMAL,weight=wx.FONTWEIGHT_NORMAL)
        return True

    def __scroll1(self,h,v):
        # Hardware scroll events for the dictionary pane (horizontal inverted).
        print '__scroll1',h,v
        picross.display_active()
        #self.__scroll_dict(h,v*-1)
        self.__scroll_dict(h*-1,v)

    def __scroll3(self,h,v):
        # Spare scroller: keeps the display awake but drives no pane.
        picross.display_active()
        #self.__scroll_info(h,v*-1)

    def __scroll2(self,h,v):
        # Hardware scroll events for the history pane (vertical inverted).
        picross.display_active()
        self.__scroll_history(h,v*-1)

    def __tap1(self):
        print 'tap 1'

    def __tap2(self):
        print 'tap 2'

    def __tap3(self):
        print 'tap 3'

    def onTap3(self):
        pass

    def __getDictionaryPanel(self):
        if self.__rootFrame:
            return self.__rootFrame.getDictionaryPanel()

    def __getInfoPanel(self):
        if self.__rootFrame:
            return self.__rootFrame.getInfoPanel()

    def __getHistoryPanel(self):
        if self.__rootFrame:
            return self.__rootFrame.getHistoryPanel()

    def __minimise(self,*args):
        # 'minimise' verb: shrink the window to the configured minimum.
        self.__x=self.__minX
        self.__y=self.__minY
        self.__setup_size()
        self.__doSetSize()

    def __doSetSize(self):
        # Convert stored percentages to pixels, leaving margins for window chrome.
        size=wx.DisplaySize()
        size=( (0.01*self.__x*size[0])-10 , (0.01*self.__y*size[1])-50 )
        self.__rootFrame.SetSize(size)

    def __maximise(self,*args):
        # 'maximise' verb: grow the window to the configured maximum.
        self.__x=self.__maxX
        self.__y=self.__maxY
        self.__setup_size()
        self.__doSetSize()

    def __scroll_dict(self,h,v):
        vp=self.__getDictionaryPanel()
        if vp:
            #print '__scroll_list: v=',v
            vp.test_scroll_both(h,v)

    def __scroll_history(self,h,v):
        vp=self.__getHistoryPanel()
        if vp:
            vp.scroll_both(h,v)

    def __scroll_info(self,h,v):
        vp=self.__getInfoPanel()
        if vp:
            vp.scroll_both(h,v)

    def __createRootFrame(self):
        # Create the main window slightly smaller than the screen and show it.
        size=wx.DisplaySize()
        size=( (size[0]-10),(size[1]-50) )
        self.__rootFrame=mainframe.MainFrame('',self,size)
        self.__rootFrame.Show()
        self.updateStatus('Connecting...')
        self.__doSetSize()

    # def doHints(self,hints):
    #     pass
    #     view,target=logic.parse_clause(hints[0])
    #     print 'Browser:doHints',view,target
    #     if view=='browseview':
    #         self[8].model.change_target(target)
    #     else:
    #         print 'No hint'

    def getMainFrame(self):
        return self.__rootFrame
class commanderApp(gui.App):
    """wx application hosting the eigencommander ViewManager agent."""

    def __init__(self,name,logfunc):
        gui.App.__init__(self,logfunc=logfunc,name=name)
        # gui.App.__init__(self,name=name)
        print 'commander starting'
        # Show a splash screen while the agent starts, if the image is present.
        imageName=resource.find_release_resource('app_commander','commander_splash.png')
        if imageName:
            image=wx.Image(imageName,wx.BITMAP_TYPE_PNG)
            bmp=image.ConvertToBitmap()
            wx.SplashScreen(bmp,wx.SPLASH_CENTRE_ON_SCREEN|wx.SPLASH_TIMEOUT,2000,None,-1)
            wx.Yield()
        print 'commander starting 2'
        self.agent = ViewManager(name)
        print 'commander starting 3'
        # Register the agent with the tsd server, then advertise it as <main>.
        piw.tsd_server('<%s>' % name, self.agent)
        print 'commander starting 4'
        self.agent.advertise('<main>')
        print 'commander starting 5'
def cli():
    """Command-line entry point for eigencommander.

    Parses options, takes a singleton lock, routes logging to stdout or a
    log file, then runs the wx main loop until exit.
    """
    parser = optparse.OptionParser()
    parser.add_option('--stdout',action='store_true',dest='stdout',default=False,help='log to stdout')
    # Drop the Mac OS process-serial-number argument added by the Finder.
    x = [ a for a in sys.argv if not a.startswith('-psn') ]
    (opts,args) = parser.parse_args(x)
    name = 'eigencommander1'
    # Only one instance may run at a time.
    lock = resource.LockFile(name)
    if not lock.lock():
        print 'cannot get lock: aborting'
        sys.exit(-1)
    if opts.stdout:
        logfile=sys.__stdout__
    else:
        logfile=resource.open_logfile(name)
    def logger(msg):
        # Prefix every message with the application name; flush immediately
        # so the log survives a crash.
        if logfile:
            print >>logfile,name,msg
            logfile.flush()
    app=commanderApp(name,logfunc=logger)
    app.MainLoop()
    picross.exit(0)
|
Eigenlabs/EigenD
|
app_commander/commander_cli.py
|
Python
|
gpl-3.0
| 11,251
|
import random
import sys
import datetime

table = []      # All words read from standard input.
output = []     # Words picked for the output, in pick order.
final = "\n "   # The final output text being assembled.
line = 0        # Words emitted on the current output line.

# BUG FIX: the original did `random.seed = time`, which *rebinds* the
# `random.seed` function to a datetime object instead of seeding the
# generator (and would make any later seed() call a TypeError).  Calling
# random.seed() with no argument seeds from the system clock / OS entropy,
# which is what was intended.
random.seed()

print( "Enter Text. \n" )
for inp in sys.stdin:
    for word in inp.split():
        table.append( word )
for i in range(len(table)):
    j = random.randint(0,len(table))
    for k in range(random.randint(0,5)):
        n = j - k - 1
        # A negative n wraps around to the end of the table.
        output.append( table[n] )
    # NOTE(review): this rebuild of `final` is nested inside the outer loop,
    # so the whole of `output` is re-appended every iteration (quadratic
    # duplication).  Presumably intentional for this text garbler -- confirm;
    # dedenting it past the outer loop would emit each picked word once.
    for m in range(len(output)):
        final = final + (output[m] + " ")
        line = line + 1
        if line == 10: # Line length.
            final = (final + "\n \n")
            line = 0
print( "Output: \n")
print( final )
|
Wolfarc49/MassOfTextGens
|
Textgens/Backrunner.py
|
Python
|
gpl-3.0
| 750
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# jsonify.py
# TODO: extend to `macro` objects
# TODO: resolve schema issues with `vphi` and other external consumers
# TODO: somehow check schema instead of version?
"""
PyPhi- and NumPy-aware JSON serialization.
To be properly serialized and deserialized, PyPhi models must implement a
``to_json`` method which returns a dictionary of attribute names and attribute
values. These attributes should be the names of arguments passed to the model
constructor. If the constructor takes additional, fewer, or different arguments,
the model needs to implement a custom ``from_json`` classmethod which takes a
Python dictionary as an argument and returns a PyPhi object. For example::
class Phi:
def __init__(self, phi):
self.phi = phi
def to_json(self):
return {'phi': self.phi, 'twice_phi': 2 * self.phi}
@classmethod
def from_json(cls, json):
return Phi(json['phi'])
The model must also be added to ``jsonify._loadable_models``.
The JSON encoder adds the name of the model and the current PyPhi version to
the JSON stream. The JSON decoder uses this metadata to recursively deserialize
the stream to a nested PyPhi model structure. The decoder will raise an
exception if the version of the JSON does not match the current version of
PyPhi.
"""
import json
import numpy as np
import pyphi
from pyphi import cache
# Metadata keys injected into every serialized model dict by
# `_push_metadata` and removed again by `_pop_metadata`.
CLASS_KEY = '__class__'
VERSION_KEY = '__version__'
ID_KEY = '__id__'
def _loadable_models():
    """A dictionary of loadable PyPhi models, keyed by class name.

    These are stored in this function (instead of module scope) to resolve
    circular import issues.
    """
    models = (
        pyphi.Network,
        pyphi.Subsystem,
        pyphi.models.Cut,
        pyphi.models.Part,
        pyphi.models.Bipartition,
        pyphi.models.Mip,
        pyphi.models.Mice,
        pyphi.models.Concept,
        pyphi.models.Constellation,
        pyphi.models.BigMip,
    )
    return dict((cls.__name__, cls) for cls in models)
def _jsonify_dict(dct):
    """Jsonify every value of *dct*, preserving its keys."""
    result = {}
    for key, value in dct.items():
        result[key] = jsonify(value)
    return result
def _push_metadata(dct, obj):
    """Annotate *dct* in place with the class name, PyPhi version and
    object id of *obj*, and return it."""
    dct[CLASS_KEY] = obj.__class__.__name__
    dct[VERSION_KEY] = pyphi.__version__
    dct[ID_KEY] = id(obj)
    return dct
def _pop_metadata(dct):
    """Remove and return the (classname, version, id) metadata from *dct*."""
    classname = dct.pop(CLASS_KEY)
    version = dct.pop(VERSION_KEY)
    obj_id = dct.pop(ID_KEY)
    return classname, version, obj_id
def jsonify(obj):
    """Return a JSON-encodable representation of an object, recursively using
    any available ``to_json`` methods, converting NumPy arrays and datatypes to
    native lists and types along the way."""
    # Call the `to_json` method if available and add metadata.
    if hasattr(obj, 'to_json'):
        d = obj.to_json()
        _push_metadata(d, obj)
        return jsonify(d)
    # If we have a numpy array, convert it to a list.
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    # If we have NumPy datatypes, convert them to native types.
    # GENERALIZED: `np.integer`/`np.floating` are the abstract scalar bases,
    # so this also covers e.g. np.int16, np.uint64 and np.float32 -- the
    # original handled only np.int32/np.int64/np.float64 and let other
    # NumPy scalars fall through unconverted.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    # Recurse over dictionaries.
    if isinstance(obj, dict):
        return {key: jsonify(value) for key, value in obj.items()}
    # Recurse over object dictionaries.
    if hasattr(obj, '__dict__'):
        return {key: jsonify(value) for key, value in obj.__dict__.items()}
    # Recurse over lists and tuples.
    if isinstance(obj, (list, tuple)):
        return [jsonify(item) for item in obj]
    # Otherwise, give up and hope it's serializable.
    return obj
class PyPhiJSONEncoder(json.JSONEncoder):
    """Extension of the default JSONEncoder that allows for serializing PyPhi
    objects with ``jsonify``."""

    def encode(self, obj):
        """Encode the output of ``jsonify`` with the default encoder."""
        jsonified = jsonify(obj)
        return super().encode(jsonified)

    def iterencode(self, obj, **kwargs):
        """Analog to `encode` used by json.dump."""
        jsonified = jsonify(obj)
        return super().iterencode(jsonified, **kwargs)
def _encoder_kwargs(user_kwargs):
    """Update kwargs for `dump` and `dumps` to use the PyPhi encoder.

    User-supplied kwargs take precedence over the defaults.
    """
    merged = {'separators': (',', ':'), 'cls': PyPhiJSONEncoder}
    merged.update(user_kwargs)
    return merged
def dumps(obj, **user_kwargs):
    """Serialize ``obj`` as a JSON-formatted string."""
    kwargs = _encoder_kwargs(user_kwargs)
    return json.dumps(obj, **kwargs)
def dump(obj, fp, **user_kwargs):
    """Serialize ``obj`` as a JSON-formatted stream and write it to ``fp``
    (a ``.write()``-supporting file-like object)."""
    kwargs = _encoder_kwargs(user_kwargs)
    return json.dump(obj, fp, **kwargs)
def _check_version(version):
    """Raise ``JSONVersionError`` unless *version* matches the running PyPhi."""
    current = pyphi.__version__
    if version == current:
        return
    raise pyphi.exceptions.JSONVersionError(
        'Cannot load JSON from a different version of PyPhi. '
        'JSON version = {0}, current version = {1}.'.format(version, current))
def _is_model(dct):
    """Check if `dct` is a PyPhi model serialization.

    Serialized models are dicts carrying the class-name metadata key
    added by ``_push_metadata``.
    """
    return CLASS_KEY in dct
class _ObjectCache(cache.DictCache):
    """Cache mapping ids to loaded objects, keyed by the id of the object."""
    def key(self, dct, **kwargs):
        # The serialized dict carries the original object's id() under
        # ID_KEY; using it as the key lets shared references in the object
        # graph deserialize to a single shared instance.
        return dct[ID_KEY]
class PyPhiJSONDecoder(json.JSONDecoder):
    """Extension of the default encoder which automatically deserializes
    PyPhi JSON to the appropriate model classes.
    """
    def __init__(self, *args, **kwargs):
        # Install our hook; note this overrides any object_hook the caller
        # passed in.
        kwargs['object_hook'] = self._load_object
        super().__init__(*args, **kwargs)
        # Memoize available models
        self._models = _loadable_models()
        # Cache for loaded objects
        self._object_cache = _ObjectCache()

    def _load_object(self, obj):
        """Recursively load a PyPhi object.

        PyPhi models are recursively loaded, using the model metadata to
        recreate the original object relations. Lists are cast to tuples
        because most objects in PyPhi which are serialized to lists (eg.
        mechanisms and purviews) are ultimately tuples. Other lists (tpms,
        repertoires) should be cast to the correct type in init methods.
        """
        if isinstance(obj, dict):
            # Load values first so a model's constructor receives fully
            # deserialized arguments.
            obj = {k: self._load_object(v) for k, v in obj.items()}
            # Load a serialized PyPhi model
            if _is_model(obj):
                return self._load_model(obj)
        elif isinstance(obj, list):
            return tuple(self._load_object(item) for item in obj)
        return obj

    @cache.method('_object_cache')
    def _load_model(self, dct):
        """Load a serialized PyPhi model.

        The object is memoized for reuse elsewhere in the object graph.
        """
        # _pop_metadata strips the metadata keys so only constructor
        # arguments remain in `dct`.
        classname, version, id_ = _pop_metadata(dct)
        _check_version(version)
        cls = self._models[classname]
        # Use `from_json` if available
        if hasattr(cls, 'from_json'):
            return cls.from_json(dct)
        # Default to object constructor
        return cls(**dct)
def loads(string):
    """Deserialize a JSON string to a Python object.

    Serialized PyPhi models in the stream are reconstructed as model
    instances by ``PyPhiJSONDecoder``.
    """
    return json.loads(string, cls=PyPhiJSONDecoder)
def load(fp):
    """Deserialize a JSON stream to a Python object.

    Serialized PyPhi models in the stream are reconstructed as model
    instances by ``PyPhiJSONDecoder``.
    """
    return json.load(fp, cls=PyPhiJSONDecoder)
|
rlmv/pyphi
|
pyphi/jsonify.py
|
Python
|
gpl-3.0
| 7,250
|
"""
Job Base Class
This class provides generic job definition functionality suitable for any VO.
Helper functions are documented with example usage for the DIRAC API. An example
script (for a simple executable) would be::
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Interfaces.API.Job import Job
j = Job()
j.setCPUTime(500)
j.setExecutable('/bin/echo hello')
j.setExecutable('yourPythonScript.py')
j.setExecutable('/bin/echo hello again')
j.setName('MyJobName')
dirac = Dirac()
jobID = dirac.submit(j)
print 'Submission Result: ',jobID
Note that several executables can be provided and will be executed sequentially.
"""
__RCSID__ = "$Id$"
import re, os, types, urllib
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Workflow.Parameter import Parameter
from DIRAC.Core.Workflow.Workflow import Workflow
from DIRAC.Core.Base.API import API
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.SiteCEMapping import getSiteForCE, getSiteCEMapping
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers import Resources
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Workflow.Utilities.Utils import getStepDefinition, addStepToWorkflow
COMPONENT_NAME = '/Interfaces/API/Job'
class Job( API ):
""" DIRAC jobs
"""
#############################################################################
  def __init__( self, script = None, stdout = 'std.out', stderr = 'std.err' ):
    """Instantiates the Workflow object and some default parameters.

       :param script: optional workflow XML file; when given the workflow is
                      loaded from it instead of built from defaults.
       :param stdout: name of the job's standard-output file.
       :param stderr: name of the job's standard-error file.
    """
    super( Job, self ).__init__()
    self.dbg = False
    if gConfig.getValue( self.section + '/LogLevel', 'DEBUG' ) == 'DEBUG':
      self.dbg = True
    #gConfig.getValue('Tier0SE-tape','SEName')
    self.stepCount = 0
    self.owner = 'NotSpecified'
    self.name = 'Name'
    self.type = 'User'
    self.priority = 1
    # Derive the VO from the current proxy's group, when a proxy is present.
    vo = ''
    ret = getProxyInfo( disableVOMS = True )
    if ret['OK'] and 'group' in ret['Value']:
      vo = getVOForGroup( ret['Value']['group'] )
    self.group = vo
    self.site = 'ANY' #ANY
    #self.setup = 'Development'
    self.origin = 'DIRAC'
    self.stdout = stdout
    self.stderr = stderr
    self.logLevel = 'info'
    self.executable = '$DIRACROOT/scripts/dirac-jobexec' # to be clarified
    # Accumulators consumed at submission time.
    self.addToInputSandbox = []
    self.addToOutputSandbox = []
    self.addToInputData = []
    ##Add member to handle Parametric jobs
    self.parametric = {}
    self.script = script
    if not script:
      self.workflow = Workflow()
      self.__setJobDefaults()
    else:
      # Load an existing workflow description instead of building defaults.
      self.workflow = Workflow( script )
#############################################################################
  def setExecutable( self, executable, arguments = '', logFile = '',
                     modulesList = ['Script'],
                     parameters = [( 'executable', 'string', '', "Executable Script" ),
                                   ( 'arguments', 'string', '', 'Arguments for executable Script' ),
                                   ( 'applicationLog', 'string', '', "Log file name" )],
                     paramValues = [] ):
    """Helper function.

       Specify executable script to run with optional arguments and log file
       for standard output.

       These can be either:

       - Submission of a python or shell script to DIRAC
          - Can be inline scripts e.g. C{'/bin/ls'}
          - Scripts as executables e.g. python or shell script file

       Example usage:

       >>> job = Job()
       >>> job.setExecutable('myScript.py')

       @param executable: Executable
       @type executable: string
       @param arguments: Optional arguments to executable
       @type arguments: string
       @param logFile: Optional log file name
       @type logFile: string
       @param modulesList: Optional list of modules (to be used mostly when extending this method)
       @type modulesList: list
       @param parameters: Optional list of parameters (to be used mostly when extending this method)
       @type parameters: list of tuples
       @param paramValues: Optional list of parameters values (to be used mostly when extending this method)
       @type parameters: list of tuples
    """
    # NOTE(review): modulesList/parameters/paramValues are mutable default
    # arguments; they are only read here, but callers must not mutate them.
    kwargs = {'executable':executable, 'arguments':arguments, 'logFile':logFile}
    if not type( executable ) == type( ' ' ) or not type( arguments ) == type( ' ' ) or \
       not type( logFile ) == type( ' ' ):
      return self._reportError( 'Expected strings for executable and arguments', **kwargs )
    if os.path.exists( executable ):
      # Local script: ship it in the input sandbox; default log named after it.
      self.log.verbose( 'Found script executable file %s' % ( executable ) )
      self.addToInputSandbox.append( executable )
      logName = '%s.log' % ( os.path.basename( executable ) )
    else:
      self.log.warn( 'The executable code could not be found locally' )
      logName = 'CodeOutput.log'
    self.stepCount += 1
    stepName = 'RunScriptStep%s' % ( self.stepCount )
    if logFile:
      if type( logFile ) == type( ' ' ):
        logName = str(logFile)
    else:
      # No explicit log file: make the default name unique per step.
      logName = "Script%s_%s" %( self.stepCount, logName )
    step = getStepDefinition( 'ScriptStep%s' % ( self.stepCount ), modulesList, parametersList = parameters )
    self.addToOutputSandbox.append( logName )
    stepInstance = addStepToWorkflow( self.workflow, step, stepName )
    stepInstance.setValue( 'applicationLog', logName )
    stepInstance.setValue( 'executable', executable )
    if arguments:
      stepInstance.setValue( 'arguments', arguments )
    if paramValues:
      for param, value in paramValues:
        stepInstance.setValue( param, value )
    return S_OK( stepInstance )
#############################################################################
def setName( self, jobName ):
"""Helper function.
A name for the job can be specified if desired. This will appear
in the JobName field of the monitoring webpage. If nothing is
specified a default value will appear.
Example usage:
>>> job=Job()
>>> job.setName("myJobName")
:param jobName: Name of job
:type jobName: string
"""
kwargs = {'jobname':jobName}
if not type( jobName ) == type( ' ' ):
return self._reportError( 'Expected strings for job name', **kwargs )
else:
self.workflow.setName( jobName )
self._addParameter( self.workflow, 'JobName', 'JDL', jobName, 'User specified name' )
return S_OK()
#############################################################################
  def setInputSandbox( self, files ):
    """Helper function.

       Specify input sandbox files less than 10MB in size. If over 10MB, files
       or a directory may be uploaded to Grid storage, see C{dirac.uploadSandbox()}.

       Paths to the options file and (if required) 'lib/' directory of the DLLs
       are specified here. Default is local directory. CMT requirements files or
       executables may be placed in the lib/ directory if desired. The lib/ directory
       is transferred to the Grid Worker Node before the job executes.

       Files / directories can be specified using the `*` character e.g. `*.txt` these
       are resolved correctly before job execution on the WN.

       Example usage:

       >>> job = Job()
       >>> job.setInputSandbox(['DaVinci.opts'])

       :param files: Input sandbox files, can specify full path
       :type files: Single string or list of strings ['','']
    """
    if type( files ) == list and len( files ):
      # Expand wildcards before joining into the semicolon-separated JDL value.
      resolvedFiles = self._resolveInputSandbox( files )
      fileList = ';'.join( resolvedFiles )
      description = 'Input sandbox file list'
      self._addParameter( self.workflow, 'InputSandbox', 'JDL', fileList, description )
      #self.sandboxFiles=resolvedFiles
    elif type( files ) == type( " " ):
      # Single file given as a plain string.
      resolvedFiles = self._resolveInputSandbox( [files] )
      fileList = ';'.join( resolvedFiles )
      description = 'Input sandbox file'
      #self.sandboxFiles = [files]
      self._addParameter( self.workflow, 'InputSandbox', 'JDL', fileList, description )
    else:
      kwargs = {'files':files}
      return self._reportError( 'Expected file string or list of files for input sandbox contents', **kwargs )
    return S_OK()
#############################################################################
  def setParametricInputSandbox( self, files ):
    """Helper function.

       Specify input sandbox files to be used as parameters in Parametric jobs.
       The possibilities are identical to setInputSandbox, except that every
       entry must be an LFN.

       Example usage:

       >>> job = Job()
       >>> job.setParametricInputSandbox(['LFN:/Some_file','LFN:/Some_other_file'])

       :param files: Logical File Names
       :type files: Single LFN string or list of LFNs
    """
    kwargs = {'files':files}
    if type( files ) == list and len( files ):
      # All entries must carry an 'lfn:' prefix (case-insensitive check).
      for fileName in files:
        if not fileName.lower().count( "lfn:" ):
          return self._reportError( 'All files should be LFNs', **kwargs )
      resolvedFiles = self._resolveInputSandbox( files )
      self.parametric['InputSandbox'] = resolvedFiles
      #self.sandboxFiles=resolvedFiles
    elif type( files ) == type( " " ):
      # Single LFN given as a plain string.
      if not files.lower().count( "lfn:" ):
        return self._reportError( 'All files should be LFNs', **kwargs )
      resolvedFiles = self._resolveInputSandbox( [files] )
      self.parametric['InputSandbox'] = resolvedFiles
      #self.sandboxFiles = [files]
    else:
      return self._reportError( 'Expected file string or list of files for input sandbox contents', **kwargs )
    return S_OK()
#############################################################################
def setOutputSandbox( self, files ):
"""Helper function.
Specify output sandbox files. If specified files are over 10MB, these
may be uploaded to Grid storage with a notification returned in the
output sandbox.
Example usage:
>>> job = Job()
>>> job.setOutputSandbox(['DaVinci_v19r12.log','DVNTuples.root'])
:param files: Output sandbox files
:type files: Single string or list of strings ['','']
"""
if type( files ) == list and len( files ):
fileList = ';'.join( files )
description = 'Output sandbox file list'
self._addParameter( self.workflow, 'OutputSandbox', 'JDL', fileList, description )
elif type( files ) == type( " " ):
description = 'Output sandbox file'
self._addParameter( self.workflow, 'OutputSandbox', 'JDL', files, description )
else:
kwargs = {'files':files}
return self._reportError( 'Expected file string or list of files for output sandbox contents', **kwargs )
return S_OK()
#############################################################################
  def setInputData( self, lfns ):
    """Helper function.

       Specify input data by Logical File Name (LFN).

       Example usage:

       >>> job = Job()
       >>> job.setInputData(['/lhcb/production/DC04/v2/DST/00000742_00003493_10.dst'])

       :param lfns: Logical File Names
       :type lfns: Single LFN string or list of LFNs
    """
    if type( lfns ) == list and len( lfns ):
      # NOTE(review): this normalization mutates the caller's list in place.
      for i in xrange( len( lfns ) ):
        lfns[i] = lfns[i].replace( 'LFN:', '' )
      # Re-prefix uniformly so every entry carries exactly one 'LFN:'.
      inputData = ['LFN:' + x for x in lfns ]
      inputDataStr = ';'.join( inputData )
      description = 'List of input data specified by LFNs'
      self._addParameter( self.workflow, 'InputData', 'JDL', inputDataStr, description )
    elif type( lfns ) == type( ' ' ): #single LFN
      description = 'Input data specified by LFN'
      self._addParameter( self.workflow, 'InputData', 'JDL', lfns, description )
    else:
      kwargs = {'lfns':lfns}
      return self._reportError( 'Expected lfn string or list of lfns for input data', **kwargs )
    return S_OK()
#############################################################################
  def setParametricInputData( self, lfns ):
    """Helper function.

       Specify input data by Logical File Name (LFN) to be used as a parameter
       in a parametric job.

       Example usage:

       >>> job = Job()
       >>> job.setParametricInputData(['/lhcb/production/DC04/v2/DST/00000742_00003493_10.dst'])

       :param lfns: Logical File Names
       :type lfns: Single LFN string or list of LFNs
    """
    if type( lfns ) == list and len( lfns ):
      # Entries may themselves be lists (one file set per parametric value);
      # normalize each element to carry exactly one 'LFN:' prefix.
      # NOTE(review): this normalization mutates the caller's list in place.
      for i in xrange( len( lfns ) ):
        if type( lfns[i] ) == list and len( lfns[i] ):
          for k in xrange( len( lfns[i] ) ):
            lfns[i][k] = 'LFN:' + lfns[i][k].replace( 'LFN:', '' )
        else:
          lfns[i] = 'LFN:' + lfns[i].replace( 'LFN:', '' )
      self.parametric['InputData'] = lfns
    elif type( lfns ) == type( ' ' ): #single LFN
      self.parametric['InputData'] = lfns
    else:
      kwargs = {'lfns':lfns}
      return self._reportError( 'Expected lfn string or list of lfns for parametric input data', **kwargs )
    return S_OK()
#############################################################################
def setGenericParametricInput( self, inputlist ):
""" Helper function
Define a generic parametric job with this function. Should not be used when
the ParametricInputData of ParametricInputSandbox are used.
:param inputlist: Input list of parameters to build the parametric job
:type inputlist: list
"""
kwargs = {'inputlist':inputlist}
if not type( inputlist ) == type( [] ):
return self._reportError( 'Expected list for parameters', **kwargs )
self.parametric['GenericParameters'] = inputlist
return S_OK()
#############################################################################
  def setInputDataPolicy( self, policy, dataScheduling = True ):
    """Helper function.

       Specify a job input data policy, this takes precedence over any site specific or
       global settings.

       Possible values for policy are 'Download' or 'Protocol' (case-insensitive). This
       requires that the module locations are defined for the VO in the CS.

       Example usage:

       >>> job = Job()
       >>> job.setInputDataPolicy('download')

       :param policy: 'Download' or 'Protocol', case-insensitive
       :param dataScheduling: when False (allowed only with 'download'),
                              jobs may run anywhere and fetch their input data
    """
    kwargs = {'policy':policy, 'dataScheduling':dataScheduling}
    csSection = 'InputDataPolicy'
    possible = ['Download', 'Protocol']
    # Normalize the user's value to the canonical capitalized form.
    finalPolicy = ''
    for value in possible:
      if policy.lower() == value.lower():
        finalPolicy = value
    if not finalPolicy:
      return self._reportError( 'Expected one of %s for input data policy' % ( ', '.join( possible ) ),
                                __name__, **kwargs )
    # The CS maps the policy name to the module to be used by the job wrapper.
    jobPolicy = Operations().getValue( '%s/%s' % ( csSection, finalPolicy ), '' )
    if not jobPolicy:
      return self._reportError( 'Could not get value for Operations option %s/%s' % ( csSection, finalPolicy ),
                                __name__, **kwargs )
    description = 'User specified input data policy'
    self._addParameter( self.workflow, 'InputDataPolicy', 'JDL', jobPolicy, description )
    if not dataScheduling and policy.lower() == 'download':
      self.log.verbose( 'Scheduling by input data is disabled, jobs will run anywhere and download input data' )
      self._addParameter( self.workflow, 'DisableDataScheduling', 'JDL', 'True', 'Disable scheduling by input data' )
    if not dataScheduling and policy.lower() != 'download':
      self.log.error( 'Expected policy to be "download" for bypassing data scheduling' )
      return self._reportError( 'Expected policy to be "download" for bypassing data scheduling',
                                __name__, **kwargs )
    return S_OK()
#############################################################################
def setOutputData( self, lfns, outputSE = None, outputPath = '' ):
"""Helper function.
For specifying output data to be registered in Grid storage. If a list
of OutputSEs are specified the job wrapper will try each in turn until
successful. If the OutputPath is specified this will appear only after
/ <VO> / user / <initial> / <username>
directory.
Example usage:
>>> job = Job()
>>> job.setOutputData(['DVNtuple.root'])
:param lfns: Output data file or files
:type lfns: Single string or list of strings ['','']
:param outputSE: Optional parameter to specify the Storage Element
:param outputPath: Optional parameter to specify part of the path in the storage (see above)
Element to store data or files, e.g. CERN-tape
:type outputSE: string or list
:type outputPath: string
"""
if outputSE == None:
outputSE = []
kwargs = {'lfns':lfns, 'OutputSE':outputSE, 'OutputPath':outputPath}
if type( lfns ) == list and len( lfns ):
outputDataStr = ';'.join( lfns )
description = 'List of output data files'
self._addParameter( self.workflow, 'OutputData', 'JDL', outputDataStr, description )
elif type( lfns ) == type( " " ):
description = 'Output data file'
self._addParameter( self.workflow, 'OutputData', 'JDL', lfns, description )
else:
return self._reportError( 'Expected file name string or list of file names for output data', **kwargs )
if outputSE:
description = 'User specified Output SE'
if type( outputSE ) in types.StringTypes:
outputSE = [outputSE]
elif type( outputSE ) != types.ListType:
return self._reportError( 'Expected string or list for OutputSE', **kwargs )
outputSE = ';'.join( outputSE )
self._addParameter( self.workflow, 'OutputSE', 'JDL', outputSE, description )
if outputPath:
description = 'User specified Output Path'
if not type( outputPath ) in types.StringTypes:
return self._reportError( 'Expected string for OutputPath', **kwargs )
# Remove leading "/" that might cause problems with os.path.join
# FIXME: this will prevent to set OutputPath outside the Home of the User
while outputPath[0] == '/':
outputPath = outputPath[1:]
self._addParameter( self.workflow, 'OutputPath', 'JDL', outputPath, description )
return S_OK()
#############################################################################
def setPlatform( self, platform ):
"""Developer function: sets the target platform, e.g. Linux_x86_64_glibc-2.5.
This platform is in the form of what it is returned by the dirac-platform script
(or dirac-architecture if your extension provides it)
"""
kwargs = {'platform':platform}
if not type( platform ) == type( " " ):
return self._reportError( "Expected string for platform", **kwargs )
if not platform.lower() == 'any':
availablePlatforms = Resources.getDIRACPlatforms()
if not availablePlatforms['OK']:
return self._reportError( "Can't check for platform", **kwargs )
if platform in availablePlatforms['Value']:
self._addParameter( self.workflow, 'Platform', 'JDL', platform, 'Platform ( Operating System )' )
else:
return self._reportError( "Invalid platform", **kwargs )
return S_OK()
#############################################################################
def setSubmitPool( self, backend ):
"""Developer function.
Choose submission pool on which job is executed.
Default in place for users.
"""
#should add protection here for list of supported platforms
kwargs = {'backend':backend}
if not type( backend ) in types.StringTypes:
return self._reportError( 'Expected string for SubmitPool', **kwargs )
if not backend.lower() == 'any':
self._addParameter( self.workflow, 'SubmitPools', 'JDL', backend, 'Submit Pool' )
return S_OK()
#############################################################################
  def setCPUTime( self, timeInSecs ):
    """Helper function.

       Under Development. Specify CPU time requirement in DIRAC units.

       Example usage:

       >>> job = Job()
       >>> job.setCPUTime(5000)

       :param timeInSecs: CPU time
       :type timeInSecs: Int
    """
    kwargs = {'timeInSecs':timeInSecs}
    if not type( timeInSecs ) == int:
      try:
        timeInSecs = int( timeInSecs )
      except Exception:
        # '{{' marks a workflow template placeholder resolved later, so such
        # strings are accepted verbatim instead of being rejected here
        if not re.search( '{{', timeInSecs ):
          return self._reportError( 'Expected numerical string or int for CPU time in seconds', **kwargs )
    description = 'CPU time in secs'
    self._addParameter( self.workflow, 'CPUTime', 'JDL', timeInSecs, description )
    return S_OK()
#############################################################################
def setDestination( self, destination ):
"""Helper function.
Can specify a desired destination site or sites for job. This can be useful
for debugging purposes but often limits the possible candidate sites
and overall system response time.
Example usage:
>>> job = Job()
>>> job.setDestination('LCG.CERN.ch')
:param destination: site string
:type destination: string or list
"""
kwargs = {'destination':destination}
if type( destination ) == type( " " ):
if not re.search( '^DIRAC.', destination ) and not destination.lower() == 'any':
result = self.__checkSiteIsValid( destination )
if not result['OK']:
return self._reportError( '%s is not a valid destination site' % ( destination ), **kwargs )
description = 'User specified destination site'
self._addParameter( self.workflow, 'Site', 'JDL', destination, description )
elif type( destination ) == list:
for site in destination:
if not re.search( '^DIRAC.', site ) and not site.lower() == 'any':
result = self.__checkSiteIsValid( site )
if not result['OK']:
return self._reportError( '%s is not a valid destination site' % ( destination ), **kwargs )
destSites = ';'.join( destination )
description = 'List of sites selected by user'
self._addParameter( self.workflow, 'Site', 'JDL', destSites, description )
else:
return self._reportError( '%s is not a valid destination site, expected string' % ( destination ), **kwargs )
return S_OK()
#############################################################################
def __checkSiteIsValid( self, site ):
"""Internal function to check that a site name is valid.
"""
sites = getSiteCEMapping()
if not sites['OK']:
return S_ERROR( 'Could not get site CE mapping' )
siteList = sites['Value'].keys()
if not site in siteList:
return S_ERROR( 'Specified site %s is not in list of defined sites' % site )
return S_OK( '%s is valid' % site )
#############################################################################
  def setDestinationCE( self, ceName, diracSite = '' ):
    """ Developer function.

        Allows to direct a job to a particular Grid CE.

        :param ceName: Grid Computing Element name
        :type ceName: string
        :param diracSite: DIRAC site name; looked up from the CE when not given
        :type diracSite: string
    """
    kwargs = {'ceName':ceName}
    if not diracSite:
      # Resolve the DIRAC site hosting this CE from the configuration
      diracSite = getSiteForCE( ceName )
      if not diracSite['OK']:
        return self._reportError( diracSite['Message'], **kwargs )
      if not diracSite['Value']:
        return self._reportError( 'No DIRAC site name found for CE %s' % ( ceName ), **kwargs )
      diracSite = diracSite['Value']
    self.setDestination( diracSite )
    # Keep GridRequiredCEs for backward compatibility
    self._addJDLParameter( 'GridRequiredCEs', ceName )
    self._addJDLParameter( 'GridCE', ceName )
    return S_OK()
#############################################################################
def setBannedSites( self, sites ):
"""Helper function.
Can specify a desired destination site for job. This can be useful
for debugging purposes but often limits the possible candidate sites
and overall system response time.
Example usage:
>>> job = Job()
>>> job.setBannedSites(['LCG.GRIDKA.de','LCG.CNAF.it'])
:param sites: single site string or list
:type sites: string or list
"""
if type( sites ) == list and len( sites ):
bannedSites = ';'.join( sites )
description = 'List of sites excluded by user'
self._addParameter( self.workflow, 'BannedSites', 'JDL', bannedSites, description )
elif type( sites ) == type( " " ):
description = 'Site excluded by user'
self._addParameter( self.workflow, 'BannedSites', 'JDL', sites, description )
else:
kwargs = {'sites':sites}
return self._reportError( 'Expected site string or list of sites', **kwargs )
return S_OK()
#############################################################################
def setOwner( self, ownerProvided ):
"""Developer function.
Normally users should always specify their immutable DIRAC nickname.
"""
if not type( ownerProvided ) == type( " " ):
return self._reportError( 'Expected string for owner', **{'ownerProvided':ownerProvided} )
self._addParameter( self.workflow, 'Owner', 'JDL', ownerProvided, 'User specified ID' )
return S_OK()
#############################################################################
def setOwnerGroup( self, ownerGroup ):
"""Developer function.
Allows to force expected owner group of proxy.
"""
if not type( ownerGroup ) == type( " " ):
return self._reportError( 'Expected string for job owner group', **{'ownerGroup':ownerGroup} )
self._addParameter( self.workflow, 'OwnerGroup', 'JDL', ownerGroup, 'User specified owner group.' )
return S_OK()
#############################################################################
def setOwnerDN( self, ownerDN ):
"""Developer function.
Allows to force expected owner DN of proxy.
"""
if not type( ownerDN ) == type( " " ):
return self._reportError( 'Expected string for job owner DN', **{'ownerGroup':ownerDN} )
self._addParameter( self.workflow, 'OwnerDN', 'JDL', ownerDN, 'User specified owner DN.' )
return S_OK()
#############################################################################
def setType( self, jobType ):
"""Developer function.
Specify job type for testing purposes.
"""
if not type( jobType ) == type( " " ):
return self._reportError( 'Expected string for job type', **{'jobType':jobType} )
self._addParameter( self.workflow, 'JobType', 'JDL', jobType, 'User specified type' )
self.type = jobType
return S_OK()
#############################################################################
def setTag( self, tags ):
""" Set the Tags job requirements
Example usage:
>>> job = Job()
>>> job.setTag( ['WholeNode','8GBMemory'] )
:param tags: single tag string or a list of tags
:type tags: string or list
"""
if type( tags ) in types.StringTypes:
tagValue = tags
elif type( tags ) == types.ListType:
tagValue = ";".join( tags )
else:
return self._reportError( 'Expected string or list for job tags', tags = tags )
self._addParameter( self.workflow, 'Tags', 'JDL', tagValue, 'User specified job tags' )
self.tags = tags
return S_OK()
#############################################################################
def _setSoftwareTags( self, tags ):
"""Developer function.
Choose any software tags if desired. These are not compulsory but will ensure jobs only
arrive at an LCG site where the software is preinstalled. Without the tags, missing software is
installed automatically by the Job Agent.
Example usage:
>>> job=Job()
>>> job.setSoftwareTags(['VO-lhcb-Brunel-v30r17','VO-lhcb-Boole-v12r10','VO-lhcb-Gauss-v25r12'])
:param tags: software tags
:type tags: string or list
"""
if type( tags ) == type( " " ):
self._addParameter( self.workflow, 'SoftwareTag', 'JDL', tags, 'VO software tag' )
elif type( tags ) == list:
swTags = ';'.join( tags )
self._addParameter( self.workflow, 'SoftwareTag', 'JDL', swTags, 'List of VO software tags' )
else:
kwargs = {'tags':tags}
return self._reportError( 'Expected String or List of software tags', **kwargs )
return S_OK()
#############################################################################
def setJobGroup( self, jobGroup ):
"""Helper function.
Allows to group certain jobs according to an ID.
Example usage:
>>> job = Job()
>>> job.setJobGroup('Bs2JPsiPhi')
:param jobGroup: JobGroup name
:type jobGroup: string
"""
if not type( jobGroup ) == type( " " ):
return self._reportError( 'Expected string for job group name', **{'jobGroup':jobGroup} )
description = 'User specified job group'
self._addParameter( self.workflow, 'JobGroup', 'JDL', jobGroup, description )
return S_OK()
#############################################################################
  def setLogLevel( self, logLevel ):
    """Helper function.

       Optionally specify a DIRAC logging level for the job, e.g.
       ALWAYS, INFO, VERBOSE, WARN, DEBUG
       by default this is set to the info level.

       Example usage:

       >>> job = Job()
       >>> job.setLogLevel('debug')

       :param logLevel: Logging level
       :type logLevel: string
    """
    kwargs = {'logLevel':logLevel}
    if type( logLevel ) in types.StringTypes:
      # NOTE: validation relies on gLogger's private _logLevels registry
      if logLevel.upper() in gLogger._logLevels.getLevels():
        description = 'User specified logging level'
        self.logLevel = logLevel
        self._addParameter( self.workflow, 'LogLevel', 'JDL', logLevel, description )
      else:
        return self._reportError( 'Error Level "%s" not valid' % logLevel, **kwargs )
    else:
      return self._reportError( 'Expected string for logging level', **kwargs )
    return S_OK()
#############################################################################
def setConfigArgs( self, cfgString ):
"""Developer function. Allow to pass arbitrary settings to the payload
configuration service environment.
"""
if not type( cfgString ) == type( " " ):
return self._reportError( 'Expected string for DIRAC Job Config Args', **{'cfgString':cfgString} )
description = 'User specified cfg settings'
self._addParameter( self.workflow, 'JobConfigArgs', 'JDL', cfgString, description )
return S_OK()
#############################################################################
  def setExecutionEnv( self, environmentDict ):
    """Helper function.

       Optionally specify a dictionary of key, value pairs to be set before
       the job executes e.g. {'MYVAR':3}

       The standard application environment variables are always set so this
       is intended for user variables only.

       Example usage:

       >>> job = Job()
       >>> job.setExecutionEnv({'<MYVARIABLE>':'<VALUE>'})

       :param environmentDict: Environment variables
       :type environmentDict: dictionary
    """
    kwargs = {'environmentDict':environmentDict}
    if not type( environmentDict ) == type( {} ):
      return self._reportError( 'Expected dictionary of environment variables', **kwargs )
    if environmentDict:
      environment = []
      for var, val in environmentDict.items():
        try:
          # Values are URL-quoted so the ';'-joined pairs survive JDL transport
          environment.append( '='.join( [str( var ), urllib.quote( str( val ) )] ) )
        except Exception:
          return self._reportError( 'Expected string for environment variable key value pairs', **kwargs )
      envStr = ';'.join( environment )
      description = 'Env vars specified by user'
      self._addParameter( self.workflow, 'ExecutionEnvironment', 'JDL', envStr, description )
    return S_OK()
#############################################################################
def execute( self ):
"""Developer function. Executes the job locally.
"""
self.workflow.createCode()
self.workflow.execute()
#############################################################################
def _getParameters( self ):
"""Developer function.
Method to return the workflow parameters.
"""
wfParams = {}
params = self.workflow.parameters
for par in params:
wfParams[par.getName()] = par.getValue()
return wfParams
#############################################################################
  def _dumpParameters( self, showType = None ):
    """Developer function.

       Method to print the workflow parameters.

       :param showType: if given, only parameters of this type are printed
    """
    paramsDict = {}
    paramList = self.workflow.parameters
    for param in paramList:
      paramsDict[param.getName()] = {'type':param.getType(), 'value':param.getValue()}
    self.log.info( '--------------------------------------' )
    self.log.info( 'Workflow parameter summary: ' )
    self.log.info( '--------------------------------------' )
    #print self.workflow.parameters
    #print params.getParametersNames()
    for name, _props in paramsDict.items():
      ptype = paramsDict[name]['type']
      value = paramsDict[name]['value']
      if showType:
        # Filtered mode: only dump parameters matching the requested type
        if ptype == showType:
          self.log.info( 'NAME: %s\nTYPE: %s\nVALUE: %s ' % ( name, ptype, value ) )
          self.log.info( '--------------------------------------' )
      else:
        self.log.info( 'NAME: %s\nTYPE: %s\nVALUE: %s ' % ( name, ptype, value ) )
        self.log.info( '--------------------------------------' )
#############################################################################
  def __setJobDefaults( self ):
    """Set job default values. Note that the system configuration is set to "ANY".

       Seeds the workflow with the standard JDL parameters; the individual
       setter methods may override these later.
    """
    self._addParameter( self.workflow, 'JobType', 'JDL', self.type, 'Job Type' )
    self._addParameter( self.workflow, 'Priority', 'JDL', self.priority, 'User Job Priority' )
    self._addParameter( self.workflow, 'JobGroup', 'JDL', self.group, 'Name of the JobGroup' )
    self._addParameter( self.workflow, 'JobName', 'JDL', self.name, 'Name of Job' )
    #self._addParameter(self.workflow,'DIRACSetup','JDL',self.setup,'DIRAC Setup')
    self._addParameter( self.workflow, 'Site', 'JDL', self.site, 'Site Requirement' )
    self._addParameter( self.workflow, 'Origin', 'JDL', self.origin, 'Origin of client' )
    self._addParameter( self.workflow, 'StdOutput', 'JDL', self.stdout, 'Standard output file' )
    self._addParameter( self.workflow, 'StdError', 'JDL', self.stderr, 'Standard error file' )
    self._addParameter( self.workflow, 'InputData', 'JDL', '', 'Default null input data value' )
    self._addParameter( self.workflow, 'LogLevel', 'JDL', self.logLevel, 'Job Logging Level' )
    # The parametric placeholders below are needed for on-site resolution
    self._addParameter( self.workflow, 'ParametricInputData', 'string', '',
                        'Default null parametric input data value' )
    self._addParameter( self.workflow, 'ParametricInputSandbox', 'string', '',
                        'Default null parametric input sandbox value' )
    self._addParameter( self.workflow, 'ParametricParameters', 'string', '',
                        'Default null parametric input parameters value' )
#############################################################################
def _addParameter( self, wObject, name, ptype, value, description, io = 'input' ):
""" Internal Function
Adds a parameter to the object.
"""
if io == 'input':
inBool = True
outBool = False
elif io == 'output':
inBool = False
outBool = True
else:
raise TypeError( 'I/O flag is either input or output' )
par = Parameter( name, value, ptype, "", "", inBool, outBool, description )
wObject.addParameter( Parameter( parameter = par ) )
return par
############################################################################
  def _resolveInputSandbox( self, inputSandbox ):
    """ Internal function.

        Resolves wildcards for input sandbox files. This is currently linux
        specific and should be modified.

        :param inputSandbox: sandbox entries; entries may contain '*'
                             wildcards and directory paths (directories are
                             tarred into <name>.tar.gz in the cwd)
        :return: list of resolved file names / tarball names
    """
    resolvedIS = []
    # Plain files (no wildcard, not a directory) pass straight through
    for i in inputSandbox:
      if not re.search( '\*', i ):
        if not os.path.isdir( i ):
          resolvedIS.append( i )
    for name in inputSandbox:
      # Wildcard entries are expanded via the shell; each match is either
      # appended directly (file) or tarred up (directory)
      if re.search( '\*', name ): #escape the star character...
        cmd = 'ls -d ' + name
        output = shellCall( 10, cmd )
        if not output['OK']:
          self.log.error( 'Could not perform: ', cmd )
        elif output['Value'][0]:
          # Non-zero shell status: report stderr from the ls call
          self.log.error(" Failed getting the files ", output['Value'][2])
        else:
          files = output['Value'][1].split()
          for check in files:
            if os.path.isfile( check ):
              self.log.verbose( 'Found file ' + check + ' appending to Input Sandbox' )
              resolvedIS.append( check )
            if os.path.isdir( check ):
              if re.search( '/$', check ): #users can specify e.g. /my/dir/lib/
                check = check[:-1]
              tarname = os.path.basename( check )
              directory = os.path.dirname( check ) #if just the directory this is null
              if directory:
                cmd = 'tar cfz ' + tarname + '.tar.gz ' + ' -C ' + directory + ' ' + tarname
              else:
                cmd = 'tar cfz ' + tarname + '.tar.gz ' + tarname
              output = shellCall( 60, cmd )
              if not output['OK']:
                self.log.error( 'Could not perform: %s' % ( cmd ) )
              # NOTE(review): the tarball is appended even when tar failed —
              # confirm whether this best-effort behaviour is intentional
              resolvedIS.append( tarname + '.tar.gz' )
              self.log.verbose( 'Found directory ' + check + ', appending ' + check + '.tar.gz to Input Sandbox' )
      # Explicit directory entries (no wildcard) are also tarred up
      if os.path.isdir( name ):
        self.log.verbose( 'Found specified directory ' + name + ', appending ' + name + '.tar.gz to Input Sandbox' )
        if re.search( '/$', name ): #users can specify e.g. /my/dir/lib/
          name = name[:-1]
        tarname = os.path.basename( name )
        directory = os.path.dirname( name ) #if just the directory this is null
        if directory:
          cmd = 'tar cfz ' + tarname + '.tar.gz ' + ' -C ' + directory + ' ' + tarname
        else:
          cmd = 'tar cfz ' + tarname + '.tar.gz ' + tarname
        output = shellCall( 60, cmd )
        if not output['OK']:
          self.log.error( 'Could not perform: %s' % ( cmd ) )
        else:
          resolvedIS.append( tarname + '.tar.gz' )
    return resolvedIS
#############################################################################
def _toXML( self ):
"""Creates an XML representation of itself as a Job.
"""
return self.workflow.toXML()
#############################################################################
  def _toJDL( self, xmlFile = '' ): #messy but need to account for xml file being in /tmp/guid dir
    """Creates a JDL representation of itself as a Job.

       Collects the workflow parameters, resolves the executable script / XML
       description, merges the pending sandbox and input data additions,
       expands parametric values and renders everything into a classad JDL
       body, returned without the enclosing '[' and ']'.

       :param xmlFile: alternative job description XML file to ship
       :type xmlFile: string
    """
    #Check if we have to do old bootstrap...
    classadJob = ClassAd( '[]' )
    paramsDict = {}
    params = self.workflow.parameters # ParameterCollection object
    paramList = params
    for param in paramList:
      paramsDict[param.getName()] = {'type':param.getType(), 'value':param.getValue()}
    # Default description file name, overridden by self.script or xmlFile
    scriptname = 'jobDescription.xml'
    arguments = []
    if self.script:
      if os.path.exists( self.script ):
        scriptname = os.path.abspath( self.script )
        self.log.verbose( 'Found script name %s' % scriptname )
    else:
      if xmlFile:
        self.log.verbose( 'Found XML File %s' % xmlFile )
        scriptname = xmlFile
    arguments.append( os.path.basename( scriptname ) )
    # Temporarily add the description file; removed again at the end
    self.addToInputSandbox.append( scriptname )
    # Forward selected workflow options as command line arguments
    if paramsDict.has_key( 'LogLevel' ):
      if paramsDict['LogLevel']['value']:
        arguments.append( '-o LogLevel=%s' % ( paramsDict['LogLevel']['value'] ) )
      else:
        self.log.warn( 'Job LogLevel defined with null value' )
    if paramsDict.has_key( 'DIRACSetup' ):
      if paramsDict['DIRACSetup']['value']:
        arguments.append( '-o DIRAC/Setup=%s' % ( paramsDict['DIRACSetup']['value'] ) )
      else:
        self.log.warn( 'Job DIRACSetup defined with null value' )
    if paramsDict.has_key( 'JobMode' ):
      if paramsDict['JobMode']['value']:
        arguments.append( '-o JobMode=%s' % ( paramsDict['JobMode']['value'] ) )
      else:
        self.log.warn( 'Job Mode defined with null value' )
    if paramsDict.has_key( 'JobConfigArgs' ):
      if paramsDict['JobConfigArgs']['value']:
        arguments.append( '%s' % ( paramsDict['JobConfigArgs']['value'] ) )
      else:
        self.log.warn( 'JobConfigArgs defined with null value' )
    classadJob.insertAttributeString( 'Executable', self.executable )
    self.addToOutputSandbox.append( self.stderr )
    self.addToOutputSandbox.append( self.stdout )
    #Extract i/o sandbox parameters from steps and any input data parameters
    #to do when introducing step-level api...
    #To add any additional files to input and output sandboxes
    if self.addToInputSandbox:
      extraFiles = ';'.join( self.addToInputSandbox )
      if paramsDict.has_key( 'InputSandbox' ):
        currentFiles = paramsDict['InputSandbox']['value']
        finalInputSandbox = currentFiles + ';' + extraFiles
        uniqueInputSandbox = uniqueElements( finalInputSandbox.split( ';' ) )
        paramsDict['InputSandbox']['value'] = ';'.join( uniqueInputSandbox )
        self.log.verbose( 'Final unique Input Sandbox %s' % ( ';'.join( uniqueInputSandbox ) ) )
      else:
        paramsDict['InputSandbox'] = {}
        paramsDict['InputSandbox']['value'] = extraFiles
        paramsDict['InputSandbox']['type'] = 'JDL'
    if self.addToOutputSandbox:
      extraFiles = ';'.join( self.addToOutputSandbox )
      if paramsDict.has_key( 'OutputSandbox' ):
        currentFiles = paramsDict['OutputSandbox']['value']
        finalOutputSandbox = currentFiles + ';' + extraFiles
        uniqueOutputSandbox = uniqueElements( finalOutputSandbox.split( ';' ) )
        paramsDict['OutputSandbox']['value'] = ';'.join( uniqueOutputSandbox )
        self.log.verbose( 'Final unique Output Sandbox %s' % ( ';'.join( uniqueOutputSandbox ) ) )
      else:
        paramsDict['OutputSandbox'] = {}
        paramsDict['OutputSandbox']['value'] = extraFiles
        paramsDict['OutputSandbox']['type'] = 'JDL'
    if self.addToInputData:
      extraFiles = ';'.join( self.addToInputData )
      if paramsDict.has_key( 'InputData' ):
        currentFiles = paramsDict['InputData']['value']
        finalInputData = extraFiles
        if currentFiles:
          finalInputData = currentFiles + ';' + extraFiles
        uniqueInputData = uniqueElements( finalInputData.split( ';' ) )
        paramsDict['InputData']['value'] = ';'.join( uniqueInputData )
        self.log.verbose( 'Final unique Input Data %s' % ( ';'.join( uniqueInputData ) ) )
      else:
        paramsDict['InputData'] = {}
        paramsDict['InputData']['value'] = extraFiles
        paramsDict['InputData']['type'] = 'JDL'
    # Handle here the Parametric values
    if self.parametric:
      # Only the first parametric type found is processed (hence the break)
      for pType in ['InputData', 'InputSandbox']:
        if self.parametric.has_key( pType ):
          if paramsDict.has_key( pType ) and paramsDict[pType]['value']:
            pData = self.parametric[pType]
            # List of lists case
            currentFiles = paramsDict[pType]['value'].split( ';' )
            tmpList = []
            if type( pData[0] ) == list:
              for pElement in pData:
                tmpList.append( currentFiles + pElement )
            else:
              for pElement in pData:
                tmpList.append( currentFiles + [pElement] )
            self.parametric[pType] = tmpList
          # The parameter value becomes a '%s' placeholder filled per-subjob
          paramsDict[pType] = {}
          paramsDict[pType]['value'] = "%s"
          paramsDict[pType]['type'] = 'JDL'
          self.parametric['files'] = self.parametric[pType]
          arguments.append( ' -p Parametric' + pType + '=%s' )
          break
      if self.parametric.has_key( 'files' ):
        paramsDict['Parameters'] = {}
        paramsDict['Parameters']['value'] = self.parametric['files']
        paramsDict['Parameters']['type'] = 'JDL'
      if self.parametric.has_key( 'GenericParameters' ):
        paramsDict['Parameters'] = {}
        paramsDict['Parameters']['value'] = self.parametric['GenericParameters']
        paramsDict['Parameters']['type'] = 'JDL'
        arguments.append( ' -p ParametricParameters=%s' )
    ##This needs to be put here so that the InputData and/or InputSandbox parameters for parametric jobs are processed
    classadJob.insertAttributeString( 'Arguments', ' '.join( arguments ) )
    #Add any JDL parameters to classad obeying lists with ';' rule
    for name, props in paramsDict.items():
      ptype = props['type']
      value = props['value']
      if name.lower() == 'requirements' and ptype == 'JDL':
        self.log.verbose( 'Found existing requirements: %s' % ( value ) )
      if re.search( '^JDL', ptype ):
        if type( value ) == list:
          if type( value[0] ) == list:
            classadJob.insertAttributeVectorStringList( name, value )
          else:
            classadJob.insertAttributeVectorString( name, value )
        elif value == "%s":
          # Parametric placeholder: inserted via the Int API so it is not quoted
          classadJob.insertAttributeInt( name, value )
        elif not re.search( ';', value ) or name == 'GridRequirements': #not a nice fix...
          classadJob.insertAttributeString( name, value )
        else:
          classadJob.insertAttributeVectorString( name, value.split( ';' ) )
    # Undo the temporary sandbox additions made above
    self.addToInputSandbox.remove( scriptname )
    self.addToOutputSandbox.remove( self.stdout )
    self.addToOutputSandbox.remove( self.stderr )
    jdl = classadJob.asJDL()
    start = jdl.find( '[' )
    end = jdl.rfind( ']' )
    # Strip the outer '[' and ']' added by the classad library
    return jdl[( start + 1 ):( end - 1 )]
#############################################################################
def _setParamValue( self, name, value ):
"""Internal Function. Sets a parameter value, used for production.
"""
return self.workflow.setValue( name, value )
#############################################################################
def _addJDLParameter( self, name, value ):
"""Developer function, add an arbitrary JDL parameter.
"""
self._addParameter( self.workflow, name, 'JDL', value, 'Optional JDL parameter added' )
return self.workflow.setValue( name, value )
#############################################################################
def runLocal( self, dirac = None ):
""" The dirac (API) object is for local submission.
"""
if dirac is None:
dirac = Dirac()
return dirac.submit( self, mode = 'local' )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
coberger/DIRAC
|
Interfaces/API/Job.py
|
Python
|
gpl-3.0
| 47,515
|
#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import pmt
import numpy
def make_tag(key, value, offset, srcid=0):
    """Build a gr.tag_t with the given key, PMT-converted value, offset and srcid."""
    t = gr.tag_t()
    t.key = key
    t.value = pmt.to_pmt(value)
    t.offset = offset
    t.srcid = srcid
    return t
class test_skiphead(gr_unittest.TestCase):
    """QA for blocks.skiphead: the first N items (and any tags located
    before the skip point) must be dropped from the stream."""

    def setUp(self):
        self.tb = gr.top_block()
        self.src_data = [int(x) for x in range(65536)]

    def tearDown(self):
        self.tb = None

    def _run_skip(self, skip_cnt, tags=None):
        """Run src -> skiphead(skip_cnt) -> sink and return the sink.

        Factored out of the individual tests to remove the six-fold
        duplication of the flowgraph construction."""
        if tags is None:
            src = blocks.vector_source_i(self.src_data)
        else:
            src = blocks.vector_source_i(self.src_data, tags=tags)
        op = blocks.skiphead(gr.sizeof_int, skip_cnt)
        dst = blocks.vector_sink_i()
        self.tb.connect(src, op, dst)
        self.tb.run()
        return dst

    def _check_skip(self, skip_cnt):
        """Assert that exactly the first skip_cnt items are dropped."""
        expected_result = tuple(self.src_data[skip_cnt:])
        dst = self._run_skip(skip_cnt)
        self.assertEqual(expected_result, dst.data())

    def test_skip_0(self):
        self._check_skip(0)

    def test_skip_1(self):
        self._check_skip(1)

    def test_skip_1023(self):
        self._check_skip(1023)

    def test_skip_6339(self):
        self._check_skip(6339)

    def test_skip_12678(self):
        self._check_skip(12678)

    def test_skip_all(self):
        self._check_skip(len(self.src_data))

    def test_skip_tags(self):
        skip_cnt = 25
        expected_result = tuple(self.src_data[skip_cnt:])
        src_tags = tuple([make_tag('foo', 'bar', 1, 0),
                          make_tag('baz', 'qux', 50, 1)])
        dst = self._run_skip(skip_cnt, tags=src_tags)
        self.assertEqual(expected_result, dst.data())
        dst_tags = dst.tags()
        # The tag at offset 1 lies in the skipped region and must be dropped;
        # the one at offset 50 survives with its original offset preserved.
        self.assertEqual(len(dst_tags), 1, "Wrong number of tags received")
        self.assertEqual(dst_tags[0].offset, 25, "Tag offset is incorrect")
        self.assertEqual(dst_tags[0].key, "baz", "Tag key is incorrect")
        self.assertEqual(pmt.to_python(
            dst_tags[0].value), "qux", "Tag value is incorrect")
# Standard GNU Radio QA entry point: run the suite and emit an XML report.
if __name__ == '__main__':
    gr_unittest.run(test_skiphead, "test_skiphead.xml")
|
bastibl/gnuradio
|
gr-blocks/python/blocks/qa_skiphead.py
|
Python
|
gpl-3.0
| 4,664
|
# This file is part of python-flac.
#
# Copyright (c) 2014 Christian Schmitz <tynn.dev@gmail.com>
#
# python-flac is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-flac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-flac. If not, see <http://www.gnu.org/licenses/>.
""" Unit tests for the flac.format module """
try : from . import _enum
except : import _enum
import unittest
from flac.format import *
class FormatErrorTest(unittest.TestCase):
    """Check that FlacFormatError is a usable, raisable exception type."""

    def _raise_format_error(self):
        # Raising here proves the type can be instantiated and thrown.
        raise FlacFormatError

    def test_ininstanceability(self):
        self.assertRaises(FlacFormatError, self._raise_format_error)
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` hook: delegate to the shared enum checker
    for every enum type exposed by flac.format."""
    enums = (
        ChannelAssignment,
        EntropyCodingMethodType,
        FrameNumberType,
        SubframeType,
        MetadataType,
    )
    return _enum._load_tests(loader, tests, *enums)
# Allow running this test module directly.
if __name__ == "__main__" : unittest.main()
|
tynn/python-flac
|
tests/test_flac_format.py
|
Python
|
gpl-3.0
| 1,325
|
#!/usr/bin/env python
#encoding: utf-8
import os
import logging
import time
import json
import socket
import subprocess
import random
from Crypto.PublicKey import RSA
from Crypto.Cipher import (
PKCS1_OAEP,
AES,
)
from Crypto.Signature import PKCS1_PSS
from Crypto.Hash.SHA import SHA1Hash
import base64
import copy
from traceback import format_exc
HOSTNAME = socket.getfqdn()  # fully-qualified name of this machine; used to build queue names
AES_SIZE = 16  # AES block size in bytes; also used as the AES key length in wrap()
PADDING_BYTE = '0'  # zero-character padding applied by pad() and stripped by unpad()
############ Encription helpers ######################
def pad(msg):
    # Encode to UTF-8 and right-pad with PADDING_BYTE up to a multiple of
    # AES_SIZE (the AES block size).  Python 2 only: the bytes + str
    # concatenation below would raise TypeError on Python 3.
    # NOTE(review): when len(msg) is already block-aligned a full extra
    # block of padding is appended; unpad() strips it again.
    # NOTE(review): zero-character padding is ambiguous -- a payload whose
    # plaintext ends in '0' would be corrupted by unpad().  Payloads here
    # are JSON objects ending in '}', so in practice this holds; confirm
    # before reusing these helpers elsewhere.
    msg = msg.encode('utf-8')
    return msg + (AES_SIZE - len(msg) % AES_SIZE) * PADDING_BYTE
def unpad(msg):
    """Strip the trailing PADDING_BYTE characters added by pad().

    Bug fix: the original ``while msg[-1] == PADDING_BYTE`` raised
    IndexError when *msg* was empty or consisted entirely of padding;
    the loop now also checks that *msg* is non-empty.

    NOTE(review): zero-character padding is lossy -- legitimate trailing
    '0' characters in the plaintext are stripped too.  The payloads used
    here are JSON objects ending in '}', so this holds in practice.
    """
    while msg and msg[-1] == PADDING_BYTE:
        msg = msg[:-1]
    return msg
def verifyRSA(msg, key, signature):
    """Verify a base64-encoded PSS *signature* over *msg* with *key*.

    Returns *msg* on success and a falsy value otherwise, so the result
    can double as the verified message.
    """
    verifier = PKCS1_PSS.new(key)
    raw_signature = base64.b64decode(signature)
    if verifier.verify(SHA1Hash(msg), raw_signature):
        return msg
    return False
def signRSA(msg, key):
    """Return the base64-encoded PSS signature of *msg* under *key*."""
    signer = PKCS1_PSS.new(key)
    digest = SHA1Hash(msg)
    return base64.b64encode(signer.sign(digest))
def encryptRSA(msg, key):
    """RSA-OAEP encrypt *msg* with *key*; returns base64 text."""
    return base64.b64encode(PKCS1_OAEP.new(key).encrypt(msg))
def encryptAES(msg, key):
    """AES encrypt *msg* (must be block-aligned) with *key*; base64 out."""
    return base64.b64encode(AES.new(key).encrypt(msg))
def decryptRSA(msg, key):
    """Inverse of encryptRSA: base64-decode then RSA-OAEP decrypt."""
    raw = base64.b64decode(msg)
    return PKCS1_OAEP.new(key).decrypt(raw)
def decryptAES(msg, key):
    """Inverse of encryptAES: base64-decode then AES decrypt."""
    raw = base64.b64decode(msg)
    return AES.new(key).decrypt(raw)
def wrap(dict_obj, key):
    """Envelope-encrypt *dict_obj*: the JSON payload is AES-encrypted
    with a random session key, which is itself RSA-encrypted for the
    holder of *key*'s private half."""
    logging.debug('Wrapping: %r' % dict_obj)
    session_key = os.urandom(AES_SIZE)
    wrapped = {
        'payload': encryptAES(pad(json.dumps(dict_obj)), session_key),
        'key': encryptRSA(session_key, key),
    }
    logging.debug('Wrapped: %r' % wrapped)
    return wrapped
def unwrap(dict_obj, key):
    """Inverse of wrap(): recover the AES session key with our private
    *key*, decrypt the payload and parse it back into a dict."""
    logging.debug('Unwrapping %s' % dict_obj)
    session_key = decryptRSA(dict_obj['key'], key)
    decoded = json.loads(unpad(decryptAES(dict_obj['payload'], session_key)))
    logging.debug('Unwrapped: %s' % decoded)
    return decoded
#### Messages and related classes ####################
class MalformedMessage(Exception):
    """Raised when an incoming message cannot be parsed or verified."""
class Message(dict):
    """Base class for signed, envelope-encrypted queue messages.

    The dict content is the serialized payload; ``priv_key`` (ours, for
    signing) and ``pub_key`` (the peer's, for encryption) ride along as
    plain attributes and are never JSON-encoded.
    """
    def __init__(self, m_type, token, priv_key, pub_key, **kwargs):
        self['token'] = token
        self['m_type'] = m_type
        self.priv_key = priv_key
        self.pub_key = pub_key
    def __str__(self):
        # Serialize: JSON-encode self, sign with our private key, attach
        # host + our public key, then envelope-encrypt for the peer.
        signed_message = {}
        signed_message['data'] = json.dumps(self)
        signed_message['signature'] = signRSA(signed_message['data'],
                                              self.priv_key)
        signed_message['host'] = HOSTNAME
        signed_message['pub_key'] = \
            self.priv_key.publickey().exportKey(format='OpenSSH')
        logging.debug("Wrapping message with key %s"
                      % self.pub_key.exportKey(format='OpenSSH'))
        return json.dumps(wrap(signed_message, self.pub_key))
    @classmethod
    def from_str(cls, msg_str, priv_key=None, **kwargs):
        """Parse, decrypt (when *priv_key* is given) and verify *msg_str*,
        returning an instance of *cls* built from the payload fields.

        Raises MalformedMessage on any parse or verification failure.
        """
        try:
            msg_dict = json.loads(msg_str)
            if priv_key:
                unwrapped_msg_dict = unwrap(msg_dict, priv_key)
            else:
                unwrapped_msg_dict = msg_dict
            ## TODO: verify the sent pub_key
            pub_key = RSA.importKey(unwrapped_msg_dict['pub_key'])
            signature = unwrapped_msg_dict['signature']
            if not verifyRSA(str(unwrapped_msg_dict['data']),
                             pub_key,
                             signature):
                raise ValueError('Wrong signature')
            kwargs.update(json.loads(unwrapped_msg_dict['data']))
            kwargs['pub_key'] = pub_key
            kwargs['priv_key'] = priv_key
            # NOTE(review): Python 2 print statement; looks like stray
            # debug output -- consider removing.
            print kwargs
            return cls(**kwargs)
        except (ValueError, TypeError) as exc:
            raise MalformedMessage('%s\n%s' % (msg_str, format_exc(exc)))
class Token(dict):
    """One-shot execution token published by a server.

    When *token* and a ``signature`` kwarg are supplied the token is
    verified against *pub_key*; otherwise a fresh token is generated.
    """
    def __init__(self, pub_key, token=None, **kwargs):
        # Python 2: accept a serialized (unicode) key and re-import it.
        if isinstance(pub_key, unicode):
            pub_key = RSA.importKey(pub_key)
        self.key = pub_key
        self['pub_key'] = pub_key.publickey().exportKey(format='OpenSSH')
        # NOTE(review): if signature verification fails, this and/or chain
        # silently falls back to generating a brand-new token instead of
        # rejecting the message -- confirm that is intended.
        self['token'] = token \
            and verifyRSA(token, pub_key, kwargs['signature']) \
            or self.generate_token()
        self['m_type'] = 'token'
        self['host'] = kwargs.get('host', HOSTNAME)
    @staticmethod
    def generate_token():
        # Random 0-9999 prefix concatenated with current-time digits.
        rand = str(random.randint(0, 9999))
        ts = repr(time.time()).replace('.', '')
        return rand + ts
    def __str__(self):
        # Serialize a signed copy; deepcopy keeps self signature-free.
        new_token = copy.deepcopy(self)
        new_token['signature'] = signRSA(new_token['token'], self.key)
        return json.dumps(new_token)
    @classmethod
    def from_str(cls, msg_str, **kwargs):
        """Build a Token from its JSON form; raises MalformedMessage."""
        try:
            msg_dict = json.loads(msg_str)
            kwargs.update(msg_dict)
            return cls(**kwargs)
        except (ValueError, TypeError) as exc:
            raise MalformedMessage('%s\n%s' % (msg_str, format_exc(exc)))
class BadToken(Message):
    """Reply sent back when a CommandRequest carried an unknown token."""
    def __init__(self, token, priv_key, pub_key, **kwargs):
        # Drop any m_type coming from deserialized fields; ours wins.
        kwargs.pop('m_type', None)
        super(BadToken, self).__init__('bad_token', token,
                                       priv_key, pub_key, **kwargs)
        self['m_type'] = 'bad_token'
        self['host'] = HOSTNAME
class CommandRequest(Message):
    """Ask a server to execute *command* and reply on *return_queue*."""
    def __init__(self, token, priv_key, pub_key, command,
                 return_queue, **kwargs):
        # Drop any m_type coming from deserialized fields; ours wins.
        kwargs.pop('m_type', None)
        super(CommandRequest, self).__init__('command_request', token,
                                             priv_key, pub_key, **kwargs)
        self['command'] = command
        self['return_queue'] = return_queue
        self['m_type'] = 'command_request'
class CommandResult(Message):
    """Result of an executed command; also drives the execution itself."""
    def __init__(self, token, priv_key, pub_key, command, return_queue,
                 rc='', stdout='', stderr='', host=HOSTNAME, **kwargs):
        if 'm_type' in kwargs:
            kwargs.pop('m_type')
        super(CommandResult, self).__init__('command_result', token,
                                            priv_key, pub_key, **kwargs)
        self['token'] = token
        self['command'] = command
        self['return_queue'] = return_queue
        self['rc'] = rc
        self['stdout'] = stdout
        self['stderr'] = stderr
        self['host'] = host
        self.finished = False
        self.proc = None  # subprocess.Popen handle once run() is called
    def run(self):
        # Launch the command through the shell; output is collected later
        # by parse_results().  shell=True: command comes from a verified
        # signed message, but it is still remote code execution by design.
        self.proc = subprocess.Popen(
            self['command'], shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
    def parse_results(self):
        # Wait for the process and capture rc/stdout/stderr.  m_type is
        # (re)set here so a relayed request serializes as a result.
        self['rc'] = self.proc.wait()
        self['stdout'] = self.proc.stdout and self.proc.stdout.read() or ''
        self['stderr'] = self.proc.stderr and self.proc.stderr.read() or ''
        self['m_type'] = 'command_result'
    def __getattr__(self, name):
        # Delegate unknown attributes (e.g. poll()) to the subprocess.
        return getattr(self.proc, name)
    @classmethod
    def from_cmd_req(cls, cmd_req):
        """Build a result shell from a CommandRequest's dict fields."""
        priv_key = cmd_req.priv_key
        pub_key = cmd_req.pub_key
        return cls(priv_key=priv_key, pub_key=pub_key, **cmd_req)
# Registry mapping wire-level 'm_type' values to their Message classes;
# used by to_message() to deserialize incoming messages.
MSG_TYPES = {
    'command_result': CommandResult,
    'command_request': CommandRequest,
    'token': Token,
    'bad_token': BadToken,
}
def to_message(str_msg, priv_key, pub_keys=None):
    """Decrypt, verify and deserialize *str_msg* into a Message subclass.

    The sender's public key is taken from *pub_keys* (keyed by host) when
    given, otherwise from the message itself (unverified -- see TODO).
    Raises MalformedMessage on any parse, verification or type failure.
    """
    try:
        str_msg = unwrap(json.loads(str_msg), priv_key)
        if not pub_keys:
            ## TODO: verify pub_key
            pub_key = RSA.importKey(str_msg['pub_key'])
        else:
            pub_key = pub_keys[str_msg['host']]
        signature = str_msg['signature']
        if not verifyRSA(str_msg['data'], pub_key, signature):
            raise ValueError('Wrong signature')
        msg = json.loads(str_msg['data'])
        msg['pub_key'] = pub_key
        msg['priv_key'] = priv_key
        return MSG_TYPES[msg['m_type']](**msg)
    except ValueError as exc:
        raise MalformedMessage('Unable to parse json: %s\n%s'
                               % (str_msg, format_exc(exc)))
    except KeyError as exc:
        # Also triggers on missing dict fields above, not only on an
        # unknown m_type in MSG_TYPES.
        raise MalformedMessage('Unknown message type: %s\n%s'
                               % (str_msg, format_exc(exc)))
    except TypeError as exc:
        raise MalformedMessage('Missing or duplicate fields: %s\n%s'
                               % (str_msg, format_exc(exc)))
######## Helper methods and classes #########################
class CommandPool(object):
    """Bounded pool of concurrently running commands (server side).

    For every free slot a fresh one-shot token is published on this
    host's control queue; clients must present a valid token to have a
    command executed, which throttles execution to ``max_procs``.
    """
    def __init__(self, conn, key, max_procs=2):
        # Bug fix: the max_procs argument was ignored (hard-coded to 2),
        # so ServerListener's max_procs setting had no effect.
        self.max_procs = max_procs
        self.pool = {}
        self.conn = conn
        self.key = key
        self.init_poll()
    def init_poll(self):
        """Publish one token per executor slot."""
        for _ in range(self.max_procs):
            new_token = Token(pub_key=self.key)
            self.pool[new_token['token']] = None
            self.send_token(new_token)
    def send_token(self, token):
        self.send_msg(token, '/queue/ctrl_%s' % HOSTNAME)
    def send_bad_token(self, token, cmd_req):
        """Tell the requester its token was not accepted."""
        bad_token = BadToken(
            token,
            priv_key=self.key,
            pub_key=cmd_req.pub_key)
        self.send_msg(bad_token, cmd_req['return_queue'])
    def send_result(self, cmd):
        cmd.parse_results()
        self.send_msg(cmd, cmd['return_queue'])
    def poll(self):
        """Reap finished commands, send their results, refill the slots."""
        finished = []
        for token, cmd in self.pool.iteritems():  # Python 2 dict API
            if cmd and cmd.poll() is not None:
                finished.append(token)
        for token in finished:
            self.send_result(self.pool[token])
            del self.pool[token]
            ## generate a new token and clear executor slot
            new_token = Token(pub_key=self.key)
            self.pool[new_token['token']] = None
            self.send_token(new_token)
    def add_command(self, cmd_req):
        """Start cmd_req's command if its token is valid and slot free."""
        token = cmd_req['token']
        if token in self.pool and not self.pool[token]:
            cmd_res = CommandResult.from_cmd_req(cmd_req)
            cmd_res.run()
            logging.debug('Running command "%s"' % cmd_res['command'])
            self.pool[token] = cmd_res
        elif token not in self.pool:
            logging.warn("Received a bad token: %r" % cmd_req)
            self.send_bad_token(token, cmd_req)
    def send_msg(self, msg, queue):
        self.conn.send(str(msg), destination=queue)
def show_results(results, hosts, error_only=False):
    """Print per-host command output with host::status:: prefixes (Py2).

    With error_only=True, full output is shown only for failing hosts;
    successful hosts print a single '<host>::ok' line.  Returns 0 when
    every host succeeded, otherwise a non-zero code (timeouts force 1).
    """
    global_rc = 0
    for host in hosts:
        if host in results:
            res = results[host]
            # 'error' when the command returned non-zero, else 'ok'
            res_status = res['rc'] and 'error' or 'ok'
            if res['rc'] or not error_only:
                print "%s::%s::Result" % (host, res_status)
            if res['stdout'] and (res['rc'] or not error_only):
                print '\n'.join(
                    ("%s::%s::stdout::" % (host, res_status) + line
                     for line
                     in res['stdout'].split('\n')
                     if line))
            if res['stderr'] and (res['rc'] or not error_only):
                print '\n'.join(
                    ("%s::%s::stderr::" % (host, res_status) + line
                     for line
                     in res['stderr'].split('\n')
                     if line))
            if res['rc'] or not error_only:
                print "%s::%s::Return code: %d" % (host, res_status,
                                                   res['rc'])
            if error_only and not res['rc']:
                print '%s::ok' % host
            # keep the first non-zero return code seen
            global_rc = global_rc or res['rc']
        else:
            # host never answered
            global_rc = 1
            print "%s::error::Timeout" % host
    return global_rc
def show_summary(results, hosts):
global_rc = 0
for host in hosts:
if host in results:
if results[host]['rc']:
print "%s::error" % host
else:
print "%s::ok" % host
else:
print "%s::error::timeout" % host
return global_rc
######### Listener classes ###############################
class BaseListener():
    """Shared plumbing for the server and client STOMP listeners."""
    def load_keys(self, key_path):
        """Load our RSA key pair from *key_path* (default ~/.ssh/id_rsa).

        Bug fixes: RSA.importKey() expects the key material (a string),
        not an open file object, which lacks the .startswith() the
        importer calls; the file is now read (and closed) first.  Also
        fixed the 'lad' -> 'load' typo in the error message.
        """
        if key_path is None:
            key_path = os.path.expanduser('~/.ssh/id_rsa')
        if not os.path.exists(key_path):
            raise Exception('Unable to load key %s, file not found' % key_path)
        with open(key_path) as key_file:
            self.key = RSA.importKey(key_file.read())
    def start_conn(self):
        """Register self as the STOMP listener and connect."""
        self.conn.set_listener('', self)
        self.conn.start()
        self.conn.connect()
    def on_error(self, headers, message):
        # Broker-level errors are fatal: log and raise.
        logging.error("HEADERS:\n%s\nMESSAGE:\n%s" % (headers, message))
        raise Exception("HEADERS:\n%s\nMESSAGE:\n%s" % (headers, message))
    def on_connecting(self, *args):
        logging.debug('on_connecting')
    def on_send(self, *args):
        logging.debug('on_send')
class ServerListener(BaseListener):
    """Server side: listens on this host's in-queue and runs commands
    through a token-throttled CommandPool."""
    def __init__(self, server, port, conn, key_path=None, max_procs=2):
        self.server = server
        self.port = port
        self.conn = conn
        self.tokens = []
        self.load_keys(key_path)
        self.start_conn()
        self.pool = CommandPool(
            conn=conn,
            max_procs=max_procs,
            key=self.key)
        in_queue = '/queue/in_%s' % HOSTNAME
        self.conn.subscribe(destination=in_queue, ack='auto')
    def poll(self):
        """Reap finished commands via the pool."""
        self.pool.poll()
    def on_message(self, headers, message):
        """STOMP callback: decode an incoming CommandRequest and run it."""
        logging.info("Got command: %s" % message)
        try:
            cmd_req = to_message(message, self.key)
        except MalformedMessage as exc:
            # Typo fix in the log message ('Unalbe' -> 'Unable').
            logging.error('Unable to process command %s: %s'
                          % (message, format_exc(exc)))
            return
        self.pool.add_command(cmd_req)
class ClientListener(BaseListener):
    """Client side: waits for a token from each target host, sends the
    command, then collects results on a private return queue.

    self.hosts maps host -> 'wait_for_token' | <token string>; entries
    are removed once a result arrives, so an empty dict means done.
    """
    def __init__(self, server, port, command, conn, hosts, key_path=None):
        self.server = server
        self.port = port
        self.conn = conn
        self.return_queue = '/queue/return_%s' % HOSTNAME
        self.command = command
        self.pub_keys = {}  # host -> RSA public key learned from tokens
        self.load_keys(key_path)
        self.hosts = dict((h, None) for h in hosts)
        self.results = {}  # host -> CommandResult message
        self.start_conn()
        self.subscribe_for_tokens()
        self.finished = False
        self.listening = False  # True once subscribed to return_queue
    def subscribe_for_tokens(self):
        """Subscribe to every target host's control queue."""
        for host in self.hosts.iterkeys():  # Python 2 dict API
            ctrl_queue = '/queue/ctrl_%s' % host
            self.hosts[host] = 'wait_for_token'
            self.conn.subscribe(destination=ctrl_queue, ack='client')
    def handle_token(self, headers, message):
        """A token arrived: send the command to that host."""
        host = headers['destination'].split('_', 1)[1]
        ## this should not happen, but just in case
        if not self.hosts[host] == 'wait_for_token':
            return
        token_msg = Token.from_str(message, key=self.key)
        logging.info("::%s::Got token: %s, sending command"
                     % (host, token_msg['token']))
        self.pub_keys[host] = RSA.importKey(token_msg['pub_key'])
        command = CommandRequest(command=self.command,
                                 token=token_msg['token'],
                                 priv_key=self.key,
                                 pub_key=self.pub_keys[host],
                                 return_queue=self.return_queue)
        in_queue = '/queue/in_%s' % host
        self.conn.send(str(command), destination=in_queue)
        # Ack only after the command went out, so a crash re-delivers.
        self.conn.ack({'message-id': headers['message-id']})
        self.conn.unsubscribe(destination='/queue/ctrl_%s' % host)
        if not self.listening:
            self.listening = True
            self.conn.subscribe(destination=self.return_queue,
                                ack='client-individual')
        logging.debug('::%s::Command sent, waiting for response' % host)
        self.hosts[host] = token_msg['token']
    def handle_result(self, headers, message):
        """A result (or bad_token) arrived on the return queue."""
        logging.info('Got response %s' % message)
        try:
            message = to_message(message, self.key, self.pub_keys)
        except MalformedMessage as exc:
            logging.warn('Unable to parse response %s: %s'
                         % (message, format_exc(exc)))
            return
        ## check if the message was meant to us (this process)
        if message['host'] not in self.hosts:
            logging.info("Wrong host %s, ignoring" % message['host'])
            return
        elif message['token'] != self.hosts[message['host']]:
            logging.info("Bad token %s, expecting %s, ignoring message"
                         % (message['token'],
                            self.hosts[message['host']]))
            return
        ## the message is for us, ack it
        self.conn.ack({'message-id': headers['message-id']})
        host = message['host']
        if message['m_type'] == 'bad_token':
            # Our token was rejected: re-subscribe and wait for a new one.
            logging.debug('Ooops, we sent a bad token, retrying')
            ctrl_queue = '/queue/ctrl_%s' % host
            self.hosts[host] = 'wait_for_token'
            self.conn.subscribe(destination=ctrl_queue, ack='client')
            return
        self.results[host] = message
        del self.hosts[host]
        # progress: [done | pending | total]
        logging.info('[%d|%d|%d]'
                     % (len(self.results),
                        len(self.hosts),
                        len(self.hosts) + len(self.results)))
        if not self.hosts:
            self.finished = True
            self.conn.disconnect()
    def on_message(self, headers, message):
        """STOMP callback: dispatch by queue type."""
        if headers['destination'].startswith('/queue/ctrl_'):
            self.handle_token(headers, message)
        else:
            self.handle_result(headers, message)
|
david-caro/mssh
|
mssh.py
|
Python
|
gpl-3.0
| 17,492
|
#
# @file TestCVTerms.py
# @brief CVTerms unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id: TestCVTerms.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/annotation/TestCVTerms.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestCVTerms.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestCVTerms(unittest.TestCase):
    """Auto-generated unit tests for libsbml's CVTerm (do not edit by
    hand -- regenerated from src/annotation/test/TestCVTerms.c; the
    _dummyList dance below mirrors the C tests' explicit free() calls).
    Uses the deprecated Python 2-era assert_ alias throughout."""
    def test_CVTerm_addResource(self):
        # Adding a resource must surface as an rdf:resource attribute.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        resource = "GO6666";
        self.assert_( term != None )
        self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        term.addResource(resource)
        xa = term.getResources()
        self.assert_( xa.getLength() == 1 )
        self.assert_(( "rdf:resource" == xa.getName(0) ))
        self.assert_(( "GO6666" == xa.getValue(0) ))
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
        pass
    def test_CVTerm_create(self):
        # Basic construction with a model qualifier.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        self.assert_( term != None )
        self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
        pass
    def test_CVTerm_createFromNode(self):
        # Build a bqbiol:is RDF node by hand and parse it into a CVTerm.
        qual_triple = libsbml.XMLTriple("is", "", "bqbiol")
        bag_triple = libsbml.XMLTriple()
        li_triple = libsbml.XMLTriple()
        att = libsbml.XMLAttributes()
        att.add( "", "This is my resource")
        att1 = libsbml.XMLAttributes()
        li_token = libsbml.XMLToken(li_triple,att)
        bag_token = libsbml.XMLToken(bag_triple,att1)
        qual_token = libsbml.XMLToken(qual_triple,att1)
        li = libsbml.XMLNode(li_token)
        bag = libsbml.XMLNode(bag_token)
        node = libsbml.XMLNode(qual_token)
        bag.addChild(li)
        node.addChild(bag)
        term = libsbml.CVTerm(node)
        self.assert_( term != None )
        self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_IS )
        xa = term.getResources()
        self.assert_( xa.getLength() == 1 )
        self.assert_(( "rdf:resource" == xa.getName(0) ))
        self.assert_(( "This is my resource" == xa.getValue(0) ))
        _dummyList = [ qual_triple ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ bag_triple ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ li_triple ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ li_token ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ bag_token ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ qual_token ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ att ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ att1 ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ node ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ bag ]; _dummyList[:] = []; del _dummyList
        _dummyList = [ li ]; _dummyList[:] = []; del _dummyList
        pass
    def test_CVTerm_getResources(self):
        # Multiple resources are retrievable by index.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        resource = "GO6666";
        resource1 = "OtherURI";
        term.addResource(resource)
        term.addResource(resource1)
        number = term.getNumResources()
        self.assert_( number == 2 )
        self.assert_(( "GO6666" == term.getResourceURI(0) ))
        self.assert_(( "OtherURI" == term.getResourceURI(1) ))
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
        pass
    def test_CVTerm_set_get(self):
        # Qualifier type and sub-type setters/getters round-trip.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        self.assert_( term != None )
        self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        term.setModelQualifierType(libsbml.BQM_IS)
        self.assert_( term != None )
        self.assert_( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        self.assert_( term.getModelQualifierType() == libsbml.BQM_IS )
        term.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
        term.setBiologicalQualifierType(libsbml.BQB_IS)
        self.assert_( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assert_( term.getBiologicalQualifierType() == libsbml.BQB_IS )
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
        pass
def suite():
    """Assemble the TestSuite containing every TestCVTerms test."""
    # Use a local name that does not shadow this function.
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(TestCVTerms))
    return result
# Run the suite and exit 0 on success, 1 on any failure (CI-friendly).
if __name__ == "__main__":
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/annotation/TestCVTerms.py
|
Python
|
gpl-3.0
| 5,615
|
# Minimal example: read a Gmsh mesh file with argiope.
import argiope as ag
import numpy as np
import pandas as pd

# Parse "demo.msh" (Gmsh format) into an argiope Mesh object.
mesh = ag.mesh.read_msh("demo.msh")
|
lcharleux/argiope
|
doc/examples/mesh/Mesh-read_msh.py
|
Python
|
gpl-3.0
| 97
|
# Test fixture: deeply nested if/else ladder.  The callables x..e are
# presumably supplied/analyzed by the tirpan test harness -- TODO confirm;
# do not refactor, the shape of the nesting is the point of the test.
if x():
    if y():
        if z():
            if u():
                a()
            else:
                b()
        else:
            c()
    else:
        d()
else:
    e()
|
bronikkk/tirpan
|
tests/test_mir24.py
|
Python
|
gpl-3.0
| 180
|
#!/usr/bin/env python
# Django management entry point (manage.py) for the DNAOrderApp project.
import os
import sys

if __name__ == "__main__":
    # Default to this project's settings unless the caller overrides it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DNAOrderApp.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
aw18/DNAOrderApp
|
manage.py
|
Python
|
gpl-3.0
| 255
|
# Copyright 2007 Casey Durfee
# Copyright 2007 Gabriel Farrell
#
# This file is part of Kochief.
#
# Kochief is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kochief is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kochief. If not, see <http://www.gnu.org/licenses/>.
# Django settings for the Kochief project.
import os

# NOTE(review): DEBUG should be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + '/'
# Base URL of the project, either absolute or relative.
# Must include a trailing slash.
BASE_URL = '/'
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = BASE_DIR + 'kochief.db'             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be avilable on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# dummy ugettext -- see http://www.djangoproject.com/documentation/i18n/
# (identity function so strings are marked for translation without
# importing Django's i18n machinery at settings-load time)
ugettext = lambda s: s
LANGUAGES = (
    ('fr', ugettext('French')),
    ('en', ugettext('English')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = BASE_DIR + 'media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = BASE_URL + 'media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = BASE_URL + 'admin/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): a SECRET_KEY committed to the repository is public;
# override it via settings_local (imported below) in real deployments.
SECRET_KEY = 'cr7)b7e+bq*u%a6f8a2d_rrcd3r_r@d3u32c-1*4^mk6fc0fsl'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.doc.XViewMiddleware",
    "django.middleware.gzip.GZipMiddleware",
)
ROOT_URLCONF = 'kochief.urls'
TEMPLATE_DIRS = (
    BASE_DIR + 'templates/',
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'kochief.discovery',
    'kochief.cataloging',
)
CACHE_BACKEND = 'locmem:///'
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "kochief.discovery.context_processors.search_history",
)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
## Settings specific to this project (that is, non-Django) ##
SOLR_URL = 'http://localhost:8983/solr/'
SOLR_DIR = BASE_DIR + 'solr/'
# III, Unicorn, or Horizon -- affects both display and index/ingest
ILS = ''
# MAJAX_URL is for use with http://libx.org/majax/
# (has no effect if ILS != 'III')
MAJAX_URL = ''
# Set CATALOG_RECORD_URL to something else if you want to pass through
# to your catalog, e.g. 'http://innopac.library.drexel.edu/record=%s'.
# The %s is replaced by the item id.
CATALOG_RECORD_URL = ''
# Number of facet terms to display by default.
MAX_FACET_TERMS_BASIC = 4
# Number of facet terms to display when you hit "show more".
MAX_FACET_TERMS_EXPANDED = 25
# Number of terms to display for index facets.
INDEX_FACET_TERMS = 20
# Facet display on the index page.  Note that values for "field" are
# appended with "_facet".  If sort_by_count is False, terms will be
# sorted "in their natural index order" according to Solr docs --
# usually alphabetical.
INDEX_FACETS = [
    {
        'name': ugettext('Names'),
        'field': 'personal_name',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Subjects'),
        'field': 'subject',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Languages'),
        'field': 'language',
        'sort_by_count': True,
    },
]
# Facet display in the results sidebar.
FACETS = [
    {
        'name': ugettext('Name'),
        'field': 'name',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Topic'),
        'field': 'topic',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Genre'),
        'field': 'genre',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Language'),
        'field': 'language',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Dubbed Language'),
        'field': 'language_dubbed',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Subtitled Language'),
        'field': 'language_subtitles',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Format'),
        'field': 'format',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Place'),
        'field': 'place',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Author'),
        'field': 'author',
        'sort_by_count': True,
    },
    {
        'name': ugettext('Year of Publication'),
        'field': 'year',
        'sort_by_count': True,
    },
#    {
#        'name': ugettext('Dewey Range'),
#        'field': 'callnumlayerone',
#        'sort_by_count': False,
#    },
#    {
#        'name': ugettext('Dewey Range'),
#        'field': 'callnumlayertwo',
#        'sort_by_count': False,
#    },
#    {
#        'name': ugettext('Availability'),
#        'field': 'availability',
#        'sort_by_count': False,
#    },
]
ITEMS_PER_PAGE = 20
# Sort options for results, by (DISPLAY, SOLR_PARAM).
SORTS = (
    (ugettext('newest'), 'year desc'),
    (ugettext('oldest'), 'year asc'),
    (ugettext('relevance'), ''),
    (ugettext('title'), 'title_sort asc'),
)
SEARCH_CACHE_TIME = 6000    # in seconds
# Import for local overrides
try:
    from settings_local import *
except ImportError:
    pass
|
edsu/lochief
|
kochief/settings.py
|
Python
|
gpl-3.0
| 7,699
|
from ..exceptions import DuplicateKeyError, SaveError
from ..monitor import Monitor
from curtsies import fmtstr
import pymongo
import bson.errors
import logging
import arrow
import re
class PostSaver:
    """Persists crawled post documents into a MongoDB collection."""

    def __init__(self, **kwargs):
        self.logger = logging.getLogger(__name__)
        self.db_address = kwargs.get("db_address", None)
        self.db_name = kwargs.get("db_name", None)

    def save(self, document=None):
        """Insert one document into <db_name>.data.

        Exceptions:
            - AssertionError (Monitor)
            - CannotFindField (Monitor)
            - ValidationError (Monitor)
            - DuplicateKeyError
            - SaveError
        """
        assert self.db_address is not None, "db_address is not defined."
        assert self.db_name is not None, "db_name is not defined."
        assert "permalink" in document, "permalink is not defined."
        assert "content" in document, "content is not defined."
        assert len(document["content"]) > 0, "content cannot be empty."
        monitor = Monitor()
        conn = pymongo.MongoClient("mongodb://%s" % self.db_address)
        db = conn[self.db_name]
        # Ensuring Index
        # NOTE(review): indexes are (re)declared on every save; harmless
        # but could be hoisted to a one-time setup step.
        db.data.create_index([("permalink", pymongo.ASCENDING)], unique=True)
        db.data.create_index([("converted", pymongo.ASCENDING)])
        db.data.create_index("TTL", expireAfterSeconds=2592000)
        try:
            db.data.insert_one(document)
            monitor.capture_insert_document(crawler_name=document["_crawled_by"])
        except pymongo.errors.DuplicateKeyError:
            raise DuplicateKeyError("Ops! Duplciate Data!")
        except bson.errors.InvalidBSON:
            raise SaveError("Invalid BSON. Cannot save data!")
        finally:
            conn.close()
            # monitor_conn.close()

    def batch_save(self, documents=None):
        """Save each document in *documents*, continuing past SaveErrors.

        Exceptions:
            - AssertionError (save)
            - CannotFindField (save)
            - ValidationError (save)
        Return:
            success<bool> : Indicate if all the documents is success or not
        """
        assert documents is not None, "documents is not defined."
        success = True
        try:
            for document in documents:
                try:
                    self.save(document)
                    print(fmtstr("[PostSaver][success] Inserted One Document!", "green"))
                except SaveError as ex:
                    self.logger.error(str(ex), exc_info=True)
                    print(fmtstr("[PostSaver][error] %s" % ex, "red"))
                    # Bug fix: a failed save must flip the flag to False;
                    # it was erroneously re-set to True, so batch_save
                    # reported success despite SaveErrors.
                    success = False
        except DuplicateKeyError as ex:
            # Just do not try to push any more document if you find any DuplicateKeyError
            print(fmtstr("[PostSaver][debug] %s" % ex, "red"))
            success = False
        return success
|
franziz/arcrawler
|
lib/saver/post.py
|
Python
|
gpl-3.0
| 2,417
|
class AerogenError(Exception):
    """Base exception type for the AeroGen plugin."""
|
ctu-geoforall-lab-sandbox/qgis-aerogen-plugin
|
exceptions.py
|
Python
|
gpl-3.0
| 40
|
import re
from xml.etree import ElementTree
import sickchill.oldbeard.common
from sickchill import logger, settings
from sickchill.oldbeard import common
from sickchill.oldbeard.helpers import getURL, make_session
class Notifier(object):
    def __init__(self):
        # Standard Plex client-identification headers sent with every request.
        self.headers = {
            'X-Plex-Device-Name': 'SickChill',
            'X-Plex-Product': 'SickChill Notifier',
            'X-Plex-Client-Identifier': sickchill.oldbeard.common.USER_AGENT,
            'X-Plex-Version': '2016.02.10'
        }
        # Shared HTTP session reused by all Plex requests.
        self.session = make_session()
    @staticmethod
    def _notify_pht(message, title='SickChill', host=None, username=None, password=None, force=False):
        """Internal wrapper for the notify_snatch and notify_download functions

        Args:
            message: Message body of the notice to send
            title: Title of the notice to send
            host: Plex Home Theater(s) host:port
            username: Plex username
            password: Plex password
            force: Used for the Test method to override config safety checks

        Returns:
            Returns a list results in the format of host:ip:result
            The result will either be 'OK' or False, this is used to be parsed by the calling function.
        """
        # suppress notifications if the notifier is disabled but the notify options are checked
        if not settings.USE_PLEX_CLIENT and not force:
            return False
        # fall back to configured credentials when not given explicitly
        host = host or settings.PLEX_CLIENT_HOST
        username = username or settings.PLEX_CLIENT_USERNAME
        password = password or settings.PLEX_CLIENT_PASSWORD
        # Plex Home Theater is Kodi-based, so delegate to the Kodi notifier.
        return sickchill.oldbeard.notifiers.kodi_notifier._notify_kodi(message, title=title, host=host, username=username, password=password, force=force, dest_app="PLEX")
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name):
if settings.PLEX_NOTIFY_ONSNATCH:
self._notify_pht(ep_name, common.notifyStrings[common.NOTIFY_SNATCH])
def notify_download(self, ep_name):
if settings.PLEX_NOTIFY_ONDOWNLOAD:
self._notify_pht(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if settings.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_pht(ep_name + ': ' + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version='??'):
if settings.NOTIFY_ON_UPDATE:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
if update_text and title and new_version:
self._notify_pht(update_text + new_version, title)
def notify_login(self, ipaddress=""):
if settings.NOTIFY_ON_LOGIN:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
if update_text and title and ipaddress:
self._notify_pht(update_text.format(ipaddress), title)
def test_notify_pht(self, host, username, password):
    """Send a forced test notification to the given PHT host and return its result."""
    message = 'This is a test notification from SickChill'
    title = 'Test Notification'
    return self._notify_pht(message, title, host, username, password, force=True)
def test_notify_pms(self, host, username, password, plex_server_token):
    """Force a library update against the given Plex Media Server as a connectivity test."""
    return self.update_library(
        host=host,
        username=username,
        password=password,
        plex_server_token=plex_server_token,
        force=True,
    )
def update_library(self, ep_obj=None, host=None,
                   username=None, password=None,
                   plex_server_token=None, force=False):
    """Handles updating the Plex Media Server host via HTTP API.

    Plex Media Server currently only supports refreshing whole library
    sections, not a specific path, so every TV section whose path contains
    the downloaded episode is refreshed (or all TV sections when there is
    no match / no episode object).

    Args:
        ep_obj: optional episode object; its ``location`` narrows the refresh.
        host: comma-separated PMS host:port list (falls back to settings).
        username / password / plex_server_token: auth data for get_token().
        force: bypass the USE_PLEX_SERVER / PLEX_UPDATE_LIBRARY checks
            (used by the Test button).

    Returns:
        None when nothing failed, False on configuration or auth problems,
        otherwise a comma-separated string of hosts with connection issues.
    """
    if not (settings.USE_PLEX_SERVER and settings.PLEX_UPDATE_LIBRARY) and not force:
        return None

    host = host or settings.PLEX_SERVER_HOST
    if not host:
        logger.debug('PLEX: No Plex Media Server host specified, check your settings')
        return False

    if not self.get_token(username, password, plex_server_token):
        logger.warning('PLEX: Error getting auth token for Plex Media Server, check your settings')
        return False

    file_location = '' if not ep_obj else ep_obj.location
    host_list = {x.strip() for x in host.split(',') if x.strip()}
    # BUG FIX: the original `hosts_all = hosts_match = {}` bound both names to
    # the SAME dict object, so every section added to `hosts_all` also appeared
    # in `hosts_match` and the match-vs-all fallback below could never trigger.
    # They must be two independent dicts (section key -> host).
    hosts_all = {}
    hosts_match = {}
    hosts_failed = set()

    for cur_host in host_list:
        url = 'http{0}://{1}/library/sections'.format(('', 's')[settings.PLEX_SERVER_HTTPS], cur_host)
        try:
            xml_response = getURL(url, headers=self.headers, session=self.session, returns='text', verify=False,
                                  allow_proxy=False)
            if not xml_response:
                logger.warning('PLEX: Error while trying to contact Plex Media Server: {0}'.format(cur_host))
                hosts_failed.add(cur_host)
                continue

            media_container = ElementTree.fromstring(xml_response)
        except IOError as error:
            logger.warning('PLEX: Error while trying to contact Plex Media Server: {0}'.format(str(error)))
            hosts_failed.add(cur_host)
            continue
        except Exception as error:
            if 'invalid token' in str(error):
                logger.warning('PLEX: Please set TOKEN in Plex settings: ')
            else:
                logger.warning('PLEX: Error while trying to contact Plex Media Server: {0}'.format(str(error)))
            hosts_failed.add(cur_host)
            continue

        sections = media_container.findall('.//Directory')
        if not sections:
            logger.debug('PLEX: Plex Media Server not running on: {0}'.format(cur_host))
            hosts_failed.add(cur_host)
            continue

        for section in sections:
            if 'show' == section.attrib['type']:
                # Remember every TV section (key -> host) for the refresh URLs.
                keyed_host = [(str(section.attrib['key']), cur_host)]
                hosts_all.update(keyed_host)
                if not file_location:
                    continue
                # Normalize separators and strip a possible drive prefix before
                # testing whether this section contains the episode's path.
                for section_location in section.findall('.//Location'):
                    section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
                    section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
                    location_path = re.sub(r'[/\\]+', '/', file_location.lower())
                    location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)

                    if section_path in location_path:
                        hosts_match.update(keyed_host)

    if force:
        return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]

    if hosts_match:
        logger.debug('PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(hosts_match)))
    else:
        logger.debug('PLEX: Updating all hosts with TV sections: ' + ', '.join(set(hosts_all)))

    hosts_try = (hosts_match.copy(), hosts_all.copy())[not len(hosts_match)]
    for section_key, cur_host in hosts_try.items():
        url = 'http{0}://{1}/library/sections/{2}/refresh'.format(('', 's')[settings.PLEX_SERVER_HTTPS], cur_host, section_key)
        try:
            getURL(url, headers=self.headers, session=self.session, returns='text', verify=False, allow_proxy=False)
        except Exception as error:
            logger.warning('PLEX: Error updating library section for Plex Media Server: {0}'.format(str(error)))
            hosts_failed.add(cur_host)

    return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
def get_token(self, username=None, password=None, plex_server_token=None):
    """Ensure an X-Plex-Token header is present for PMS requests.

    Prefers an explicit/configured token; otherwise signs in to plex.tv with
    the given (or configured) credentials.  Returns True when a token is set
    or no credentials were supplied at all, False when the sign-in failed.
    """
    username = username or settings.PLEX_SERVER_USERNAME
    password = password or settings.PLEX_SERVER_PASSWORD
    plex_server_token = plex_server_token or settings.PLEX_SERVER_TOKEN

    if plex_server_token:
        self.headers['X-Plex-Token'] = plex_server_token
    if 'X-Plex-Token' in self.headers:
        return True
    if not (username and password):
        # Nothing to authenticate with; treat anonymous access as acceptable.
        return True

    logger.debug('PLEX: fetching plex.tv credentials for user: ' + username)
    credentials = {
        'user[login]': username,
        'user[password]': password
    }
    try:
        response = getURL('https://plex.tv/users/sign_in.json',
                          post_data=credentials,
                          headers=self.headers,
                          session=self.session,
                          returns='json',
                          allow_proxy=False)
        self.headers['X-Plex-Token'] = response['user']['authentication_token']
    except Exception as error:
        # Drop any stale token and report the failure.
        self.headers.pop('X-Plex-Token', '')
        logger.debug('PLEX: Error fetching credentials from from plex.tv for user {0}: {1}'.format(username, error))

    return 'X-Plex-Token' in self.headers
|
Vagab0nd/SiCKRAGE
|
sickchill/oldbeard/notifiers/plex.py
|
Python
|
gpl-3.0
| 9,773
|
def execute(module, name):
    """Run the given crawler module's crawl() entry point.

    NOTE(review): ``name`` is accepted but unused here — presumably kept for
    interface compatibility with the dispatching caller; confirm before removing.
    """
    module.crawl()
|
runicer156/Oppomus
|
controllers/line.py
|
Python
|
gpl-3.0
| 82
|
# Package metadata for the `waechter` distribution.
__title__ = 'waechter'
__version__ = '0.2.4'
|
glaslos/waechter
|
waechter/__init__.py
|
Python
|
gpl-3.0
| 45
|
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Download the 2016 HITRAN Line List Database
"""
import os
import sys
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
import SEAS_Aux.data_downloader.web_downloader as wd
from SEAS_Utils.common_utils.DIRs import HITRAN_Lines
if __name__ == "__main__":
    # IDs of line lists to download — presumably HITRAN molecule IDs returned
    # by the downloader helper; TODO confirm against web_downloader.
    ID = wd.get_HITRAN_ID()
    # Wavenumber window (cm^-1) passed to the downloader; 0–50000 covers the
    # full 2016 line lists.
    numin = 0
    numax = 50000
    for i in ID:
        # need to check if file already exist and whether or not to overwrite.
        # not urgent not important though
        wd.HITRAN_Line_List_downloader(HITRAN_Lines,i,numin,numax,True,False)
|
azariven/BioSig_SEAS
|
bin_stable/data_download/download_HITRAN_Line_List.py
|
Python
|
gpl-3.0
| 1,346
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File : mail/MailBuilder.py
# Description : pyTweetBot mail builder tool.
# Auteur : Nils Schaetti <n.schaetti@gmail.com>
# Date : 01.05.2017 17:59:05
# Lieu : Nyon, Suisse
#
# This file is part of the pyTweetBot.
# The pyTweetBot is a set of free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyTweetBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyTweetBar. If not, see <http://www.gnu.org/licenses/>.
#
# Mail builder tool
# Mail builder tool (Python 2: relies on the builtin `unicode`).
class MailBuilder(object):
    """
    Mail builder tool: renders an HTML message template by substituting
    @@_key_@@ placeholders with registered parameter values.
    """

    def __init__(self, message_model):
        """
        Constructor.

        :param message_model: HTML template containing @@_key_@@ placeholders.
        """
        self._parameters = dict()
        self._message = message_model
    # end __init__

    def message(self):
        """
        Get the message with all registered parameters substituted in.

        :return: Message as HTML code
        """
        rendered = self._message
        for name in self._parameters.keys():
            rendered = rendered.replace(u"@@_" + name + u"_@@", unicode(self._parameters[name]))
        return rendered
    # end message

    def __setitem__(self, key, value):
        """
        Register a substitution parameter.

        :param key: placeholder name (without the @@_ _@@ wrapping)
        :param value: value substituted in (converted with unicode())
        """
        self._parameters[key] = value
    # end __setitem__
# end MailBuilder
|
nschaetti/pyTweetBot
|
pyTweetBot/mail/MailBuilder.py
|
Python
|
gpl-3.0
| 1,802
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename ``Device.type`` to ``config_type`` and refresh related model
    options and field choices."""

    dependencies = [
        ('api', '0014_auto_20150425_1828'),
    ]

    operations = [
        # Devices are listed by spark then config type; `-last_update` makes
        # latest() return the most recently updated device.
        migrations.AlterModelOptions(
            name='device',
            options={'ordering': ['spark', 'config_type'], 'get_latest_by': '-last_update', 'verbose_name': 'Device', 'verbose_name_plural': 'Devices'},
        ),
        # `type` shadowed the builtin and was ambiguous; rename to config_type.
        migrations.RenameField(
            model_name='device',
            old_name='type',
            new_name='config_type',
        ),
        # Re-declare choices for the operating mode of a BrewPi spark.
        migrations.AlterField(
            model_name='brewpispark',
            name='device_mode',
            field=models.IntegerField(default=0, verbose_name=b'Device Mode', choices=[(0, b'MANUAL'), (1, b'CALIBRATION'), (2, b'LOGGING'), (3, b'AUTOMATIC')]),
        ),
        # Re-declare the configuration-type choices.
        migrations.AlterField(
            model_name='configuration',
            name='type',
            field=models.IntegerField(verbose_name=b'Configuration Type', choices=[(0, b'None'), (1, b'Brew'), (2, b'Fermentation'), (3, b'Calibration')]),
        ),
    ]
|
thomast74/oinkbrew_webapp
|
api/migrations/0015_auto_20150426_2042.py
|
Python
|
gpl-3.0
| 1,152
|
# -*- coding: utf-8 -*-
"""
@file person.py
@author Jakob Erdmann
@date 2015-02-06
@version $Id: _person.py 22929 2017-02-13 14:38:39Z behrisch $
Python implementation of the TraCI interface.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2011-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
import struct
from .domain import Domain
from .storage import Storage
from . import constants as tc
# Maps each TraCI person variable ID to the Storage reader used to decode the
# response payload for that variable (passed to the Domain constructor below).
_RETURN_VALUE_FUNC = {tc.ID_LIST: Storage.readStringList,
                      tc.ID_COUNT: Storage.readInt,
                      tc.VAR_SPEED: Storage.readDouble,
                      tc.VAR_POSITION: lambda result: result.read("!dd"),
                      tc.VAR_ANGLE: Storage.readDouble,
                      tc.VAR_ROAD_ID: Storage.readString,
                      tc.VAR_TYPE: Storage.readString,
                      tc.VAR_ROUTE_ID: Storage.readString,
                      tc.VAR_COLOR: lambda result: result.read("!BBBB"),
                      tc.VAR_LANEPOSITION: Storage.readDouble,
                      tc.VAR_LENGTH: Storage.readDouble,
                      tc.VAR_WAITING_TIME: Storage.readDouble,
                      tc.VAR_WIDTH: Storage.readDouble,
                      tc.VAR_MINGAP: Storage.readDouble,
                      tc.VAR_NEXT_EDGE: Storage.readString,
                      tc.VAR_STAGE: Storage.readInt,
                      tc.VAR_STAGES_REMAINING: Storage.readInt,
                      tc.VAR_VEHICLE: Storage.readString,
                      tc.VAR_EDGES: Storage.readStringList,
                      }
class PersonDomain(Domain):
    """TraCI domain for querying and controlling persons in a running SUMO
    simulation: state retrieval, insertion, and stage/plan editing.

    Times on the wire are milliseconds; the public API takes seconds and
    converts (``* 1000``) where applicable.
    """

    # Special depart-time constant: insert the person at the current step.
    DEPART_NOW = -3

    def __init__(self):
        Domain.__init__(self, "person", tc.CMD_GET_PERSON_VARIABLE, tc.CMD_SET_PERSON_VARIABLE,
                        tc.CMD_SUBSCRIBE_PERSON_VARIABLE, tc.RESPONSE_SUBSCRIBE_PERSON_VARIABLE,
                        tc.CMD_SUBSCRIBE_PERSON_CONTEXT, tc.RESPONSE_SUBSCRIBE_PERSON_CONTEXT,
                        _RETURN_VALUE_FUNC)

    def getSpeed(self, personID):
        """getSpeed(string) -> double

        Returns the speed in m/s of the named person within the last step.
        """
        return self._getUniversal(tc.VAR_SPEED, personID)

    def getPosition(self, personID):
        """getPosition(string) -> (double, double)

        Returns the position of the named person within the last step [m,m].
        """
        return self._getUniversal(tc.VAR_POSITION, personID)

    def getPosition3D(self, personID):
        """getPosition3D(string) -> (double, double, double)

        Returns the position of the named person within the last step [m,m,m].
        """
        # NOTE(review): tc.VAR_POSITION3D has no entry in _RETURN_VALUE_FUNC
        # above — confirm the base Domain supplies a reader for it.
        return self._getUniversal(tc.VAR_POSITION3D, personID)

    def getAngle(self, personID):
        """getAngle(string) -> double

        Returns the angle in degrees of the named person within the last step.
        """
        return self._getUniversal(tc.VAR_ANGLE, personID)

    def getRoadID(self, personID):
        """getRoadID(string) -> string

        Returns the id of the edge the named person was at within the last step.
        """
        return self._getUniversal(tc.VAR_ROAD_ID, personID)

    def getTypeID(self, personID):
        """getTypeID(string) -> string

        Returns the id of the type of the named person.
        """
        return self._getUniversal(tc.VAR_TYPE, personID)

    def getLanePosition(self, personID):
        """getLanePosition(string) -> double

        The position of the person along the lane measured in m.
        """
        return self._getUniversal(tc.VAR_LANEPOSITION, personID)

    def getColor(self, personID):
        """getColor(string) -> (integer, integer, integer, integer)

        Returns the person's rgba color.
        """
        return self._getUniversal(tc.VAR_COLOR, personID)

    def getLength(self, personID):
        """getLength(string) -> double

        Returns the length in m of the given person.
        """
        return self._getUniversal(tc.VAR_LENGTH, personID)

    def getWaitingTime(self, personID):
        """getWaitingTime() -> double

        The waiting time of a person is defined as the time (in seconds) spent with a
        speed below 0.1m/s since the last time it was faster than 0.1m/s.
        (basically, the waiting time of a person is reset to 0 every time it moves).
        """
        return self._getUniversal(tc.VAR_WAITING_TIME, personID)

    def getWidth(self, personID):
        """getWidth(string) -> double

        Returns the width in m of this person.
        """
        return self._getUniversal(tc.VAR_WIDTH, personID)

    def getMinGap(self, personID):
        """getMinGap(string) -> double

        Returns the offset (gap to front person if halting) of this person.
        """
        return self._getUniversal(tc.VAR_MINGAP, personID)

    def getNextEdge(self, personID):
        """getNextEdge() -> string

        If the person is walking, returns the next edge on the persons route
        (including crossing and walkingareas). If there is no further edge or the
        person is in another stage, returns the empty string.
        """
        return self._getUniversal(tc.VAR_NEXT_EDGE, personID)

    def getEdges(self, personID, nextStageIndex=0):
        """getEdges(string, int) -> list(string)

        Returns a list of all edges in the nth next stage.
        For waiting stages this is a single edge
        For walking stages this is the complete route
        For driving stages this is [origin, destination]
        nextStageIndex 0 retrieves value for the current stage.
        nextStageIndex must be lower then value of getRemainingStages(personID)
        """
        # Manual compound message: payload is one TYPE_INTEGER (1 + 4 bytes).
        self._connection._beginMessage(
            tc.CMD_GET_PERSON_VARIABLE, tc.VAR_EDGES, personID, 1 + 4)
        self._connection._string += struct.pack("!Bi",
                                                tc.TYPE_INTEGER, nextStageIndex)
        return self._connection._checkResult(tc.CMD_GET_PERSON_VARIABLE,
                                             tc.VAR_EDGES, personID).readStringList()

    def getStage(self, personID, nextStageIndex=0):
        """getStage(string, int) -> int

        Returns the type of the nth next stage
          0 for not-yet-departed
          1 for waiting
          2 for walking
          3 for driving
        nextStageIndex 0 retrieves value for the current stage.
        nextStageIndex must be lower then value of getRemainingStages(personID)
        """
        self._connection._beginMessage(
            tc.CMD_GET_PERSON_VARIABLE, tc.VAR_STAGE, personID, 1 + 4)
        self._connection._string += struct.pack("!Bi",
                                                tc.TYPE_INTEGER, nextStageIndex)
        return self._connection._checkResult(tc.CMD_GET_PERSON_VARIABLE,
                                             tc.VAR_STAGE, personID).readInt()

    def getRemainingStages(self, personID):
        """getRemainingStages(string) -> int

        Returns the number of remaining stages (at least 1)
        """
        # DOC FIX: docstring previously named this method "getStage".
        return self._getUniversal(tc.VAR_STAGES_REMAINING, personID)

    def getVehicle(self, personID):
        """getVehicle(string) -> string

        Returns the id of the current vehicle if the person is in stage driving
        and has entered a vehicle.
        Return the empty string otherwise
        """
        return self._getUniversal(tc.VAR_VEHICLE, personID)

    def removeStages(self, personID):
        """removeStages(string)

        Removes all stages of the person. If no new phases are appended,
        the person will be removed from the simulation in the next simulationStep().
        """
        # remove all stages after the current and then abort the current stage
        while self.getRemainingStages(personID) > 1:
            self.removeStage(personID, 1)
        self.removeStage(personID, 0)

    def add(self, personID, edgeID, pos, depart=DEPART_NOW, typeID="DEFAULT_PEDTYPE"):
        """add(string, string, double, int, string)

        Inserts a new person to the simulation at the given edge, position and
        time (in s). This function should be followed by appending Stages or the person
        will immediatly vanish on departure.
        """
        if depart > 0:
            depart *= 1000  # seconds -> milliseconds; DEPART_NOW (-3) stays as-is
        self._connection._beginMessage(tc.CMD_SET_PERSON_VARIABLE, tc.ADD, personID,
                                       1 + 4 + 1 + 4 + len(typeID) + 1 + 4 + len(edgeID) + 1 + 4 + 1 + 8)
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 4)
        self._connection._packString(typeID)
        self._connection._packString(edgeID)
        self._connection._string += struct.pack("!Bi", tc.TYPE_INTEGER, depart)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, pos)
        self._connection._sendExact()

    def appendWaitingStage(self, personID, duration, description="waiting", stopID=""):
        """appendWaitingStage(string, int, string, string)

        Appends a waiting stage with duration in s to the plan of the given person
        """
        duration *= 1000  # seconds -> milliseconds
        self._connection._beginMessage(tc.CMD_SET_PERSON_VARIABLE, tc.APPEND_STAGE, personID,
                                       1 + 4  # compound
                                       + 1 + 4  # stage type
                                       + 1 + 4  # duration
                                       + 1 + 4 + len(description)
                                       + 1 + 4 + len(stopID))
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 4)
        self._connection._string += struct.pack(
            "!Bi", tc.TYPE_INTEGER, tc.STAGE_WAITING)
        self._connection._string += struct.pack("!Bi",
                                                tc.TYPE_INTEGER, duration)
        self._connection._packString(description)
        self._connection._packString(stopID)
        self._connection._sendExact()

    def appendWalkingStage(self, personID, edges, arrivalPos, duration=-1, speed=-1, stopID=""):
        """appendWalkingStage(string, stringList, double, int, double, string)

        Appends a walking stage to the plan of the given person
        The walking speed can either be specified, computed from the duration
        parameter (in s) or taken from the type of the person
        """
        if duration is not None:
            duration *= 1000  # seconds -> milliseconds
        if isinstance(edges, str):
            # BUG FIX: the original read `edges = [edgeList]`, referencing an
            # undefined name and raising NameError whenever a single edge id
            # string was passed instead of a list.
            edges = [edges]
        self._connection._beginMessage(tc.CMD_SET_PERSON_VARIABLE, tc.APPEND_STAGE, personID,
                                       1 + 4  # compound
                                       + 1 + 4  # stageType
                                       + 1 + 4 + \
                                       sum(map(len, edges)) + 4 * len(edges)
                                       + 1 + 8  # arrivalPos
                                       + 1 + 4  # duration
                                       + 1 + 8  # speed
                                       + 1 + 4 + len(stopID)
                                       )
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 6)
        self._connection._string += struct.pack(
            "!Bi", tc.TYPE_INTEGER, tc.STAGE_WALKING)
        self._connection._packStringList(edges)
        self._connection._string += struct.pack("!Bd",
                                                tc.TYPE_DOUBLE, arrivalPos)
        self._connection._string += struct.pack("!Bi",
                                                tc.TYPE_INTEGER, duration)
        self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, speed)
        self._connection._packString(stopID)
        self._connection._sendExact()

    def appendDrivingStage(self, personID, toEdge, lines, stopID=""):
        """appendDrivingStage(string, string, string, string)

        Appends a driving stage to the plan of the given person
        The lines parameter should be a space-separated list of line ids
        """
        self._connection._beginMessage(tc.CMD_SET_PERSON_VARIABLE, tc.APPEND_STAGE, personID,
                                       1 + 4  # compound
                                       + 1 + 4  # stage type
                                       + 1 + 4 + len(toEdge)
                                       + 1 + 4 + len(lines)
                                       + 1 + 4 + len(stopID))
        self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 4)
        self._connection._string += struct.pack(
            "!Bi", tc.TYPE_INTEGER, tc.STAGE_DRIVING)
        self._connection._packString(toEdge)
        self._connection._packString(lines)
        self._connection._packString(stopID)
        self._connection._sendExact()

    def removeStage(self, personID, nextStageIndex):
        """removeStage(string, int)

        Removes the nth next stage
        nextStageIndex must be lower then value of getRemainingStages(personID)
        nextStageIndex 0 immediately aborts the current stage and proceeds to the next stage
        """
        self._connection._beginMessage(
            tc.CMD_SET_PERSON_VARIABLE, tc.REMOVE_STAGE, personID, 1 + 4)
        self._connection._string += struct.pack("!Bi",
                                                tc.TYPE_INTEGER, nextStageIndex)
        self._connection._sendExact()

    def setSpeed(self, personID, speed):
        """setSpeed(string, double) -> None

        Sets the maximum speed in m/s for the named person for subsequent step.
        """
        self._connection._sendDoubleCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_SPEED, personID, speed)

    def setType(self, personID, typeID):
        """setType(string, string) -> None

        Sets the id of the type for the named person.
        """
        self._connection._sendStringCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_TYPE, personID, typeID)

    def setWidth(self, personID, width):
        """setWidth(string, double) -> None

        Sets the width in m for this person.
        """
        self._connection._sendDoubleCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_WIDTH, personID, width)

    def setHeight(self, personID, height):
        """setHeight(string, double) -> None

        Sets the height in m for this person.
        """
        self._connection._sendDoubleCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_HEIGHT, personID, height)

    def setLength(self, personID, length):
        """setLength(string, double) -> None

        Sets the length in m for the given person.
        """
        self._connection._sendDoubleCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_LENGTH, personID, length)

    def setMinGap(self, personID, minGap):
        """setMinGap(string, double) -> None

        Sets the offset (gap to front person if halting) for this vehicle.
        """
        self._connection._sendDoubleCmd(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_MINGAP, personID, minGap)

    def setColor(self, personID, color):
        """setColor(string, (integer, integer, integer, integer))

        sets color for person with the given ID.
        i.e. (255,0,0,0) for the color red.
        The fourth integer (alpha) is only used when drawing persons with raster images
        """
        self._connection._beginMessage(
            tc.CMD_SET_PERSON_VARIABLE, tc.VAR_COLOR, personID, 1 + 1 + 1 + 1 + 1)
        self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(
            color[0]), int(color[1]), int(color[2]), int(color[3]))
        self._connection._sendExact()
# Module-level instantiation; presumably Domain.__init__ registers the domain
# with the TraCI connection machinery as a side effect — confirm in domain.py.
PersonDomain()
|
702nADOS/sumo
|
tools/traci/_person.py
|
Python
|
gpl-3.0
| 16,025
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 09:35:10 2016
@author: kristiny
"""
import process_data
from matplotlib import pyplot as plt
# Hourly power-consumption data file; schema defined by process_data — confirm.
file_name = 'test.csv'
analyzer = process_data.PowerConsumptionAnalyzer(file_name)
# Peak fractions to test: hours needed to cut 30%/20%/10% off the monthly peak.
target_fractions = [0.7, 0.8, 0.9]
for target_fraction in target_fractions:
    total_hours = 0
    total_months = 0
    hours_list = []
    x_axis = []
    for month in analyzer.months():
        total_months +=1
        first = month[:1]
        # NOTE(review): Python 2 print statements; `month` appears to behave
        # like a pandas Series keyed by (timestamp, ...) tuples — confirm.
        print first.keys()[0][0].strftime("%b %Y")
        x_axis.append(first.keys()[0][0])
        sorted_month = month.sort_values(ascending=False)
        # Threshold = target fraction of this month's peak-hour value.
        target_value = sorted_month[0]*target_fraction
        antall=0
        # Count hours (descending) until one drops below the threshold.
        for hour in sorted_month:
            if hour < target_value: break
            antall = antall+1
            total_hours +=1
        hours_list.append(antall)
        print " Hours: ", antall
        print " Max value: ", sorted_month[0]
        print " Target value: ", target_value
    print "Total hours: ", total_hours
    print "Average hours per month: ", total_hours/float(total_months)
    label_percent = (1 - target_fraction)*100
    plt.plot(x_axis, hours_list, label='{:.0f}%'.format(label_percent))
#plt.suptitle(u'Antall timer per måned som må reduseres for å oppnå 10%, 20% og 30% reduksjon i effektforbruk')
#plt.xlabel('Dato')
plt.ylabel('Hours')
plt.legend()
plt.show()
|
bonus85/csv-analytics
|
days_script.py
|
Python
|
gpl-3.0
| 1,460
|
#!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2013 Doro Wu <doro.wu@canonical.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from command import CommandBase
from mount import Command as Mount
from umount import Command as Umount
from ssh import Command as Ssh
import sys
from shutil import copy
import os
import logging
from random import randint
from os.path import isfile
from os import access
def exit_if_ne_0(ret):
    """Terminate the process with status `ret` when it is non-zero; no-op on 0."""
    if ret != 0:
        logging.error('return {} != 0'.format(ret))
        sys.exit(ret)
class Command(CommandBase):
    """`execlocal` subcommand: copy a local script to the mounted target,
    run it over ssh, optionally copy result files back, and clean up."""

    @property
    def script(self):
        # Path of the local script to execute (first positional argument).
        return self.argv[1]

    @property
    def outputs(self):
        # Lazily collect values following each '-o' flag from argv (cached).
        if hasattr(self, '_outputs'):
            return self._outputs
        self._outputs = []
        skip = False
        for argv in enumerate(self.argv):
            if skip:
                # Previous element was '-o'; its value was already consumed.
                skip = False
                continue
            if argv[1] == '-o':
                skip = True
                self._outputs.append(self.argv[argv[0] + 1])
        return self._outputs

    def copy_file_back(self, arg):
        """Copy a result file out of the mounted target into the CWD."""
        filename = os.path.basename(arg)
        if not os.path.exists('./mnt/' + arg):
            logging.error('File not found: ' + arg)
            return
        logging.info('copy result back as ' + filename)
        copy('./mnt/' + arg, filename)

    def execlocal(self):
        """Mount (if needed), run the script on the target via ssh, copy
        requested files back, remove the remote copy, and unmount."""
        argv = self.argv
        is_mount = Mount().is_mount()
        if not is_mount:
            logging.info('mount')
            exit_if_ne_0(Mount().run(argv))
        try:
            # Random suffix avoids clashing with concurrent runs on the target.
            script_target = 'execlocal-{0:06d}'.format(randint(0, 999999))
            logging.debug('copy {} to target {}'.format(self.script,
                                                        script_target))
            copy(self.script, './mnt/tmp/' + script_target,)
            logging.info('run script {} ...'.format(self.script))
            cmd = ['ssh', '/tmp/' + script_target]
            output_index = -1
            # Forward script arguments up to '-f'; everything after '-f' is a
            # list of files to copy back.
            for arg in enumerate(self.argv[2:]):
                if arg[1] == '-f':
                    output_index = arg[0] + 3
                    break
                cmd.append(arg[1])
            Ssh().run(cmd)
            if output_index > 0:
                # NOTE(review): under Python 3 `map` is lazy and this would be
                # a no-op; presumably this code targets Python 2 — confirm.
                map(self.copy_file_back, argv[output_index:])
            cmd = ['ssh', 'rm', '-f', '/tmp/' + script_target]
            return Ssh().run(cmd)
        finally:
            if not is_mount:
                logging.info('umount')
                Umount().run(argv)
        return -1

    def run(self, argv):
        """Entry point: validate arguments and dispatch to execlocal()."""
        self.argv = argv
        if len(argv) == 1 or argv[-1] == 'help':
            self.help()
            return
        if not isfile(self.script) or not access(self.script, os.X_OK):
            # NOTE(review): no early return here — execlocal() still runs even
            # after this critical log; confirm whether that is intended.
            logging.critical('{} not found or not executable'.
                             format(self.script))
        return self.execlocal()

    def help(self):
        """Print usage and exit(0)."""
        print('Usage: fish-init {} <localscript arg1 arg2 ...> '
              '[-f f1 f2 ...]'.format(self.argv[0]))
        print('  Execute localscript on target and copy f1/f2... back')
        sys.exit(0)
|
fcwu/lp-fish-init
|
lp_fish_init/execlocal.py
|
Python
|
gpl-3.0
| 3,721
|
# -*- coding: utf-8 -*-
import random
import telnetlib
import json
import scrapy
import sys
from scrapy import log
from settings import FAILED_CODES
from fake_useragent import UserAgent
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class RotateUserAgentMiddleware(UserAgentMiddleware):
    """Scrapy downloader middleware that assigns a random User-Agent to each request."""

    def process_request(self, request, spider):
        ua = UserAgent().random
        if ua:
            log.msg('Current UserAgent: ' + ua, level=log.INFO)
            request.headers.setdefault('User-Agent', ua)
class saveFailedMiddleware():
    """Append the URL of every response whose status is in FAILED_CODES to
    the local 'failed_request' file, then pass the response through."""

    def process_response(self, request, response, spider):
        if response.status in FAILED_CODES:
            with open('failed_request', 'a') as failed_log:
                failed_log.write('{}\n'.format(response.url))
        return response
'''
class ProxyMiddleware(object):
#读取代理url
#文件路径可能要根据自己机子的环境改一下
# proxyList = ["218.92.220.58:8080", "43.226.162.23:80", "27.148.151.27:80", "124.88.67.7:843"]
dbTools = sqlTools()
proxyList = dbTools.getIpPool()
#测试ip是否可用
#return 'xxx.xxx.xxx.xxx:port'
def getPropertyIp(self,l):
def testIp(ip,port):
print 'testing '+ip+':'+port+'...'
try:
telnetlib.Telnet(ip,port=port,timeout=20)
except:
print ip+':'+port+' can not be used!!!'
return False
else:
print ip+':'+port+' success!!!'
return True
ipSucc = False
while not ipSucc:
pro_adr = str.split(random.choice(l),':')
ipSucc = testIp(pro_adr[0],pro_adr[1])
return ':'.join(pro_adr)
def process_request(self, request, spider):
pro_adr = self.getPropertyIp(self.proxyList)
print "*******-----------*Current Proxy IP:%s*-----------***********" %pro_adr
#request.meta['proxy'] = "http://{}:{}@{}:{}".format(user,pass,'127.0.0.1','8118')
request.meta['proxy'] = "http://"+ pro_adr
'''
|
Rafael-Cheng/Douban_Crawler
|
douban_book/book_crawler/douban_crawler/middlewares.py
|
Python
|
gpl-3.0
| 2,076
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright (C) 2014-2021 Space Science and Engineering Center (SSEC),
# University of Wisconsin-Madison.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file is part of the polar2grid software package. Polar2grid takes
# satellite observation data, remaps it, and writes it to a file format for
# input into another program.
# Documentation: http://www.ssec.wisc.edu/software/polar2grid/
"""Tests for polar2grid."""
|
davidh-ssec/polar2grid
|
polar2grid/tests/__init__.py
|
Python
|
gpl-3.0
| 1,096
|
import json
import logging
import os
import pytest
import tempfile
from unittest import mock
API_KEY = 'fnord'
os.environ['SLIPSTREAM_API_KEY'] = API_KEY
from slipstream import slipstream
log = logging.getLogger('slipstream')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
SIMPLE_PAYLOAD = dict(
id=42,
name='This is a cool title',
content='This is a blog post\nAnd it is *soo* cool',
content_html='<html><h2>This is some markdown</h2></html>',
user={'id': 42, 'email': 'test@example.com'},
created_at='2010-08-14T09:23:12-05:00',
updated_at='2010-08-14T09:23:12-05:00',
)
# TODO: Module? Or not? -W. Werner, 2015-11-21
@pytest.fixture(scope="module")
def client():
    """Module-scoped Flask test client for the slipstream app."""
    return slipstream.app.test_client()
def test_client_should_use_api_key():
    """The app must pick up the API key from the SLIPSTREAM_API_KEY env var."""
    assert slipstream.app.config['API_KEY'] == API_KEY
def test_post_should_fail_if_api_key_is_wrong(client):
    """POSTing to a URL with a mangled API key must be rejected with 403."""
    rv = client.post('/{}-invalid'.format(API_KEY))
    assert rv.status_code == 403
def test_post_should_pass_payload_to_core_publish(client):
    """The webhook view must unpack the JSON payload and forward the mapped
    fields (id, title, content, author) to core.publish."""
    expected_id = 'Some document id thing'
    expected_title = 'The Ballad of Wol Emulov'
    expected_content = '## This is some markdown'
    expected_author = 'roscivs@indessed.com'
    payload = dict(
        id=expected_id,
        name=expected_title,
        content=expected_content,
        content_html='<html><h2>This is some markdown</h2></html>',
        user={'id': 42, 'email': expected_author},
        created_at='2010-08-14T09:23:12-05:00',
        updated_at='2010-08-14T09:23:12-05:00',
    )
    with mock.patch('slipstream.core.publish') as fake_publish:
        rv = client.post('/{}'.format(API_KEY), data={
            'payload': json.dumps(payload),
        })
        assert rv.status_code == 200
        # name -> title and user.email -> author are remapped by the view.
        fake_publish.assert_called_with(
            id=expected_id,
            title=expected_title,
            content=expected_content,
            author=expected_author,
        )
def test_publish_should_put_post_in_content_directory(client):
    """Publishing via the webhook writes a slugified .md file into CONTENT_DIR.

    NOTE(review): an identical function with the same name is defined again
    further down in this file, so this definition is shadowed and pytest only
    collects one of the two.
    """
    with tempfile.TemporaryDirectory() as content_dir, \
            tempfile.TemporaryDirectory() as output_dir, \
            mock.patch.dict(slipstream.app.config,
                            {'OUTPUT_DIR': output_dir,
                             'CONTENT_DIR': content_dir,
                             'DEFAULT_AUTHOR': 'test@example.com',
                             }), \
            mock.patch.dict(slipstream.core.config, slipstream.app.config):
        rv = client.post('/{}'.format(API_KEY), data={
            'payload': json.dumps(SIMPLE_PAYLOAD),
        })
        assert slipstream.core.slugify(SIMPLE_PAYLOAD['name'])+'.md' in os.listdir(content_dir)
# BUG FIX: this function reused the name (and body) of the test defined above,
# shadowing it so pytest collected only one of the two.  Renamed so both run.
# The body is kept verbatim; it likely was meant to assert on OUTPUT_DIR —
# TODO decide what this second test should actually verify.
def test_publish_should_write_slugified_markdown(client):
    """Duplicate of the content-directory test, renamed to avoid shadowing."""
    with tempfile.TemporaryDirectory() as content_dir, \
            tempfile.TemporaryDirectory() as output_dir, \
            mock.patch.dict(slipstream.app.config,
                            {'OUTPUT_DIR': output_dir,
                             'CONTENT_DIR': content_dir,
                             'DEFAULT_AUTHOR': 'test@example.com',
                             }), \
            mock.patch.dict(slipstream.core.config, slipstream.app.config):
        rv = client.post('/{}'.format(API_KEY), data={
            'payload': json.dumps(SIMPLE_PAYLOAD),
        })
        assert slipstream.core.slugify(SIMPLE_PAYLOAD['name'])+'.md' in os.listdir(content_dir)
|
waynew/slipstream
|
tests/test_slipstream.py
|
Python
|
gpl-3.0
| 3,485
|
# Example for: alignment.malign()

# This will read all sequences from a file, align them, and write
# the alignment to a new file:

from modeller import *

env = environ()
# Load every entry ('all') from the toxin.ali alignment file.
aln = alignment(env, file='toxin.ali', align_codes='all')
# Progressive multiple sequence alignment with gap open/extend penalties.
aln.malign(gap_penalties_1d=(-600, -400))
# Write the aligned sequences in PAP format.
aln.write(file='toxin-seq.pap', alignment_format='PAP')
|
bjornwallner/proq2-server
|
apps/modeller9v8/examples/commands/malign.py
|
Python
|
gpl-3.0
| 330
|
import datetime
from brewer import Brewer
from receipe import Receipe
from globalthings import *
from step import Step
print "Test begging"
mon_step1=Step("monStep",TRANSITION,10)
mon_step2=Step("monStep2",LEVEL,60,67)
mon_step3=Step("monStep3",TRANSITION,10)
mon_step4=Step("monStep4",LEVEL,1,90)
mon_step1.print_self()
mon_step2.print_self()
mon_step3.print_self()
mon_step4.print_self()
mon_step1.interpolation(0, datetime.datetime.now(), 50, datetime.datetime.now()+ datetime.timedelta(minutes=5, seconds=0))
ma_receipe=Receipe("Ma premiere recette")
ma_receipe.add_step(mon_step1)
ma_receipe.add_step(mon_step2)
ma_receipe.add_step(mon_step3)
ma_receipe.add_step(mon_step4)
ma_receipe.print_self()
ma_receipe.start(20)
ma_receipe.print_self()
ma_receipe.get_current_temperature_instruction()
ma_receipe.update_step()
ma_receipe.print_self()
ma_receipe.user_force_next_step()
ma_receipe.print_self()
ma_receipe.get_current_temperature_instruction()
ma_receipe.user_force_next_step()
ma_receipe.print_self()
ma_receipe.get_current_temperature_instruction()
ma_receipe.user_force_next_step()
ma_receipe.print_self()
ma_receipe.get_current_temperature_instruction()
ma_receipe.user_force_next_step()
ma_receipe.print_self()
ma_receipe.get_current_temperature_instruction()
loaded_receipe=Receipe("ah ah ah")
loaded_receipe.config_from_file("./receipes/Bonnambr2_2016_03.xml")
loaded_receipe.print_self()
sys.stdin.read()
|
astrxlegaulois/RaspBrewerPy
|
test.py
|
Python
|
gpl-3.0
| 1,436
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: drops the M2M join table for Datasource.geometries.

    `backwards` recreates the join table (with its unique constraint) so the
    migration is reversible.  The `models` dict is South's frozen ORM
    snapshot and must not be edited by hand.
    """

    def forwards(self, orm):
        # Removing M2M table for field geometries on 'Datasource'
        db.delete_table('dpnetcdf_datasource_geometries')

    def backwards(self, orm):
        # Adding M2M table for field geometries on 'Datasource'
        db.create_table('dpnetcdf_datasource_geometries', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('datasource', models.ForeignKey(orm['dpnetcdf.datasource'], null=False)),
            ('geometry', models.ForeignKey(orm['dpnetcdf.geometry'], null=False))
        ))
        db.create_unique('dpnetcdf_datasource_geometries', ['datasource_id', 'geometry_id'])

    # Frozen model definitions (auto-generated by South; do not edit).
    models = {
        'dpnetcdf.datasource': {
            'Meta': {'object_name': 'Datasource'},
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapDataset']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'imported': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.Variable']", 'null': 'True'})
        },
        'dpnetcdf.geometry': {
            'Meta': {'object_name': 'Geometry'},
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'dpnetcdf.maplayer': {
            'Meta': {'object_name': 'MapLayer'},
            'datasources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Datasource']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'sql_query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'styles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Style']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'dpnetcdf.opendapcatalog': {
            'Meta': {'object_name': 'OpendapCatalog'},
            'base_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'catalog_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'http_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'opendap_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'service_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dpnetcdf.opendapdataset': {
            'Meta': {'object_name': 'OpendapDataset'},
            'calculation_facility': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapSubcatalog']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'program': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'scenario': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'strategy': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'time_zero': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'variables': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Variable']", 'symmetrical': 'False'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
        },
        'dpnetcdf.opendapsubcatalog': {
            'Meta': {'object_name': 'OpendapSubcatalog'},
            'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapCatalog']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'dpnetcdf.shapefile': {
            'Meta': {'object_name': 'ShapeFile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
        },
        'dpnetcdf.style': {
            'Meta': {'object_name': 'Style'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'xml': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'dpnetcdf.value': {
            'Meta': {'ordering': "(u'geometry',)", 'object_name': 'Value'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datasource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.Datasource']"}),
            'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.Geometry']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        'dpnetcdf.variable': {
            'Meta': {'object_name': 'Variable'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['dpnetcdf']
|
nens/dpnetcdf
|
dpnetcdf/migrations/0035_auto.py
|
Python
|
gpl-3.0
| 6,488
|
# Interactive division exercise (messages in Portuguese).
print("Divisao entre dois números")
try:
    num1 = int(input("Digite o primeiro número: "))
    num2 = int(input("Digite o segundo número: "))
    print("Divisao = ",num1/num2)
# BUGFIX: narrowed from a bare `except:`, which swallowed *every*
# exception (including KeyboardInterrupt/SystemExit) behind a
# divide-by-zero message.  Only the two errors this code can
# legitimately produce are handled: non-integer input (ValueError
# from int()) and division by zero.
except (ZeroDivisionError, ValueError):
    print("Não podemos dividir um número por Zero")
print("Continuei...")
|
fkenjikamei/python-exercises
|
aula12-3.py
|
Python
|
gpl-3.0
| 255
|
from distutils.core import setup

# BUGFIX: read the long description up front in a with-block so the file
# handle is closed deterministically (the original left README.md open).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='home_backup',
    version='1.0',
    packages=['home_backup'],
    url='http://zufallsheld.de/2013/09/29/python-backup-script-with-rsync/',
    license='GPL 3.0',
    author='Sebastian Gumprich',
    author_email='sebastian.gumprich@38.de',
    description='simple python backup script',
    long_description=long_description
)
|
dylan-reeves/home_backup
|
setup.py
|
Python
|
gpl-3.0
| 387
|
#
# Autogenerated by Thrift Compiler (0.9.0-dev)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:slots, dynamic
#
from thrift.Thrift import TType, TMessageType, TException
from thrift.protocol.TBase import TBase, TExceptionBase
# --- Thrift-generated "enums" -------------------------------------------
# Integer constants plus the bidirectional name<->value lookup tables
# emitted by the Thrift compiler.  Do not edit by hand; regenerate from
# the pyLoad .thrift IDL instead (see the "DO NOT EDIT" file header).
class DownloadStatus(TBase):
    # State of a single download.
    Finished = 0
    Offline = 1
    Online = 2
    Queued = 3
    Skipped = 4
    Waiting = 5
    TempOffline = 6
    Starting = 7
    Failed = 8
    Aborted = 9
    Decrypting = 10
    Custom = 11
    Downloading = 12
    Processing = 13
    Unknown = 14

    _VALUES_TO_NAMES = {
        0: "Finished",
        1: "Offline",
        2: "Online",
        3: "Queued",
        4: "Skipped",
        5: "Waiting",
        6: "TempOffline",
        7: "Starting",
        8: "Failed",
        9: "Aborted",
        10: "Decrypting",
        11: "Custom",
        12: "Downloading",
        13: "Processing",
        14: "Unknown",
    }

    _NAMES_TO_VALUES = {
        "Finished": 0,
        "Offline": 1,
        "Online": 2,
        "Queued": 3,
        "Skipped": 4,
        "Waiting": 5,
        "TempOffline": 6,
        "Starting": 7,
        "Failed": 8,
        "Aborted": 9,
        "Decrypting": 10,
        "Custom": 11,
        "Downloading": 12,
        "Processing": 13,
        "Unknown": 14,
    }

class Destination(TBase):
    # Where new links/packages are placed.
    Collector = 0
    Queue = 1

    _VALUES_TO_NAMES = {
        0: "Collector",
        1: "Queue",
    }

    _NAMES_TO_VALUES = {
        "Collector": 0,
        "Queue": 1,
    }

class ElementType(TBase):
    # Kind of queue element.
    Package = 0
    File = 1

    _VALUES_TO_NAMES = {
        0: "Package",
        1: "File",
    }

    _NAMES_TO_VALUES = {
        "Package": 0,
        "File": 1,
    }

class Input(TBase):
    # Kind of input an interaction task requests.
    NONE = 0
    TEXT = 1
    TEXTBOX = 2
    PASSWORD = 3
    BOOL = 4
    CLICK = 5
    CHOICE = 6
    MULTIPLE = 7
    LIST = 8
    TABLE = 9

    _VALUES_TO_NAMES = {
        0: "NONE",
        1: "TEXT",
        2: "TEXTBOX",
        3: "PASSWORD",
        4: "BOOL",
        5: "CLICK",
        6: "CHOICE",
        7: "MULTIPLE",
        8: "LIST",
        9: "TABLE",
    }

    _NAMES_TO_VALUES = {
        "NONE": 0,
        "TEXT": 1,
        "TEXTBOX": 2,
        "PASSWORD": 3,
        "BOOL": 4,
        "CLICK": 5,
        "CHOICE": 6,
        "MULTIPLE": 7,
        "LIST": 8,
        "TABLE": 9,
    }

class Output(TBase):
    # Output categories; values are distinct bits (1, 2, 4), so they can
    # be combined as flags.
    CAPTCHA = 1
    QUESTION = 2
    NOTIFICATION = 4

    _VALUES_TO_NAMES = {
        1: "CAPTCHA",
        2: "QUESTION",
        4: "NOTIFICATION",
    }

    _NAMES_TO_VALUES = {
        "CAPTCHA": 1,
        "QUESTION": 2,
        "NOTIFICATION": 4,
    }
# --- Thrift-generated value structs -------------------------------------
# Each class declares __slots__, a thrift_spec table of
# (field-id, wire-type, name, type-args, default) tuples (index 0 unused),
# and a keyword-only __init__; (de)serialization is inherited from TBase.
# Do not edit by hand; regenerate from the .thrift IDL.
class DownloadInfo(TBase):
    """
    Attributes:
     - fid
     - name
     - speed
     - eta
     - format_eta
     - bleft
     - size
     - format_size
     - percent
     - status
     - statusmsg
     - format_wait
     - wait_until
     - packageID
     - packageName
     - plugin
    """

    __slots__ = [
        'fid',
        'name',
        'speed',
        'eta',
        'format_eta',
        'bleft',
        'size',
        'format_size',
        'percent',
        'status',
        'statusmsg',
        'format_wait',
        'wait_until',
        'packageID',
        'packageName',
        'plugin',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'fid', None, None,), #: 1
        (2, TType.STRING, 'name', None, None,), #: 2
        (3, TType.I64, 'speed', None, None,), #: 3
        (4, TType.I32, 'eta', None, None,), #: 4
        (5, TType.STRING, 'format_eta', None, None,), #: 5
        (6, TType.I64, 'bleft', None, None,), #: 6
        (7, TType.I64, 'size', None, None,), #: 7
        (8, TType.STRING, 'format_size', None, None,), #: 8
        (9, TType.BYTE, 'percent', None, None,), #: 9
        (10, TType.I32, 'status', None, None,), #: 10
        (11, TType.STRING, 'statusmsg', None, None,), #: 11
        (12, TType.STRING, 'format_wait', None, None,), #: 12
        (13, TType.I64, 'wait_until', None, None,), #: 13
        (14, TType.I32, 'packageID', None, None,), #: 14
        (15, TType.STRING, 'packageName', None, None,), #: 15
        (16, TType.STRING, 'plugin', None, None,), #: 16
    )

    def __init__(self, fid=None, name=None, speed=None, eta=None, format_eta=None, bleft=None, size=None, format_size=None, percent=None, status=None, statusmsg=None, format_wait=None, wait_until=None, packageID=None, packageName=None, plugin=None,):
        self.fid = fid
        self.name = name
        self.speed = speed
        self.eta = eta
        self.format_eta = format_eta
        self.bleft = bleft
        self.size = size
        self.format_size = format_size
        self.percent = percent
        self.status = status
        self.statusmsg = statusmsg
        self.format_wait = format_wait
        self.wait_until = wait_until
        self.packageID = packageID
        self.packageName = packageName
        self.plugin = plugin

class ServerStatus(TBase):
    """
    Attributes:
     - pause
     - active
     - queue
     - total
     - speed
     - download
     - reconnect
    """

    __slots__ = [
        'pause',
        'active',
        'queue',
        'total',
        'speed',
        'download',
        'reconnect',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.BOOL, 'pause', None, None,), #: 1
        (2, TType.I16, 'active', None, None,), #: 2
        (3, TType.I16, 'queue', None, None,), #: 3
        (4, TType.I16, 'total', None, None,), #: 4
        (5, TType.I64, 'speed', None, None,), #: 5
        (6, TType.BOOL, 'download', None, None,), #: 6
        (7, TType.BOOL, 'reconnect', None, None,), #: 7
    )

    def __init__(self, pause=None, active=None, queue=None, total=None, speed=None, download=None, reconnect=None,):
        self.pause = pause
        self.active = active
        self.queue = queue
        self.total = total
        self.speed = speed
        self.download = download
        self.reconnect = reconnect

class ConfigItem(TBase):
    """
    Attributes:
     - name
     - description
     - value
     - type
    """

    __slots__ = [
        'name',
        'description',
        'value',
        'type',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'name', None, None,), #: 1
        (2, TType.STRING, 'description', None, None,), #: 2
        (3, TType.STRING, 'value', None, None,), #: 3
        (4, TType.STRING, 'type', None, None,), #: 4
    )

    def __init__(self, name=None, description=None, value=None, type=None,):
        self.name = name
        self.description = description
        self.value = value
        self.type = type

class ConfigSection(TBase):
    """
    Attributes:
     - name
     - description
     - items
     - outline
    """

    __slots__ = [
        'name',
        'description',
        'items',
        'outline',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'name', None, None,), #: 1
        (2, TType.STRING, 'description', None, None,), #: 2
        (3, TType.LIST, 'items', (TType.STRUCT, (ConfigItem, ConfigItem.thrift_spec)), None,), #: 3
        (4, TType.STRING, 'outline', None, None,), #: 4
    )

    def __init__(self, name=None, description=None, items=None, outline=None,):
        self.name = name
        self.description = description
        self.items = items
        self.outline = outline

class FileData(TBase):
    """
    Attributes:
     - fid
     - url
     - name
     - plugin
     - size
     - format_size
     - status
     - statusmsg
     - packageID
     - error
     - order
    """

    __slots__ = [
        'fid',
        'url',
        'name',
        'plugin',
        'size',
        'format_size',
        'status',
        'statusmsg',
        'packageID',
        'error',
        'order',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'fid', None, None,), #: 1
        (2, TType.STRING, 'url', None, None,), #: 2
        (3, TType.STRING, 'name', None, None,), #: 3
        (4, TType.STRING, 'plugin', None, None,), #: 4
        (5, TType.I64, 'size', None, None,), #: 5
        (6, TType.STRING, 'format_size', None, None,), #: 6
        (7, TType.I32, 'status', None, None,), #: 7
        (8, TType.STRING, 'statusmsg', None, None,), #: 8
        (9, TType.I32, 'packageID', None, None,), #: 9
        (10, TType.STRING, 'error', None, None,), #: 10
        (11, TType.I16, 'order', None, None,), #: 11
    )

    def __init__(self, fid=None, url=None, name=None, plugin=None, size=None, format_size=None, status=None, statusmsg=None, packageID=None, error=None, order=None,):
        self.fid = fid
        self.url = url
        self.name = name
        self.plugin = plugin
        self.size = size
        self.format_size = format_size
        self.status = status
        self.statusmsg = statusmsg
        self.packageID = packageID
        self.error = error
        self.order = order

class PackageData(TBase):
    """
    Attributes:
     - pid
     - name
     - folder
     - site
     - password
     - dest
     - order
     - linksdone
     - sizedone
     - sizetotal
     - linkstotal
     - links
     - fids
    """

    __slots__ = [
        'pid',
        'name',
        'folder',
        'site',
        'password',
        'dest',
        'order',
        'linksdone',
        'sizedone',
        'sizetotal',
        'linkstotal',
        'links',
        'fids',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'pid', None, None,), #: 1
        (2, TType.STRING, 'name', None, None,), #: 2
        (3, TType.STRING, 'folder', None, None,), #: 3
        (4, TType.STRING, 'site', None, None,), #: 4
        (5, TType.STRING, 'password', None, None,), #: 5
        (6, TType.I32, 'dest', None, None,), #: 6
        (7, TType.I16, 'order', None, None,), #: 7
        (8, TType.I16, 'linksdone', None, None,), #: 8
        (9, TType.I64, 'sizedone', None, None,), #: 9
        (10, TType.I64, 'sizetotal', None, None,), #: 10
        (11, TType.I16, 'linkstotal', None, None,), #: 11
        (12, TType.LIST, 'links', (TType.STRUCT, (FileData, FileData.thrift_spec)), None,), #: 12
        (13, TType.LIST, 'fids', (TType.I32, None), None,), #: 13
    )

    def __init__(self, pid=None, name=None, folder=None, site=None, password=None, dest=None, order=None, linksdone=None, sizedone=None, sizetotal=None, linkstotal=None, links=None, fids=None,):
        self.pid = pid
        self.name = name
        self.folder = folder
        self.site = site
        self.password = password
        self.dest = dest
        self.order = order
        self.linksdone = linksdone
        self.sizedone = sizedone
        self.sizetotal = sizetotal
        self.linkstotal = linkstotal
        self.links = links
        self.fids = fids
# Thrift-generated value structs (continued); same layout convention:
# __slots__, thrift_spec of (field-id, wire-type, name, type-args, default)
# tuples with index 0 unused, and a keyword __init__.  Do not edit by hand.
class InteractionTask(TBase):
    """
    Attributes:
     - iid
     - input
     - structure
     - preset
     - output
     - data
     - title
     - description
     - plugin
    """

    __slots__ = [
        'iid',
        'input',
        'structure',
        'preset',
        'output',
        'data',
        'title',
        'description',
        'plugin',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'iid', None, None,), #: 1
        (2, TType.I32, 'input', None, None,), #: 2
        (3, TType.LIST, 'structure', (TType.STRING, None), None,), #: 3
        (4, TType.LIST, 'preset', (TType.STRING, None), None,), #: 4
        (5, TType.I32, 'output', None, None,), #: 5
        (6, TType.LIST, 'data', (TType.STRING, None), None,), #: 6
        (7, TType.STRING, 'title', None, None,), #: 7
        (8, TType.STRING, 'description', None, None,), #: 8
        (9, TType.STRING, 'plugin', None, None,), #: 9
    )

    def __init__(self, iid=None, input=None, structure=None, preset=None, output=None, data=None, title=None, description=None, plugin=None,):
        self.iid = iid
        self.input = input
        self.structure = structure
        self.preset = preset
        self.output = output
        self.data = data
        self.title = title
        self.description = description
        self.plugin = plugin

class CaptchaTask(TBase):
    """
    Attributes:
     - tid
     - data
     - type
     - resultType
    """

    __slots__ = [
        'tid',
        'data',
        'type',
        'resultType',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I16, 'tid', None, None,), #: 1
        (2, TType.STRING, 'data', None, None,), #: 2
        (3, TType.STRING, 'type', None, None,), #: 3
        (4, TType.STRING, 'resultType', None, None,), #: 4
    )

    def __init__(self, tid=None, data=None, type=None, resultType=None,):
        self.tid = tid
        self.data = data
        self.type = type
        self.resultType = resultType

class EventInfo(TBase):
    """
    Attributes:
     - eventname
     - id
     - type
     - destination
    """

    __slots__ = [
        'eventname',
        'id',
        'type',
        'destination',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'eventname', None, None,), #: 1
        (2, TType.I32, 'id', None, None,), #: 2
        (3, TType.I32, 'type', None, None,), #: 3
        (4, TType.I32, 'destination', None, None,), #: 4
    )

    def __init__(self, eventname=None, id=None, type=None, destination=None,):
        self.eventname = eventname
        self.id = id
        self.type = type
        self.destination = destination

class UserData(TBase):
    """
    Attributes:
     - name
     - email
     - role
     - permission
     - templateName
    """

    __slots__ = [
        'name',
        'email',
        'role',
        'permission',
        'templateName',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'name', None, None,), #: 1
        (2, TType.STRING, 'email', None, None,), #: 2
        (3, TType.I32, 'role', None, None,), #: 3
        (4, TType.I32, 'permission', None, None,), #: 4
        (5, TType.STRING, 'templateName', None, None,), #: 5
    )

    def __init__(self, name=None, email=None, role=None, permission=None, templateName=None,):
        self.name = name
        self.email = email
        self.role = role
        self.permission = permission
        self.templateName = templateName

class AccountInfo(TBase):
    """
    Attributes:
     - validuntil
     - login
     - options
     - valid
     - trafficleft
     - maxtraffic
     - premium
     - type
    """

    __slots__ = [
        'validuntil',
        'login',
        'options',
        'valid',
        'trafficleft',
        'maxtraffic',
        'premium',
        'type',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I64, 'validuntil', None, None,), #: 1
        (2, TType.STRING, 'login', None, None,), #: 2
        (3, TType.MAP, 'options', (TType.STRING, None, TType.LIST, (TType.STRING, None)), None,), #: 3
        (4, TType.BOOL, 'valid', None, None,), #: 4
        (5, TType.I64, 'trafficleft', None, None,), #: 5
        (6, TType.I64, 'maxtraffic', None, None,), #: 6
        (7, TType.BOOL, 'premium', None, None,), #: 7
        (8, TType.STRING, 'type', None, None,), #: 8
    )

    def __init__(self, validuntil=None, login=None, options=None, valid=None, trafficleft=None, maxtraffic=None, premium=None, type=None,):
        self.validuntil = validuntil
        self.login = login
        self.options = options
        self.valid = valid
        self.trafficleft = trafficleft
        self.maxtraffic = maxtraffic
        self.premium = premium
        self.type = type

class ServiceCall(TBase):
    """
    Attributes:
     - plugin
     - func
     - arguments
     - parseArguments
    """

    __slots__ = [
        'plugin',
        'func',
        'arguments',
        'parseArguments',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'plugin', None, None,), #: 1
        (2, TType.STRING, 'func', None, None,), #: 2
        (3, TType.LIST, 'arguments', (TType.STRING, None), None,), #: 3
        (4, TType.BOOL, 'parseArguments', None, None,), #: 4
    )

    def __init__(self, plugin=None, func=None, arguments=None, parseArguments=None,):
        self.plugin = plugin
        self.func = func
        self.arguments = arguments
        self.parseArguments = parseArguments

class OnlineStatus(TBase):
    """
    Attributes:
     - name
     - plugin
     - packagename
     - status
     - size
    """

    __slots__ = [
        'name',
        'plugin',
        'packagename',
        'status',
        'size',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'name', None, None,), #: 1
        (2, TType.STRING, 'plugin', None, None,), #: 2
        (3, TType.STRING, 'packagename', None, None,), #: 3
        (4, TType.I32, 'status', None, None,), #: 4
        (5, TType.I64, 'size', None, None,), #: 5
    )

    def __init__(self, name=None, plugin=None, packagename=None, status=None, size=None,):
        self.name = name
        self.plugin = plugin
        self.packagename = packagename
        self.status = status
        self.size = size

class OnlineCheck(TBase):
    """
    Attributes:
     - rid
     - data
    """

    __slots__ = [
        'rid',
        'data',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'rid', None, None,), #: 1
        (2, TType.MAP, 'data', (TType.STRING, None, TType.STRUCT, (OnlineStatus, OnlineStatus.thrift_spec)), None,), #: 2
    )

    def __init__(self, rid=None, data=None,):
        self.rid = rid
        self.data = data
# Thrift-generated exception types; TExceptionBase supplies serialization
# and the exception machinery.  Do not edit by hand.
class PackageDoesNotExists(TExceptionBase):
    """
    Attributes:
     - pid
    """

    __slots__ = [
        'pid',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'pid', None, None,), #: 1
    )

    def __init__(self, pid=None,):
        self.pid = pid

    def __str__(self):
        return repr(self)

class FileDoesNotExists(TExceptionBase):
    """
    Attributes:
     - fid
    """

    __slots__ = [
        'fid',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.I32, 'fid', None, None,), #: 1
    )

    def __init__(self, fid=None,):
        self.fid = fid

    def __str__(self):
        return repr(self)

class ServiceDoesNotExists(TExceptionBase):
    """
    Attributes:
     - plugin
     - func
    """

    __slots__ = [
        'plugin',
        'func',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'plugin', None, None,), #: 1
        (2, TType.STRING, 'func', None, None,), #: 2
    )

    def __init__(self, plugin=None, func=None,):
        self.plugin = plugin
        self.func = func

    def __str__(self):
        return repr(self)

class ServiceException(TExceptionBase):
    """
    Attributes:
     - msg
    """

    __slots__ = [
        'msg',
    ]

    thrift_spec = (
        None, #: 0
        (1, TType.STRING, 'msg', None, None,), #: 1
    )

    def __init__(self, msg=None,):
        self.msg = msg

    def __str__(self):
        return repr(self)
|
ardi69/pyload-0.4.10
|
pyload/remote/thriftbackend/thriftgen/pyload/ttypes.py
|
Python
|
gpl-3.0
| 17,135
|
from vilay.core.DescriptionScheme import DescriptionScheme
from vilay.core.Film import Film
class VDData:
    """Container tying a description-scheme root to an optional Film."""

    def __init__(self, film=None):
        # Root node of the description hierarchy.
        self.dsRoot = DescriptionScheme("Feature Film")
        self.setFilm(film)

    def setFilm(self, film):
        """Attach *film*: accepts None, a title string, or a Film instance.

        Raises:
            TypeError: if *film* is of any other type.
        """
        if film is None:
            self.film = None
        elif isinstance(film, str):
            # A plain string is treated as a film title.
            self.film = Film(film)
        elif isinstance(film, Film):
            self.film = film
        else:
            # BUGFIX: the original `raise("unknown type of input")` tried to
            # raise a plain string, which itself fails with
            # "TypeError: exceptions must derive from BaseException".
            raise TypeError("unknown type of input")
|
dakot/vilay-detect
|
vilay/core/VDData.py
|
Python
|
gpl-3.0
| 517
|
from __future__ import absolute_import, print_function, unicode_literals
import itertools
from streamparse.spout import Spout
class WordSpout(Spout):
    """Storm spout that emits fixed sentences in an endless round-robin."""

    def initialize(self, stormconf, context):
        # Fixed corpus; cycled forever so the topology never runs dry.
        self.sentences = [
            "She advised him to take a long holiday, so "
            "he immediately quit work and took a trip around the world",
            "I was very glad to get a present from her",
            "He will be here in half an hour",
            "She saw him eating a sandwich",
        ]
        self.words = itertools.cycle(self.sentences)

    def next_tuple(self):
        # Emit the next sentence as a single-field tuple.
        self.emit([next(self.words)])

    def ack(self, tup_id):
        # Loud console banner so acks are easy to spot in worker logs;
        # nothing else to do on successful processing.
        banner = '+++++++++++++++++++++++++++++++++++'
        for line in (banner, banner,
                     'Words ack:+++++++++++++++++++++++++++++',
                     banner, banner):
            print(line)

    def fail(self, tup_id):
        # Same banner treatment for failures; no replay logic here.
        banner = '+++++++++++++++++++++++++++++++++++'
        for line in (banner, banner,
                     'Words fail:+++++++++++++++++++++++++++++',
                     banner, banner):
            print(line)
|
unlessbamboo/grocery-shop
|
language/python/src/storm/wordcount/src/spouts/words.py
|
Python
|
gpl-3.0
| 1,441
|
import matplotlib.pyplot as plt
import numpy as np
import math
from scripts import *
# Dataset toggle: flip the condition to evaluate the 10fps UAV variant
# instead of the full-rate UAV123 results.
if True:
    RESULT_SRC = './results_UAV123/{0}/'  # '{0} : OPE, SRE, TRE'
else:
    RESULT_SRC = './results_UAV_10fps/{0}/'  # '{0} : OPE, SRE, TRE'
def main():
    """Load per-tracker scores for each evaluation type and plot them."""
    evalTypes = ['OPE']
    testname = 'UAV123'
    for idx, evalType in enumerate(evalTypes):
        result_src = RESULT_SRC.format(evalType)
        scoreList = []
        for tracker_name in os.listdir(result_src):
            # we don't want this yet
            if tracker_name == 'KCF_multi_cnn_dnn_scale_best_valid_CNN':
                continue
            scoreList.append(
                butil.load_scores(evalType, tracker_name, testname, RESULT_SRC))
        # Even figure numbers carry success plots, odd ones precision plots.
        plot_graph_success(scoreList, idx * 2, evalType, testname)
        plot_graph_precision(scoreList, idx * 2 + 1, evalType, testname)
    plt.waitforbuttonpress(0)
def plot_graph_success(scoreList, fignum, evalType, testname):
    """Draw the success-rate-vs-overlap-threshold plot for all trackers.

    Trackers are ranked by total success rate; only the top MAXIMUM_LINES
    get coloured, labelled lines (legacy tracker names are mapped to their
    publication names), the rest are drawn as faint grey dashes.
    Returns the pyplot module so callers can chain further calls.
    """
    plt.figure(num=fignum, figsize=(9, 6), dpi=70)
    # Rank trackers by the area under their success curve (sum of rates).
    rankList = sorted(scoreList, key=lambda o: sum(o[0].successRateList), reverse=True)
    for i in range(len(rankList)):
        result = rankList[i]
        tracker = result[0].tracker
        attr = result[0]
        if len(attr.successRateList) == len(thresholdSetOverlap):
            if i < MAXIMUM_LINES:
                # Alternate solid/dashed line styles for readability.
                ls = '-'
                if i % 2 == 1:
                    ls = '--'
                # Mean success rate; /100. converts percentages to fractions.
                ave = sum(attr.successRateList) /100. / float(len(attr.successRateList))
                if type(tracker) == dict:
                    # Map internal tracker names to their publication names.
                    if tracker['name'] == 'DSST':
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c=LINE_COLORS[i], label='{0} [{1:.3f}]'.format('DSST_BMVC_2014_tPAMI17', ave), lw=2.0, ls=ls)
                    elif tracker['name'] == 'MEEM':
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c=LINE_COLORS[i], label='{0} [{1:.3f}]'.format('MEEM_ECCV14', ave), lw=2.0,
                                 ls=ls)
                    elif tracker['name'] == 'MUSTer':
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c=LINE_COLORS[i], label='{0} [{1:.3f}]'.format('MUSTer_CVPR15', ave), lw=2.0, ls=ls)
                    elif tracker['name'][:3] =='HDT':
                        tracker['name'] = "HDT_cvpr16"
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c = LINE_COLORS[i], label='{0} [{1:.3f}]'.format(tracker['name'].upper(), ave), lw=2.0, ls = ls)
                    elif tracker['name'] == 'KCFraw_colour':
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c=LINE_COLORS[i], label='{0} [{1:.3f}]'.format('KCF_ECCV12_tPAMI15', ave), lw=2.0,
                                 ls=ls)
                    # Wudi's modification:
                    else:
                        plt.plot(thresholdSetOverlap, attr.successRateList,
                                 c = LINE_COLORS[i], label='{0} [{1:.3f}]'.format(tracker['name'], ave), lw=2.0, ls = ls)
                else:
                    # Tracker given as a plain string rather than a dict.
                    plt.plot(thresholdSetOverlap, attr.successRateList,
                             c = LINE_COLORS[i], label='{0} [{1:.3f}]'.format(tracker, ave), lw=2.0, ls=ls)
            else:
                # Beyond the line budget: unlabelled faint grey dashes.
                plt.plot(thresholdSetOverlap, attr.successRateList,
                         label='', alpha=0.5, c='#202020', ls='--')
        else:
            # Score list length does not match the threshold grid.
            print('err')
    plt.title('Success plots of {0}_{1} (sequence average)'.format(evalType, testname.upper()))
    plt.rcParams.update({'axes.titlesize': 'medium'})
    plt.xlabel('thresholds')
    plt.xticks(np.arange(thresholdSetOverlap[0], thresholdSetOverlap[len(thresholdSetOverlap)-1]+0.1, 0.1))
    plt.grid(color='#101010', alpha=0.5, ls=':')
    plt.legend(fontsize='medium')
    # plt.savefig(BENCHMARK_SRC + 'graph/{0}_sq.png'.format(evalType), dpi=74, bbox_inches='tight')
    return plt
def plot_graph_precision(scoreList, fignum, evalType, testname):
    """Draw the precision-vs-location-error-threshold plot for all trackers.

    Trackers are ranked by precision at the 20px threshold (index 20);
    trackers with short precision lists are zero-padded first.  Shows the
    figure and returns the pyplot module.
    """
    plt.figure(num=fignum, figsize=(9, 6), dpi=70)
    # some don't have precison list--> we will delete them?
    # Pad short precision lists with zeros so indexing [20] is safe.
    for t in scoreList:
        if len(t[0].precisionRateList)<20:
            print(t[0].tracker)
            t[0].precisionRateList= np.zeros(51)
            t[0].precisionRateList[20] = 0
    # Rank by precision at the conventional 20-pixel threshold.
    rankList = sorted(scoreList, key=lambda o: o[0].precisionRateList[20], reverse=True)
    for i in range(len(rankList)):
        result = rankList[i]
        tracker = result[0].tracker
        attr = result[0]
        if len(attr.precisionRateList) == len(thresholdSetError):
            if i < MAXIMUM_LINES:
                # Alternate solid/dashed line styles for readability.
                ls = '-'
                if i % 2 == 1:
                    ls = '--'
                #ave = sum(attr.precisionRateList) / float(len(attr.precisionRateList))
                ave = attr.precisionRateList[20]
                if type(tracker) == dict:
                    # Wudi's modification:
                    # Map internal tracker names to their publication names.
                    if tracker['name']=='DSST':
                        plt.plot(thresholdSetError, attr.precisionRateList, c=LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format('DSST_BMVC_2014_tPAMI17', ave), lw=2.0, ls=ls)
                    elif tracker['name'] == 'MEEM':
                        plt.plot(thresholdSetError, attr.precisionRateList, c=LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format('MEEM_ECCV14', ave), lw=2.0, ls=ls)
                    elif tracker['name'] == 'MUSTer':
                        plt.plot(thresholdSetError, attr.precisionRateList, c=LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format('MUSTer_CVPR15', ave), lw=2.0, ls=ls)
                    elif tracker['name'][:3] == 'HDT':
                        tracker['name'] = "HDT_cvpr16"
                        plt.plot(thresholdSetError, attr.precisionRateList, c=LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format(tracker['name'].upper(), ave), lw=2.0, ls=ls)
                    elif tracker['name'] == 'KCFraw_colour':
                        plt.plot(thresholdSetError, attr.precisionRateList, c=LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format('KCF_ECCV12_tPAMI15', ave), lw=2.0, ls=ls)
                    else:
                        plt.plot(thresholdSetError, attr.precisionRateList,c = LINE_COLORS[i],
                                 label='{0} [{1:.3f}]'.format(tracker['name'], ave), lw=2.0, ls = ls)
                elif tracker == "HDT_cvpr2016" or tracker =='KCFvgg_rnn' or tracker[:3] =='KCF':
                    plt.plot(thresholdSetError, attr.precisionRateList,
                             c = LINE_COLORS[i], label='{0} [{1:.2f}]'.format(tracker, ave), lw=2.0, ls=ls)
                # else:
                #     plt.plot(thresholdSetOverlap, attr.precisionRateList,
                #              label='', alpha=0.5, c='#202020', ls='--')
        else:
            # BUGFIX: was the Python-2 statement `print 'err'`, a SyntaxError
            # under Python 3 even though the rest of this file uses print();
            # the call form works on both interpreters for a single argument.
            print('err')
    plt.title('Precision plots of {0}_{1} (sequence average)'.format(evalType, testname.upper()))
    plt.rcParams.update({'axes.titlesize': 'medium'})
    plt.xlabel('thresholds')
    plt.xticks(np.arange(thresholdSetError[0], thresholdSetError[len(thresholdSetError)-1], 10))
    plt.grid(color='#101010', alpha=0.5, ls=':')
    plt.legend(fontsize='medium')
    # plt.savefig(BENCHMARK_SRC + 'graph/{0}_sq.png'.format(evalType), dpi=74, bbox_inches='tight')
    plt.show()
    return plt
# Script entry point.
if __name__ == '__main__':
    main()
|
stevenwudi/Kernelized_Correlation_Filter
|
draw_graph_UAV.py
|
Python
|
gpl-3.0
| 7,588
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import utils.youtube_field
class Migration(migrations.Migration):
    """Drops the cz/it translation columns from Category and adds an
    optional YouTube video URL to Idea."""

    dependencies = [
        ('ideas', '0007_remove_vote_vote'),
    ]

    operations = [
        # Remove per-language (cz, it) translated fields from Category.
        migrations.RemoveField(
            model_name='category',
            name='description_cz',
        ),
        migrations.RemoveField(
            model_name='category',
            name='description_it',
        ),
        migrations.RemoveField(
            model_name='category',
            name='name_cz',
        ),
        migrations.RemoveField(
            model_name='category',
            name='name_it',
        ),
        # Optional YouTube link on Idea, validated by the custom field.
        migrations.AddField(
            model_name='idea',
            name='video_url',
            field=utils.youtube_field.YoutubeUrlField(null=True, verbose_name='Youtube video', blank=True),
            preserve_default=True,
        ),
    ]
|
CivilHub/CivilHub
|
ideas/migrations/0008_auto_20150626_1143.py
|
Python
|
gpl-3.0
| 954
|
__author__ = 'Salvakiya'
import configparser
import glob
import os
import traitfile
def printcol(col, string):
    """Print *string* wrapped in an ANSI colour escape.

    col is one of 'r' (red), 'y' (yellow), 'g' (cyan-ish "green");
    any other key raises KeyError.
    """
    templates = {
        "r": "\033[01;31m{0}\033[00m",
        "y": "\033[1;33m{0}\033[00m",
        "g": "\033[1;36m{0}\033[00m",
    }
    print(templates[col].format(string))
# Module-level registries (currently unpopulated here).
blue_print_instances = {}
actor_instances = {}
#retrieve which mod we are currently using
config = configparser.RawConfigParser()
if os.path.isfile("settings.ini"):
    config.read("settings.ini")
    # Name of the active mod; used to build asset paths below.
    GAME_MOD = config.get("Options", "mod")
else:
    # NOTE(review): if settings.ini is missing, GAME_MOD is never defined
    # and load_file_list() will fail with NameError later.
    printcol("r", "Cannot find settings.ini!")
#load a list of files of given type from assets.txt
def load_file_list(file_type):
path = "mods/"+GAME_MOD+"/"
file_object = open(path+"assets.txt", 'r')
directory_list = []
for directory in file_object.readlines():
append_path = path+directory.rstrip('\n')
if not directory.startswith("#") and os.path.isdir(append_path):
directory_list.append(append_path)
file_list = []
for directory in directory_list:
printcol("g", directory+"/*"+file_type)
files = glob.glob(directory+"/*"+file_type)
file_list.extend(files)
return file_list
def load_assets():
    """Parse every actor .ini file and build {name: BluePrintClass} templates.

    Each .ini must have a [General] section with a Name option; every other
    section is treated as a trait and kept only if traitfile defines it.
    """
    actor_file_list = load_file_list(".ini")
    templates = {}
    for file in actor_file_list:
        config = configparser.RawConfigParser()
        config.read(file)
        if config.has_option("General", "Name"):
            name = config.get("General", "Name")
            printcol("g","Loading: "+name)
            trait_list = []
            for trait in config.sections():
                if hasattr(traitfile, trait):
                    # BUGFIX: list.append returns None — the original did
                    # `templates[name] = trait_list.append(trait)`, storing
                    # None in the dict; just append here.
                    trait_list.append(trait)
                    printcol("g","Trait "+trait+" loaded!")
                else:
                    printcol("y","Trait "+trait+" does not exist?")
            # BUGFIX: pass the collected trait list (matching the
            # BluePrintClass parameter name), not the whole templates dict.
            templates[name] = BluePrintClass(trait_list)
            printcol("g", name+": Complete!")
        else:
            printcol("r", "Error, Missing General Trait For "+file)
    return templates
class AssetLoader():
    """Loads every asset template at construction time and echoes the result."""
    def __init__(self):
        loaded_templates = load_assets()
        printcol("g", loaded_templates)
class BluePrintClass:
    """Record of the traits that make up one asset template."""
    def __init__(self, trait_list):
        # Bug fix: the argument was previously discarded (the body was a
        # bare `return`); keep it so callers can inspect the traits.
        self.traits = trait_list
# Module import side effect: load all assets immediately on import/run.
AssetLoader()
|
Salvakiya/PyRTS
|
src/Program.py
|
Python
|
gpl-3.0
| 2,343
|
import urllib.request
class HTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
    """Redirect handler that reports a 302 and returns its Location target
    instead of following the redirect (base-class delegation is
    deliberately disabled; http_error_301/303/307 aliases likewise)."""

    def http_error_302(self, req, fp, code, msg, headers):
        target = headers['Location']
        print("302 Redirection")
        print(target)
        return target
|
zeroidle/GomBot
|
HTTPRedirectHandler.py
|
Python
|
gpl-3.0
| 422
|
from flask import Flask, g
import sqlite3
app = Flask(__name__)
db_location = 'var/test.db'
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use
    and caching it on flask.g."""
    connection = getattr(g, 'db', None)
    if connection is None:
        connection = sqlite3.connect(db_location)
        g.db = connection
    return connection
@app.teardown_appcontext
def close_db_connection(exception):
    """App-context teardown hook: close the per-request DB connection if one
    was opened (the *exception* argument is required by Flask, unused here)."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
def init_db():
    """Create the database schema from schema.sql inside an app context."""
    with app.app_context():
        connection = get_db()
        with app.open_resource('schema.sql', mode='r') as schema:
            connection.cursor().executescript(schema.read())
        connection.commit()
@app.route("/")
def root():
    """Insert a demo row, then render every album as an HTML list.

    Note: intentionally inserts a duplicate row on every request (teaching
    demo behaviour, kept as-is).
    """
    db = get_db()
    db.cursor().execute('insert into albums values ("American Beauty", "Grateful Dead", "CD")')
    db.commit()
    page = []
    page.append('<html><ul>')
    sql = "SELECT rowid, * FROM albums ORDER BY artist"
    for row in db.cursor().execute(sql):
        page.append('<li>')
        page.append(str(row))
        page.append('</li>')
    # Bug fix: closing tag was '<html>' instead of '</html>'.
    page.append('</ul></html>')
    return ''.join(page)
# Dev-server entry point. debug=True enables the interactive debugger and
# must not be used in production; 0.0.0.0 binds all interfaces.
if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
|
siwells/teaching_set09103
|
code/topic_10/datastore.py
|
Python
|
gpl-3.0
| 1,079
|
from PyQt4 import Qt as qt
from . import qt_util
from ..abstract.asyncmsg import MessageBar, ButtonKind
import collections
import platform
def _get_keyboard_hint():
if platform.system() == 'Darwin':
return '\N{PLACE OF INTEREST SIGN}B'
else:
return 'Ctrl+B'
class MessageBarView(qt.QWidget):
    """Widget that displays queued MessageBar messages one at a time.

    Each message shows a title, an optional text box and one push button per
    choice; Ctrl+B (Cmd+B on OS X) focuses the bar. Messages are queued via
    enqueue() and reported back through MessageBar.done(result).
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setAttribute(qt.Qt.WA_MacSmallSize)
        # global shortcut that jumps keyboard focus into the bar
        self._shortcut = qt.QShortcut(qt.QKeySequence('Ctrl+B'), self)
        self._shortcut.activated.connect(self._on_shortcut)
        # FIFO of pending MessageBar objects
        self._q = collections.deque()
        self._layout = qt.QHBoxLayout(self)
        self._msg = None
        self._title = qt.QLabel(self)
        self._text_box = None
        self._close_button = qt.QToolButton(self)
        self._close_button.setIcon(self.style().standardIcon(qt.QStyle.SP_TitleBarCloseButton))
        self._close_button.setStyleSheet('border: none')
        self._close_button.setFocusPolicy(qt.Qt.NoFocus)
        self._close_button.clicked.connect(self._on_close)
        self._layout.addWidget(self._close_button)
        self._layout.addWidget(self._title)
        self._layout.addStretch()
        # applied to the text box while the message content is invalid
        self._edit_invalid_stylesheet = '''
        background-color: #FAA;
        '''
        lbl = qt.QLabel('({})'.format(_get_keyboard_hint()), self)
        lbl.setStyleSheet('font-size: 10pt; font-style: italic;')
        self._layout.addWidget(lbl)
        # choice buttons of the currently shown message
        self._widgets = []
        self._default_widget = None
        self.hide()
        self.setProperty('message_bar_panel', True)
        stylesheet = '''
        QWidget[message_bar_panel=true] {
            border-bottom: 1px solid palette(dark);
        }
        '''
        self.setStyleSheet(stylesheet)
    def paintEvent(self, event):
        # provide support for stylesheets
        opt = qt.QStyleOption()
        opt.init(self)
        painter = qt.QPainter(self)
        with qt_util.ending(painter):
            self.style().drawPrimitive(qt.QStyle.PE_Widget, opt, painter, self)
    def _add_text_box(self):
        # Lazily create the line edit used by messages that carry a text_box.
        if self._text_box is None:
            self._text_box = qt.QLineEdit(self)
            self._text_box.textEdited.connect(self._on_text_change)
            self._text_box.setFocusPolicy(qt.Qt.NoFocus)
            self._text_box.editingFinished.connect(self._on_editing_finished)
            self._layout.insertWidget(3, self._text_box)
    def _remove_text_box(self):
        # Destroy the line edit when the next message has none.
        if self._text_box is not None:
            self._text_box.deleteLater()
            self._text_box = None
    def keyPressEvent(self, event):
        # Escape dismisses the current message, like clicking close.
        if event.key() == qt.Qt.Key_Escape:
            event.accept()
            self._close_button.click()
        else:
            super().keyPressEvent(event)
    def _on_is_valid_changed(self):
        # Mirror the message's is_valid flag in the UI: tint the text box
        # and enable/disable the choice buttons.
        is_valid = self._msg.is_valid
        if self._text_box is not None:
            if is_valid:
                self._text_box.setStyleSheet('')
            else:
                self._text_box.setStyleSheet(self._edit_invalid_stylesheet)
        for w in self._widgets:
            w.setEnabled(is_valid)
    def _on_text_change(self):
        if self._msg is not None:
            self._msg.emit_text_changed(self._text_box.text())
    def _on_shortcut(self):
        self._activate()
    def _activate(self):
        # Give keyboard focus to the text box, or the first choice button.
        if self._text_box is not None:
            self._text_box.setFocus()
        elif self._widgets:
            self._widgets[0].setFocus()
    def _get_result(self, choice):
        # Build the value handed to MessageBar.done(): the chosen label,
        # paired with the text box content when the message has one.
        if self._msg.text_box is not None:
            return choice, self._text_box.text()
        else:
            return choice
    def _on_close(self):
        # Close button / Escape: report None as the chosen value.
        m = self._msg
        result = self._get_result(None)
        self._show_next()
        m.done(result)
    def _on_click(self):
        b = self.sender()
        m = self._msg
        result = self._get_result(b.text())
        self._show_next()
        m.done(result)
    def _on_editing_finished(self):
        # Return in the text box triggers the default (ok-kind) button.
        if self._default_widget is not None:
            self._default_widget.click()
    def _show_next(self):
        # Tear down the current message's widgets and display the next
        # queued message, hiding the bar when the queue is empty.
        pw = self.parentWidget()
        self._default_widget = None
        if pw is not None:
            pw.setFocus()
        if not self._q:
            self._msg = None
            for w in self._widgets:
                w.deleteLater()
            self._title.setText('')
            self._widgets.clear()
            self._remove_text_box()
            self.hide()
            if pw is not None:
                pw.setFocus()
            return
        if self._msg is not None:
            self._msg.is_valid_changed.disconnect(self._on_is_valid_changed)
        msg = self._q.popleft()
        assert isinstance(msg, MessageBar)
        msg.is_valid_changed.connect(self._on_is_valid_changed)
        self._msg = msg
        for widget in self._widgets:
            widget.deleteLater()
        self._widgets.clear()
        self._title.setText(msg.title)
        if msg.text_box is not None:
            self._add_text_box()
            self._text_box.setText(msg.text_box)
        else:
            self._remove_text_box()
        for choice in msg.choices:
            b = qt.QPushButton(self)
            if choice.kind == ButtonKind.ok:
                b.setDefault(True)
                self._default_widget = b
            b.setText(choice.name)
            self._layout.addWidget(b)
            self._widgets.append(b)
            b.clicked.connect(self._on_click)
        qt_util.set_tab_order(self, [self._text_box] + self._widgets)
        self._on_is_valid_changed()
        self.show()
        if msg.steal_focus:
            self._activate()
    def enqueue(self, msg):
        """Queue *msg* (a MessageBar) and show it immediately when idle."""
        assert isinstance(msg, MessageBar)
        self._q.append(msg)
        if self._msg is None:
            self._show_next()
if __name__ == '__main__':
    # Manual smoke test: shows a bare MessageBarView and queues four
    # messages exercising validation, multiple choices and text entry.
    app = qt.QApplication([])
    mbv = MessageBarView()
    mbv.show()
    mbv.raise_()
    def validator(sender, text):
        # the text is only "valid" once longer than five characters
        sender.is_valid = len(text) > 5
    def callback(r):
        # final message: print the result and end the event loop
        print(r)
        app.quit()
    mbv.enqueue(MessageBar(title='Baz',
                           choices=['Done'],
                           text_box='',
                           is_valid=False)
                .add_callback(lambda r: print(r))
                .add_text_changed_callback(validator,
                                           add_sender=True))
    mbv.enqueue(MessageBar(title='Foo',
                           choices=['Bar', 'Baz'])
                .add_callback(lambda r: print(r)))
    mbv.enqueue(MessageBar(title='Bar',
                           choices=['Baz', 'Quux'])
                .add_callback(lambda r: print(r)))
    mbv.enqueue(MessageBar(title='Baz',
                           choices=['Done'],
                           text_box='')
                .add_callback(callback))
    app.exec()
|
sam-roth/Keypad
|
keypad/qt/asyncmsg.py
|
Python
|
gpl-3.0
| 6,931
|
__author__ = 'Chris Hamm'
#NetworkClient_rBugg
#Created: 11/21/2014
#This is a restructured version of r7A
#NOTES: SOCKET IS NOT CLOSED WHEN FINISHED!!!
#SUPERCEDED BY r9!!!!
import socket
import platform
import Chunk
#===================================================================
#Client constructor/class definition
#===================================================================
#CLASS NAME WILL NOT CHANGE BETWEEN VERSIONS
class NetworkClient():
#class variables
pipe = 0
#This does not stay a number
port = 49200
clientSocket = 0
serverSaysKeepSearching = True
serverIP = "127.0.1.1"
chunk = Chunk.Chunk()
key = 0
#constructor
def __init__(self, pipeendconnectedtocontroller):
self.pipe = pipeendconnectedtocontroller
self.clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "client socket created successfully"
try: #Main Client Loop
print "Entering Main Client Loop"
#getOS try block
try:
print "*************************************"
print " Network Client"
print "*************************************"
print "OS DETECTION:"
#Detecting Windows
if platform.system() == "Windows":
print platform.system()
print platform.win32_ver()
#Detecting Linux
elif platform.system() == "Linux":
print platform.system()
print platform.dist()
#Detecting OSX
elif platform.system() == "Darwin":
print platform.system()
print platform.mac_ver()
#Detecting an OS that is not listed
else:
print platform.system()
print platform.version()
print platform.release()
print "*************************************"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in getOS try block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#_str_ allows args to be printed directly
print inst
print "========================================================================================"
#Retreive the server's IP from the controller class
try:
print "Attempting to get serverIP from controller"
self.receiveServerIPFromController()
print "sucessfully received serverIP from controller"
except Exception as inst:
print "========================================================================================"
print "ERROR: An exception was thrown in serverIP try block"
#the exception instance
print type(inst)
#arguments stored in .args
print inst.args
#_str_ allows args to be printed directly
print inst
print "========================================================================================"
try:
print "Attempting to connect to server"
self.clientSocket.connect((self.serverIP, self.port))
print "Successfully connected to server"
except socket.timeout as msg:
print "========================================================================================"
print "ERROR: the connection has timed out. Check to see if you entered the correct IP Address."
print "Error code: " + str(msg[0]) + " Message: " + msg[1]
print "Socket timeout set to: " + self.clientSocket.gettimeout + " seconds"
print "========================================================================================"
except socket.error as msg:
print "========================================================================================"
print "ERROR: Failed to connect to server"
print "Error code: " + str(msg[0]) + " Message: " + msg[1]
raise Exception("Failed to connect to server")
#print "========================================================================================"
self.sendConnectedCommandToCOntroller()
#Client primary while loop
try:
#This should probably be 'while server days not done'
# since we might be just waiting on server, before we get a job (or in-between)
# Just my thoughts though --CJB
while self.serverSaysKeepSearching:
self.clientSocket.settimeout(2.0)
######################## SERVER-CLIENT Communication #############################################
#checking for server commands try block
try:
print "Checking for server commands..."
theInput = self.clientSocket.recv(2048)
if theInput == "DONE":
self.sendDoneCommandToController()
#Make this line seperate from the other print statements
print " "
print "Server has issued the DONE command."
print " "
serverSaysKeepSearching = False
break
#If the server wants to give us the next chunk, take it
#Server should be sending "NEXT" -> params -> data in seperate strings all to us
elif theInput == "NEXT":
try:
#and store it locally till controller is ready for it
self.chunk.params = self.clientSocket.recv(2048)
self.chunk.data = self.clientSocket.recv(2048)
#let controller know we're ready to give it a chunk
self.sendDoingStuffCommandToController()
#send chunk object to controller
self.pipe.send(self.chunk)
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the checking for server commands Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
except socket.timeout as inst:
print "Socket timed out. No new server command"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the checking for server commands Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
########################## Client - Controller Communication #########################################
#check for controller commands
recv = self.pipe.recv()
#If controller says 'next', say 'next' to server
if recv == "next":
self.sendNextCommandToServer()
#if controller says 'found' then send 'found' and key to server
elif recv == "found":
self.key = self.pipe.recv()
self.sendFoundSolutionToServer()
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client Primary Loop Try Block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "============================================================================================="
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Main Client Loop Try Block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "============================================================================================="
#End of constructor block
#======================================================================================
#CLIENT-SERVER COMMUNICATION FUNCTIONS
#This section contains methods the client will use to communicate with the server.
#======================================================================================
#Outbound communication functions
#NEXT
def sendNextCommandToServer(self):
#sends the NEXT command to the serve
try:
self.clientSocket.send("NEXT")
print "The NEXT command was sent to the server"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#FOUNDSOLUTION
def sendFoundSolutionToServer(self):
#sends the FOUNDSOLUTION command to the server, and key
try:
self.clientSocket.send("FOUNDSOLUTION")
self.clientSocket.send(self.key)
print "The FOUNDSOLUTION command was sent to the server"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#CRASHED
def sendCrashedCommandToServer(self):
#sends the CRASHED command to the server
#NOTICE: THIS COMMAND IS NOT IMPLEMENTED OR DOES NOT WORK, BUT STILL SENDS EMPTY STRING TO SERVER!
try:
self.clientSocket.send("CRASHED")
print "The CRASHED command was sent to the server"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#INVALIDCOMMAND
def sendInvalidCommandToServer(self):
#sends INVALIDCOMMAND command to server
try:
self.clientSocket.send("INVALIDCOMMAND")
print "The INVALIDCOMMAND command was sent to the server"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#Inbound communication functions
#DONE
def checkForDoneCommand(self, inboundString):
try:
if inboundString == "DONE":
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#next part of problem
#not sure what to check for here
#INVALIDCOMMAND
def checkForInvalidCommand(self, inboundString):
try:
if inboundString == "INVALIDCOMMAND":
return True
else:
return False
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Server Communication FUnctions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#======================================================================================
#CLIENT-CONTROLLER COMMUNICATION FUNCTIONS
#This section contains methods the client will use to communicate with the controller class
#======================================================================================
#Outbound communication functions with controller
#done
def sendDoneCommandToController(self):
try:
self.pipe.send("done")
print "The DONE command was sent to the Controller"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Controller Communication Functions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#connected
def sendConnectedCommandToCOntroller(self):
try:
self.pipe.send("connected")
print "The CONNECTED command was sent to the Controller"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Controller Communication Functions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
#doingStuff
def sendDoingStuffCommandToController(self):
try:
self.pipe.send("doingStuff")
print "The DOINGSTUFF command was sent to the Controller"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Controller Communication Functions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
######### NEW CODE########################################
#serverIP
def receiveServerIPFromController(self):
try:
#self.pipe.send("doingStuff")
print "Attempting to get serverIP from Controller (function block)"
self.serverIP = self.pipe.recv()
print "The ServerIP was received from the Controller (function block)"
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Client-Controller Communication Functions Try Block"
#the exception instance
print type(inst)
#srguments stored in .args
print inst.args
#_str_ allows args tto be printed directly
print inst
print "============================================================================================="
# To Be Deleted, since they don't have their try blocks anymore
'''
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Network Client class Try Block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "============================================================================================="
finally:
print "Closing the socket"
clientSocket.close()
'''
'''
except Exception as inst:
print "============================================================================================="
print "An exception was thrown in the Master Try Block"
print type(inst) #the exception instance
print inst.args #srguments stored in .args
print inst #_str_ allows args tto be printed directly
print "============================================================================================="
finally:
print "Program has ended"
'''
|
COCS4950G7/COSC4950
|
Source/Network/Obsolete/NetworkClient_rBugg.py
|
Python
|
gpl-3.0
| 20,107
|
# -*- coding: UTF-8 -*-
"""
Date utilities.
@author: Sébastien Renard <sebastien.renard@digitalfox.org>
@license: GPL v3 or later
"""
import operator
from datetime import date, datetime, timedelta
from yokadi.ycli import basicparseutils
from yokadi.core.yokadiexception import YokadiException
# Day name -> weekday number matching datetime.weekday() (Monday == 0).
WEEKDAYS = {"monday": 0, "tuesday": 1, "wednesday": 2, "thursday": 3, "friday": 4, "saturday": 5, "sunday": 6}
SHORT_WEEKDAYS = {"mo": 0, "tu": 1, "we": 2, "th": 3, "fr": 4, "sa": 5, "su": 6}
# Hints for parseHumaneDateTime: snap an unspecified time to the start or
# the end of the day.
TIME_HINT_BEGIN = "begin"
TIME_HINT_END = "end"
# strptime formats tried in order by testFormats(); first match wins.
DATE_FORMATS = [
    "%d/%m/%Y",
    "%d/%m/%y",
    "%d/%m",
]
TIME_FORMATS = [
    "%H:%M:%S",
    "%H:%M",
    "%H",
]
def parseDateTimeDelta(line):
    """Parse a relative time shift like "5M", "3H", "1D" or "2W" into a
    timedelta (minutes, hours, days, weeks). The number may be a float.

    @raise YokadiException: on a non-numeric amount or unknown suffix"""
    # FIXME: Do we really want to support float deltas?
    try:
        amount = float(line[:-1])
    except ValueError:
        raise YokadiException("Timeshift must be a float or an integer")
    units = {
        "W": timedelta(days=7),
        "D": timedelta(days=1),
        "H": timedelta(hours=1),
        "M": timedelta(minutes=1),
    }
    suffix = line[-1].upper()
    if suffix not in units:
        raise YokadiException("Unable to understand time shift. See help t_set_due")
    return amount * units[suffix]
def testFormats(text, formats):
for fmt in formats:
try:
return datetime.strptime(text, fmt), fmt
except ValueError:
pass
return None, None
def guessTime(text):
    """Parse *text* against TIME_FORMATS, honouring an optional am/pm
    suffix, and return a time object (or None when unparseable)."""
    # We do not use the "%p" format to handle AM/PM because its behavior
    # is locale-dependent.
    afternoon = False
    marker = text[-2:]
    if marker == "am":
        text = text[:-2].strip()
    elif marker == "pm":
        text = text[:-2].strip()
        afternoon = True
    parsed, _ = testFormats(text, TIME_FORMATS)
    if parsed is None:
        return None
    if afternoon:
        parsed += timedelta(hours=12)
    return parsed.time()
def parseHumaneDateTime(line, hint=None, today=None):
    """Parse human date and time and return structured datetime object
    Datetime can be absolute (23/10/2008 10:38) or relative (+5M, +3H, +1D, +6W)
    @param line: human date / time
    @param hint: optional hint to tell whether time should be set to the
    beginning or the end of the day when not specified.
    @param today: optional parameter to define a fake today date. Useful for
    unit testing.
    @type line: str
    @return: datetime object
    @raise YokadiException: when the input cannot be understood"""
    def guessDate(text):
        # Returns a date, defaulting the year to today's when absent.
        out, fmt = testFormats(text, DATE_FORMATS)
        if not out:
            return None
        if "%y" not in fmt and "%Y" not in fmt:
            out = out.replace(year=today.year)
        return out.date()
    def applyTimeHint(date, hint):
        # Snaps the time to 00:00:00 or 23:59:59 per the hint constants.
        if not hint:
            return date
        if hint == TIME_HINT_BEGIN:
            return date.replace(hour=0, minute=0, second=0)
        elif hint == TIME_HINT_END:
            return date.replace(hour=23, minute=59, second=59)
        else:
            raise Exception("Unknown hint %s" % hint)
    line = basicparseutils.simplifySpaces(line).lower()
    if not line:
        raise YokadiException("Date is empty")
    if today is None:
        today = datetime.today().replace(microsecond=0)
    if line == "now":
        return today
    if line == "today":
        return applyTimeHint(today, hint)
    # Check for "+<delta>" format
    if line.startswith("+"):
        return today + parseDateTimeDelta(line[1:])
    if line.startswith("-"):
        return today - parseDateTimeDelta(line[1:])
    # Check for "<weekday> [<time>]" format
    firstWord = line.split()[0]
    weekdayDict = {
        "today": today.weekday(),
        "tomorrow": (today.weekday() + 1) % 7,
    }
    weekdayDict.update(WEEKDAYS)
    weekdayDict.update(SHORT_WEEKDAYS)
    weekday = weekdayDict.get(firstWord)
    if weekday is not None:
        # Next occurrence of that weekday (today counts as 0 days ahead).
        date = today + timedelta(days=(weekday - today.weekday()) % 7)
        if " " in line:
            timeText = line.split(' ', 1)[1]
            tTime = guessTime(timeText)
            if tTime is None:
                raise YokadiException("Unable to understand time '%s'" % timeText)
            date = datetime.combine(date, tTime)
        else:
            date = applyTimeHint(date, hint)
        return date
    if " " in line:
        # Absolute date and time?
        dateText, timeText = line.split(' ', 1)
        tDate = guessDate(dateText)
        if tDate is not None:
            tTime = guessTime(timeText)
            if tTime is not None:
                return datetime.combine(tDate, tTime)
    # Only date?
    tDate = guessDate(line)
    if tDate is not None:
        dt = datetime.combine(tDate, today.time())
        return applyTimeHint(dt, hint)
    # Only time?
    tTime = guessTime(line)
    if tTime is not None:
        tDate = datetime.combine(today.date(), tTime)
        # A time already past today rolls over to tomorrow.
        if tTime > today.time():
            return tDate
        else:
            return tDate + timedelta(days=1)
    raise YokadiException("Unable to understand date '%s'" % line)
def formatTimeDelta(delta):
    """Friendly format a time delta:
    - years/months when over a year
    - months/days when over ~50 days
    - weeks/days when over a week
    - days otherwise, or hours/minutes below one day
    A leading "-" marks a negative delta.
    @param delta: the duration to format
    @type delta: timedelta (from datetime)
    @return: formated str"""
    prefix = ""
    value = ""
    if delta < timedelta(0):
        delta = -delta
        prefix = "-"
    # Use floor division throughout: "/" produces floats under Python 3,
    # which only rendered correctly because "%d" truncates them.
    if delta.days >= 365:
        value = "%dY" % (delta.days // 365)
        days = delta.days % 365
        if days > 30:
            value += ", %dM" % (days // 30)
    elif delta.days > 50:
        value = "%dM" % (delta.days // 30)
        days = delta.days % 30
        if days > 0:
            value += ", %dd" % days
    elif delta.days >= 7:
        value = "%dw" % (delta.days // 7)
        days = delta.days % 7
        if days > 0:
            value += ", %dd" % days
    elif delta.days > 0:
        value = "%dd" % delta.days
    else:
        minutes = delta.seconds // 60
        hours = minutes // 60
        minutes = minutes % 60
        if hours >= 1:
            value = "%dh " % hours
        else:
            value = ""
        value += "%dm" % minutes
    return prefix + value
def getHourAndMinute(token):
    """Extract hour and minute from a HH:MM (or bare HH) token
    #TODO: move this in date utils
    @param token: HH:MM (or HH) string
    @return: (int, int)
    @raise YokadiException: when hour/minute are not integers"""
    parts = token.split(":")
    if len(parts) == 2:
        hourText, minuteText = parts
    else:
        # no single ":" separator: treat the whole token as the hour
        hourText, minuteText = token, 0
    try:
        return int(hourText), int(minuteText)
    except ValueError:
        raise YokadiException("You must provide integer for hour/minute")
def getWeekDayNumberFromDay(day):
    """Return week day number (0-6) from week day name (short or long)
    @param day: week day as a string in short or long format (in english)
    @type day: str
    @return: week day number (int)"""
    if len(day) == 2 and day in SHORT_WEEKDAYS:
        return SHORT_WEEKDAYS[day]
    if day in WEEKDAYS:
        return WEEKDAYS[day]
    raise YokadiException("Day must be one of the following: [mo]nday, [tu]esday, [we]nesday, [th]ursday, [fr]iday,"
                          " [sa]turday, [su]nday")
def parseDateLimit(line, today=None):
    """Parse a string of the form <operator><limit>
    - operator is one of: < <= >= > (default to <=)
    - limit is a date as understood by parseHumaneDateTime()
    @param line: the string to parse
    @param today: optional specification of current day, for unit testing
    @return: (operator, date)"""
    # Order matters: match longest operators first!
    table = (
        ("<=", operator.__le__, TIME_HINT_END),
        (">=", operator.__ge__, TIME_HINT_BEGIN),
        (">", operator.__gt__, TIME_HINT_END),
        ("<", operator.__lt__, TIME_HINT_BEGIN),
    )
    op, hint = operator.__le__, TIME_HINT_END
    for prefix, candidateOp, candidateHint in table:
        if line.startswith(prefix):
            op, hint = candidateOp, candidateHint
            line = line[len(prefix):]
            break
    return op, parseHumaneDateTime(line, today=today, hint=hint)
def parseMinDate(line):
    """Turn a keyword ("today", "thisweek", "thismonth") or any humane
    date string into the corresponding minimum date."""
    minDate = date.today()
    if line == "thisweek":
        minDate -= timedelta(minDate.weekday())
    elif line == "thismonth":
        minDate = minDate.replace(day=1)
    elif line != "today":
        minDate = parseHumaneDateTime(line).date()
    return minDate
# vi: ts=4 sw=4 et
|
agateau/yokadi
|
yokadi/core/ydateutils.py
|
Python
|
gpl-3.0
| 8,617
|
#!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
import Biskit.tools as t
import re, shutil
def syntax():
    # Print usage help (Python 2 print statement); called on any error.
    print \
    """
    replace_wildcard_import -i |src_file| -m |module_name|
                [-as |import_as| -e |exclude_method1 ..| ]
    example:
    replace_wildcard_import -i tools.py -m Numeric -as N -e Complex
    will replace all calls to any Numeric method (except Complex) by
    N.|method|
    """
def re_lst( module, exclude ):
    """
    Generate a list of compiled regex, one per attribute of *module*, each
    capturing a bare call like "name(" that is not already prefixed.
    @param module: module name as a string (e.g. 'Numeric')
    @param exclude: attribute names to skip
    @return: list of compiled regex objects
    """
    import importlib
    # dir() on the real module object replaces the fragile exec-based
    # lookup, which also breaks under Python 3 (exec cannot bind locals).
    methods = dir( importlib.import_module( module ) )
    r = [ re.compile( r'[^a-zA-Z0-9_\.](' + m + r'\()' ) for m in methods
          if not m in exclude ]
    return r
def replace_line( l, method_patterns, prefix ):
    """
    Prefix every method call in *l* matched by *method_patterns* with
    *prefix*; return the modified line and the number of patterns applied.
    """
    hits = 0
    for pattern in method_patterns:
        found = pattern.findall( l )
        if found:
            l = l.replace( found[0], prefix + found[0] )
            hits += 1
    return l, hits
def replace_import_statement( l, module, importas ):
    """Rewrite 'from MODULE import *' in line *l* into
    'import MODULE [as ALIAS]' (no alias when ALIAS equals MODULE)."""
    alias = '' if importas == module else ' as ' + importas
    return l.replace( 'from '+module+' import *', 'import '+module+alias )
# Command-line defaults: -m module, -as alias, -e excluded names.
options = t.cmdDict( {'m':'Numeric', 'as':'N', 'e':['Complex'] } )
module = options['m']
importas = options.get('as', module )
srcfiles = t.toList( options.get('i', None) )
exclude = t.toList( options['e'] )
try:
    # HACK: imports a module named on the command line via exec — only run
    # this script on trusted input.
    exec('import '+module)
    for fname in srcfiles:
        fname = t.absfile( fname )
        # keep a backup copy "<file>_" and rewrite the original in place
        shutil.copy( fname, fname + '_' )
        methods = re_lst( module, exclude )
        fold = open( fname + '_' )
        fnew = open( fname, 'w' )
        i = 0
        for l in fold:
            i += 1
            l = replace_import_statement( l, module, importas )
            l, occurrences = replace_line( l, methods, importas + '.' )
            if occurrences > 0:
                # report each rewritten line on stderr
                t.errWriteln( '%s %5i %2i matches:\n\t%s' %
                              (t.stripFilename(fname), i, occurrences, l) )
            fnew.write( l )
        fnew.close()
        fold.close()
# NOTE(review): bare except silently swallows ALL errors (including
# KeyboardInterrupt) and just prints the usage text — consider narrowing.
except:
    syntax()
|
graik/biskit
|
archive_biskit2/scripts/Biskit/replace_wildcard_import.py
|
Python
|
gpl-3.0
| 3,009
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-24 16:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``poc_target_sample`` (a sample target URL) to the ``poc`` model."""

    dependencies = [
        ('scan', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='poc',
            name='poc_target_sample',
            field=models.CharField(default='http://www.target.com', max_length=100),
        ),
    ]
|
404fovnd/vul
|
scan/migrations/0002_poc_poc_target_sample.py
|
Python
|
gpl-3.0
| 476
|
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from manual.models import Manual
from manual.forms import ManualFilterForm
from manual.models import Manualgroup
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib.auth.models import User
from django.db.models import Q
@login_required(login_url='/admin/login/')
def main(request):
    """Render the manual list page, optionally filtered by a search string."""
    if request.user.is_authenticated():
        sensor = False
        if request.method == 'GET':
            # Persist the latest search term in the session.
            request.session['search'] = request.GET.get('search')
            search = request.session.get('search', request.session['search'])
            manual_list = Manual.objects.all()
            if search:
                sensor = True
                # Match the term against every searchable manual field.
                manual_list = manual_list.filter(Q(name__icontains=search) | Q(model__icontains=search) |
                                                 Q(description__icontains=search) | Q(group__name__icontains=search) |
                                                 Q(author__icontains=search)
                                                 )
            # Group manuals by their group object for the template.
            manual_list_groups = {}
            for manual in manual_list:
                if not manual.group in manual_list_groups:
                    manual_list_groups.update({
                        manual.group: []
                    })
                manual_list_groups[manual.group].append(manual)
        else:
            sensor = False
            # NOTE(review): this returns the ReferenceError *class*, not an
            # HttpResponse, for non-GET requests — looks like a bug; confirm
            # intended behavior before changing.
            return ReferenceError
        filter_form = ManualFilterForm()
        return render_to_response('manual_list.html', {
            'user': request.user,
            'manual_list_groups': manual_list_groups,
            'filter_form': filter_form,
            'sensor': sensor,
        })
    else:
        error = 'Ви не авторизированы'
        return render_to_response('manual_list.html', {'user': request.user, 'error' : error,})
@login_required(login_url='/admin/login/')
def manual_full(request, id):
    """Render the detail page for a single manual."""
    # Fetched before the auth check, so a missing id raises either way
    # (same as the original flow).
    manual = Manual.objects.get(id=id)
    context = {'user': request.user}
    if request.user.is_authenticated():
        context['manual'] = manual
    else:
        context['error'] = 'Ви не авторизированы'
    return render_to_response('manual.html', context)
@login_required(login_url='/admin/login/')
def manual_count(request, id):
    """Render the index page with the total number of manuals."""
    # `id` is unused but kept so URLconf-bound callers keep working.
    manual = Manual.objects.all().count()
    context = {'user': request.user}
    if request.user.is_authenticated():
        context['manual'] = manual
    else:
        context['error'] = 'Ви не авторизированы'
    return render_to_response('index.html', context)
|
ximepa/docp
|
manual/views.py
|
Python
|
gpl-3.0
| 2,763
|
#!/usr/bin/env python
""" Get a proxy from the proxy manager
"""
import os
from DIRAC.Core.Base.Script import Script
# Configure the DIRAC command-line interface: usage text and switches.
Script.setUsageMessage(
    "\n".join(
        [
            __doc__.split("\n")[1],
            "Usage:",
            " %s [options] UserName Role" % Script.scriptName,
            "Arguments:",
            " UserName: User DN",
        ]
    )
)
Script.registerSwitch("R:", "role=", "set the User DN.")
Script.parseCommandLine()
# Pick up the -R/--role switch value.
for unprocSw in Script.getUnprocessedSwitches():
    if unprocSw[0] in ("R", "role"):
        role = unprocSw[1]
# NOTE(review): `role` is only bound when -R/--role is given; without it the
# print below raises NameError.  Likewise args[0] raises IndexError when no
# positional argument is passed.  Confirm whether that is acceptable for a
# Jenkins-only helper before hardening.
args = Script.getPositionalArgs()
dn = args[0]
uid = os.getuid()
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
print("Getting proxy for User DN: %s, User role %s" % (dn, role))
# Download a short-lived proxy to the conventional /tmp/x509up_u<uid> path.
res = gProxyManager.downloadProxyToFile(
    dn,
    role,
    limited=False,
    requiredTimeLeft=1200,
    cacheTime=43200,
    filePath="/tmp/x509up_u%s" % uid,
    proxyToConnect=False,
    token=False,
)
if not res["OK"]:
    print("Error downloading proxy", res["Message"])
    exit(1)
|
DIRACGrid/DIRAC
|
tests/Jenkins/dirac-proxy-download.py
|
Python
|
gpl-3.0
| 1,068
|
#!/usr/bin/env python
# coding: utf-8
import pytest
import capytaine as cpt
try:
import vtk
except ImportError:
vtk = None
try:
import IPython
except ImportError:
IPython = None
@pytest.mark.skipif(vtk is None or IPython is None,
                    reason='vtk and/or Ipython are not installed')
def test_animation_of_dofs():
    # Smoke test: animating a single heave dof and embedding the viewer in a
    # notebook should run without raising.
    body = cpt.Sphere()
    body.add_translation_dof(name="Heave")
    animation = body.animate({"Heave": 0.2}, loop_duration=1.0)
    animation.embed_in_notebook()
|
mancellin/capytaine
|
pytest/test_ui_vtk.py
|
Python
|
gpl-3.0
| 512
|
""" py2app build script for VALENCE.
Usage: python setup.py py2app
"""
from setuptools import setup
# py2app bundle configuration for the VALENCE app.
setup(
    app = ["attractor.py"],    # main entry script of the bundle
    py_modules = ["headset"],
    # Resources copied into the .app: graphics and audio assets.
    data_files = [
        ('g', [
            'g/bg.png',
            'g/blob.png']),
        ('g/cell', [
            'g/cell/flower1.png',
            'g/cell/flower2.png',
            'g/cell/flower3.png',
            'g/cell/flower4.png',
            'g/cell/flower5.png',
            'g/cell/flower6.png']),
        ('audio', [
            'audio/ambient_hi.wav',
            'audio/ambient_lo.wav',
            'audio/attract.wav',
            'audio/repulse.wav'])
    ],
    setup_requires = ["py2app"],
    options = dict(
        py2app = dict(
            plist = 'Info.plist',
            iconfile = 'g/valence.icns',
            packages = ["nodebox"] # Don't zip it in site-packages.zip,
            # we need to access the data files in nodebox/gui/theme.
        ))
)
|
nodebox/valence
|
setup.py
|
Python
|
gpl-3.0
| 986
|
import cv2
import numpy as np
def nothing(x):
    """No-op trackbar callback (OpenCV requires a callable)."""
    pass
# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
# create trackbars for color change
cv2.createTrackbar('R', 'image', 0, 255, nothing)
cv2.createTrackbar('G', 'image', 0, 255, nothing)
cv2.createTrackbar('B', 'image', 0, 255, nothing)
# create switch for ON/OFF functionality
switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'image', 0, 1, nothing)
while 1:
    cv2.imshow('image', img)
    # Quit the UI loop on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # get current positions of four trackbars
    r = cv2.getTrackbarPos('R', 'image')
    g = cv2.getTrackbarPos('G', 'image')
    b = cv2.getTrackbarPos('B', 'image')
    s = cv2.getTrackbarPos(switch, 'image')
    if s == 0:
        img[:] = 0
    else:
        # OpenCV stores pixels in BGR channel order.
        img[:] = [b, g, r]
cv2.destroyAllWindows()
|
RyanChinSang/ECNG3020-ORSS4SCVI
|
BETA/TestCode/OpenCV/APP-ColorFilter2.py
|
Python
|
gpl-3.0
| 872
|
import bootstrap
import unittest
import unittest.mock
import cifparser
import collections
from mandelbrot.model.evaluation import *
class TestProcessCPU(unittest.TestCase):
    """Unit tests for the ProcessCPU check's health evaluation thresholds."""

    @unittest.mock.patch('time.time')
    def test_execute_ProcessCPU_check_healthy(self, time):
        "ProcessCPU check should return healthy evaluation when not breaching threshold"
        # Two time.time() samples, 100 seconds apart, frame the interval.
        time.side_effect = [0.0, 100.0]
        values = cifparser.ValueTree()
        values.put_field(cifparser.ROOT_PATH, "process matches name", "foo")
        values.put_field(cifparser.ROOT_PATH, "extended summary", "true")
        ns = cifparser.Namespace(values)
        from mandelbrot.check.processcpu import ProcessCPU
        check = ProcessCPU(ns)
        with unittest.mock.patch.object(check, 'get_process') as get_process:
            process = unittest.mock.Mock()
            process.pid = 42
            process.create_time = unittest.mock.Mock(return_value=0.0)
            # (user, system) CPU times: no consumption between the samples.
            process.cpu_times = unittest.mock.Mock(side_effect=[
                (0.0, 0.0),
                (0.0, 0.0),
                ])
            get_process.return_value = process
            evaluation = Evaluation()
            context = check.init()
            check.execute(evaluation, context)
            print(evaluation)
            self.assertEqual(evaluation.get_health(), HEALTHY)

    @unittest.mock.patch('time.time')
    def test_execute_ProcessCPU_check_user_degraded(self, time):
        "ProcessCPU check should return degraded evaluation when user % breaches degraded threshold"
        time.side_effect = [0.0, 100.0]
        values = cifparser.ValueTree()
        values.put_field(cifparser.ROOT_PATH, "process matches name", "foo")
        values.put_field(cifparser.ROOT_PATH, "extended summary", "true")
        values.put_field(cifparser.ROOT_PATH, "user degraded threshold", "25%")
        values.put_field(cifparser.ROOT_PATH, "user failed threshold", "75%")
        ns = cifparser.Namespace(values)
        from mandelbrot.check.processcpu import ProcessCPU
        check = ProcessCPU(ns)
        with unittest.mock.patch.object(check, 'get_process') as get_process:
            process = unittest.mock.Mock()
            process.pid = 42
            process.create_time = unittest.mock.Mock(return_value=0.0)
            # 50 user-seconds over a 100-second window = 50% > 25% degraded.
            process.cpu_times = unittest.mock.Mock(side_effect=[
                (0.0, 0.0),
                (50.0, 0.0),
                ])
            get_process.return_value = process
            evaluation = Evaluation()
            context = check.init()
            check.execute(evaluation, context)
            print(evaluation)
            self.assertEqual(evaluation.get_health(), DEGRADED)

    @unittest.mock.patch('time.time')
    def test_execute_ProcessCPU_check_user_failed(self, time):
        "ProcessCPU check should return failed evaluation when user % breaches failed threshold"
        time.side_effect = [0.0, 100.0]
        values = cifparser.ValueTree()
        values.put_field(cifparser.ROOT_PATH, "process matches name", "foo")
        values.put_field(cifparser.ROOT_PATH, "extended summary", "true")
        values.put_field(cifparser.ROOT_PATH, "user degraded threshold", "25%")
        values.put_field(cifparser.ROOT_PATH, "user failed threshold", "75%")
        ns = cifparser.Namespace(values)
        from mandelbrot.check.processcpu import ProcessCPU
        check = ProcessCPU(ns)
        with unittest.mock.patch.object(check, 'get_process') as get_process:
            process = unittest.mock.Mock()
            process.pid = 42
            process.create_time = unittest.mock.Mock(return_value=0.0)
            # 100 user-seconds over a 100-second window = 100% > 75% failed.
            process.cpu_times = unittest.mock.Mock(side_effect=[
                (0.0, 0.0),
                (100.0, 0.0),
                ])
            get_process.return_value = process
            evaluation = Evaluation()
            context = check.init()
            check.execute(evaluation, context)
            print(evaluation)
            self.assertEqual(evaluation.get_health(), FAILED)
|
msfrank/mandelbrot
|
test/test_check_processcpu.py
|
Python
|
gpl-3.0
| 4,020
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# nose
from nose.tools import eq_
# Paste
from paste.util.converters import asbool
# live test case
from . import LiveTestCase
# SIO live test services
from . import zato_test_sio_live
class SIOLiveTestCase(LiveTestCase):
    # Live round-trip tests for SIO services over JSON and XML/SOAP channels.
    # NOTE: Python 2 source ("except Exception, e" below).
    SERVICES_SOURCE = 'zato_test_sio_live.py'

    def _run_tests_output_assigned_manually(self, service_name, service_class):
        """Invoke *service_name* over every supported channel and validate output."""
        # No input provided hence we expect a proper message on output
        try:
            response = self.invoke_asi(service_name)
        except Exception, e:
            self.assertIn('Missing input', e.message)
        test_data = service_class.test_data
        # ################################################################################################################################
        # JSON request/response over AnyServiceInvoker
        response = self.invoke_asi(service_name, test_data)
        service_class.check_json(response.response, False)
        # ################################################################################################################################
        # JSON request/response over JSONClient
        response = self.invoke_json(
            self.set_up_client_and_channel(service_name, 'json', 'plain_http'), test_data)
        service_class.check_json(response.response, False)
        # ################################################################################################################################
        # Canned XML request mirroring service_class.test_data.
        request = """<request>
        <should_as_is>True</should_as_is>
        <is_boolean>True</is_boolean>
        <should_boolean>False</should_boolean>
        <csv1>1,2,3,4</csv1>
        <dict>
        <item><key>a</key><value>b</value></item>
        <item><key>c</key><value>d</value></item>
        </dict>
        <float>2.3</float>
        <integer>190</integer>
        <integer2>0</integer2>
        <list>
        <item>1</item>
        <item>2</item>
        <item>3</item>
        </list>
        <list_of_dicts>
        <item_dict>
        <item>
        <key>1</key>
        <value>11</value>
        </item>
        <item>
        <key>2</key>
        <value>22</value>
        </item>
        </item_dict>
        <item_dict>
        <item>
        <key>3</key>
        <value>33</value>
        </item>
        </item_dict>
        <item_dict>
        <item>
        <key>4</key>
        <value>44</value>
        </item>
        <item>
        <key>5</key>
        <value>55</value>
        </item>
        <item>
        <key>3</key>
        <value>33</value>
        </item>
        <item>
        <key>2</key>
        <value>22</value>
        </item>
        <item>
        <key>1</key>
        <value>11</value>
        </item>
        </item_dict>
        </list_of_dicts>
        <unicode1>zzzä</unicode1>
        <unicode2>zä</unicode2>
        <utc>2012-01-12T03:12:19+00:00</utc>
        </request>"""
        for request_wrapper, xpath_string_pattern, transport in self.get_xml_soap_config():
            # XML request/response over XMLClient
            client = self.set_up_client_and_channel(service_name, 'xml', transport)
            response = self.invoke_xml(client, request_wrapper.format(request).encode('utf-8'))
            for name in('should_as_is', 'is_boolean', 'should_boolean', 'csv1', 'float', 'integer', 'integer2',
                        'unicode1', 'unicode2', 'utc'):
                expected = test_data[name]
                actual = self.get_xml_value_from_response(xpath_string_pattern, response, name)
                # XML transports everything as text; coerce the expected value
                # to the type the service is declared to emit.
                if name in ('is_boolean', 'should_boolean'):
                    expected = asbool(expected)
                if name == 'float':
                    expected = float(expected)
                if name in ('integer', 'integer2'):
                    expected = int(expected)
                if name == 'utc':
                    expected = expected.replace('+00:00', '')
                eq_(actual, expected, 'name:`{}` actual:`{}` expected:`{}`'.format(name, repr(actual), repr(expected)))

    # ################################################################################################################################

    def test_channels_output_assigned_manually(self):
        # Runs the manual-output scenario for both test services.
        if not self.should_run:
            return
        service_data = (
            ('zato-test-sio-live.roundtrip', zato_test_sio_live.Roundtrip),
            ('zato-test-sio-live.from-dict', zato_test_sio_live.FromDict),
        )
        for service_info in service_data:
            self._run_tests_output_assigned_manually(*service_info)

    # ################################################################################################################################

    def test_channels_output_from_sqlalchemy(self):
        # Verifies SQLAlchemy-backed output over ASI, JSON and XML channels.
        if not self.should_run:
            return
        service_name = 'zato-test-sio-live.from-sql-alchemy'
        expected = [
            ('impl_name', 'zato.server.service.internal.Ping'),
            ('is_active', True),
            ('is_internal', True),
            ('name', 'zato.ping'),
            ('slow_threshold', 99999)
        ]
        # ################################################################################################################################
        # JSON request/response over AnyServiceInvoker
        response = self.invoke_asi(service_name, {})
        eq_(sorted(response.response.items()), expected)
        # ################################################################################################################################
        # JSON request/response over JSONClient
        response = self.invoke_json(self.set_up_client_and_channel(service_name, 'json', 'plain_http'), {})
        eq_(sorted(response.response.items()), expected)
        # ################################################################################################################################
        for request_wrapper, xpath_string_pattern, transport in self.get_xml_soap_config():
            # XML request/response over XMLClient
            client = self.set_up_client_and_channel(service_name, 'xml', transport)
            response = self.invoke_xml(client, request_wrapper.format('<dummy/>').encode('utf-8'))
            actual_items = {}
            for name in ('name', 'is_active', 'impl_name', 'is_internal', 'slow_threshold'):
                actual_items[name] = self.get_xml_value_from_response(xpath_string_pattern, response, name)
            eq_(sorted(actual_items.items()), expected)
    # ################################################################################################################################
|
alirizakeles/zato
|
code/zato-server/test/zato/server/live/test_sio_live.py
|
Python
|
gpl-3.0
| 7,674
|
# -*- encoding: utf-8 -*-
"""Test class for CLI Foreman Discovery
@Requirement: Discoveredhost
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: CLI
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from robottelo.decorators import run_only_on, stubbed, tier3
from robottelo.test import CLITestCase
class DiscoveryTestCase(CLITestCase):
    """Implements Foreman discovery CLI tests."""
    # NOTE: every test below is a @stubbed placeholder; the bodies are
    # intentionally empty and the docstrings carry the test-case metadata
    # (@id, @Setup, @Assert, ...) consumed by the test infrastructure.

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_custom_facts_pxeless_discovery(self):
        """Check if defined custom facts of pxeless host are correctly
        displayed under host's facts
        @id: 0d39f2cc-654f-41ed-8e31-4d9a37c5b9b1
        @Setup:
        1. Provisioning should be configured
        2. Host is already discovered
        @Steps: Validate specified custom facts
        @Assert: All defined custom facts should be displayed correctly
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_custom_facts_pxe_discovery(self):
        """Check if defined custom facts of pxe-based discovered host are
        correctly displayed under host's facts
        @id: 2c65925c-05d9-4f6d-b1b7-1fa1492c95a8
        @Setup:
        1. Provisioning should be configured
        2. Host is already discovered
        @Steps: Validate specified custom facts
        @Assert: All defined custom facts should be displayed correctly
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_provision_pxeless_host(self):
        """Provision the pxe-less discovered host from cli
        @id: ae7f3ce2-e66e-44dc-85cb-0c3c4782cbb1
        @Setup: Host should already be discovered
        @Assert: Host should be provisioned successfully and entry from
        discovered host list should be auto removed
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_provision_pxe_host(self):
        """Provision the pxe based discovered host from cli
        @id: b5385fe3-d532-4373-af64-5492275ff8d4
        @Setup: Host should already be discovered
        @Assert: Host should be provisioned successfully and entry from
        discovered host list should be automatically removed.
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_delete_pxeless_host(self):
        """Delete the selected pxe-less discovered host
        @id: 3959abd7-a1c0-418f-a75a-dec06b5ea0e2
        @Setup: Host should already be discovered
        @Assert: Selected host should be removed successfully
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_delete_pxe_host(self):
        """Delete the selected pxe-based discovered host
        @id: c4103de8-145c-4a7d-b837-a1dec97231a2
        @Setup: Host should already be discovered
        @Assert: Selected host should be removed successfully
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_refresh_facts_pxe_host(self):
        """Refresh the facts of pxe based discovered hosts by adding a new NIC
        @id: 410eaa5d-cc6a-44f7-8c6f-e8cfa81610f0
        @Setup: Host should already be discovered
        @Assert: Facts should be refreshed successfully with a new NIC
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_refresh_facts_of_pxeless_host(self):
        """Refresh the facts of pxeless discovered hosts by adding a new NIC
        @id: 2e199eaa-9651-47b1-a2fd-622778dfe68e
        @Setup: Host should already be discovered
        @Assert: Facts should be refreshed successfully with a new NIC
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_reboot_pxe_host(self):
        """Reboot pxe based discovered hosts
        @id: 9cc17742-f810-4be7-b410-a6c68e6cc64a
        @Setup: Host should already be discovered
        @Assert: Host is rebooted
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_reboot_pxeless_host(self):
        """Reboot pxe-less discovered hosts
        @id: e72e1607-8778-45b6-b8b9-8215514546f0
        @Setup: PXELess host should already be discovered
        @Assert: Host is rebooted
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_auto_provision_pxe_host(self):
        """Discover a pxe based host and auto-provision it with
        discovery rule and by enabling auto-provision flag
        @id: 701a1892-1c6a-4ba1-bbd8-a37b7fb02fa0
        @Assert: Host should be successfully rebooted and provisioned
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_auto_provision_pxeless_host(self):
        """Discover a pxe-less host and auto-provision it with
        discovery rule and by enabling auto-provision flag
        @id: 298417b3-d242-4999-89f9-198095704c0e
        @Assert: Host should be successfully rebooted and provisioned
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_list_discovered_host(self):
        """List pxe-based and pxe-less discovered hosts
        @id: 3a827080-3fab-4f64-a830-1b41841aa2df
        @Assert: Host should be discovered and listed with names.
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_assign_discovery_manager_role(self):
        """Assign 'Discovery_Manager' role to a normal user
        @id: f694c361-5fbb-4c3a-b2ff-6dfe6ea14820
        @Assert: User should be able to view, provision, edit and destroy one
        or more discovered host as well view, create_new, edit, execute and
        delete discovery rules.
        @caseautomation: notautomated
        @CaseLevel: System
        """

    @run_only_on('sat')
    @stubbed()
    @tier3
    def test_positive_assign_discovery_role(self):
        """Assign 'Discovery" role to a normal user
        @id: 873e8411-563d-4bf9-84ce-62e522410cfe
        @Assert: User should be able to list, provision, and destroy one
        or more discovered host
        @caseautomation: notautomated
        @CaseLevel: System
        """
|
Ichimonji10/robottelo
|
tests/foreman/cli/test_discoveredhost.py
|
Python
|
gpl-3.0
| 7,009
|
#!/usr/bin/env python3
from configparser import ConfigParser
from os import environ, getcwd, chdir
from os.path import isdir, isfile, join as path_join
from sys import argv, exit
# INSTALLATION_PATH is injected by scripts/setup.sh; without it we cannot run.
# Error lines are formatted as gopher menu error entries ("3...<TAB>...").
try: root = INSTALLATION_PATH
except NameError:
    print('3*** configuration error: you must run scripts/setup.sh first! ***\tfake\t(NULL)\t0')
    exit(255)
chdir(root)
# Setup config
configDefaults = {
    'server': {
        'host': 'localhost',
        'port': 70,
        'maxAuthorLength': 16,
    },
    'file': {
        'db': 'data/database.sqlite3',
        'lock': 'data/lock',
        'gopher': '.gopher',
        'wordlist': '/usr/share/dict/words',
        'words': 'data/words',
        'upload': '../upload'
    },
    'path': {
        'board': '/',
        'post': '/post/',
        'del': '/del/',
        'upload': '/upload/',
        'register': '/register/',
        'changepw': '/changepw/'
    },
}
boardconfDefaults = {
    'board': {
        'showThreads': 10,
        'showReplies': 3,
        'showTextLines': 3,
        'preferThreadWords': True,
        'prune': 50,
        'defaultPassword': 'password',
        'throttle': 120,
        'anonPost': True
    },
}
# Defaults first, then user overrides from data/*.ini.
config = ConfigParser()
config.read_dict(configDefaults)
config.read(path_join(getcwd(), 'data/config.ini'))
boardconf = ConfigParser(default_section='board')
boardconf.read_dict(boardconfDefaults)
boardconf.read(path_join(getcwd(), 'data/boards.ini'))
# Path resolution
# Make every relative file path absolute, then existence-check it.
for f, path in config['file'].items():
    if not path.startswith('/'):
        config['file'][f] = path_join(getcwd(), config['file'][f])
    path = config['file'][f]
    if f == 'words':
        pass
    elif f == 'lock':
        pass
    elif f == 'upload':
        if not isdir(path):
            # NOTE(review): unlike the isfile branch below, this prints the
            # error but does NOT exit(255) — execution continues; confirm
            # whether that is intentional.
            print('3*** configuration error: "{}" does not exist ***\tfake\t(NULL)\t0'.format(path))
    elif not isfile(path) and len(argv) < 4:
        print('3*** configuration error: "{}" does not exist ***\tfake\t(NULL)\t0'.format(path))
        exit(255)
config['file']['root'] = root
# Type assertions
try:
    boardconf.getboolean('board', 'preferThreadWords')
    boardconf.getboolean('board', 'anonPost')
except ValueError:
    print('3*** configuration error: board.{preferThreadWords, anonPost} must be "yes" or "no" ***\tfake\t(NULL)\t0')
    exit(255)
try:
    boardconf.getint('board', 'showThreads')
    boardconf.getint('board', 'showReplies')
    boardconf.getint('board', 'showTextLines')
    boardconf.getint('board', 'prune')
    boardconf.getint('board', 'throttle')
    config.getint('server', 'maxAuthorLength')
except ValueError:
    print('3*** configuration error: board.{showThreads, showReplies, showTextLines, prune, maxAuthorLength, throttle} and server.maxAuthorLength must be integers ***\tfake\t(NULL)\t0')
    exit(255)
# When run directly (not via a gopher REQUEST), act as a config lookup tool.
if __name__ == '__main__' and 'REQUEST' not in environ:
    if len(argv) > 2:
        try: print(config[argv[1]][argv[2]])
        except KeyError:
            print('Unknown configuration option')
            exit(1)
|
sroracle/70chan
|
config.py
|
Python
|
gpl-3.0
| 3,016
|
from pyrser import meta, directives
from pyrser.grammar import Grammar
from pyrser.hooks import echo
from pyrser.parsing.node import Node
from pyrser.directives import ignore
from cnorm import nodes
from cnorm.parsing.expression import Expression
class Statement(Grammar, Expression):
    """
    interaction with other CNORM PART:
    Declaration.init_declarator -> compound_statement
    Expression.primary_expression
    """
    # Entry rule used when this grammar is invoked stand-alone.
    entry = "single_statement"
    # PEG grammar for C statements; the #hooks referenced here are the
    # @meta.hook(Statement) functions defined below this class.
    grammar = """
        /*
            Comment works as in C/C++
        */
        single_statement = [
            [compound_statement
            | labeled_statement
            | expression_statement
            ]:>_
        ]
        compound_statement = [
            [
                '{'
                __scope__:current_block
                #new_blockstmt(_, current_block)
                [
                    line_of_code
                ]*
                '}'
            ]
        ]
        line_of_code = [
            single_statement:line
            #end_loc(current_block, line)
        ]
        labeled_statement = [
            Expression.rootidentifier:ident
            [ #check_stmt(ident, "if") if_statement:>_
            | #check_stmt(ident, "for") for_statement:>_
            | #check_stmt(ident, "while") while_statement:>_
            | #check_stmt(ident, "switch") switch_statement:>_
            | #check_stmt(ident, "do") do_statement:>_
            | #check_stmt(ident, "return") return_statement:>_
            | #check_stmt(ident, "goto") goto_statement:>_
            | #check_stmt(ident, "case") case_statement:>_
            | #check_stmt(ident, "break") ';' #new_break(_)
            | #check_stmt(ident, "continue") ';' #new_continue(_)
            | ':' #new_label(_, ident)
            ]
        ]
        if_statement = [
            '(' expression:cond ')'
            single_statement:then
            __scope__:else
            [
                "else"
                single_statement:>else
            ]?
            #new_if(_, cond, then, else)
        ]
        for_statement = [
            '('
            expression_statement:init
            expression_statement:cond
            expression?:inc
            ')'
            single_statement:body
            #new_for(_, init, cond, inc, body)
        ]
        while_statement = [
            '('
            expression:cond
            ')'
            single_statement:body
            #new_while(_, cond, body)
        ]
        switch_statement = [
            '(' expression:cond ')'
            single_statement:body
            #new_switch(_, cond, body)
        ]
        do_statement = [
            single_statement:body
            "while" '(' expression:cond ')' ';'
            #new_do(_, cond, body)
        ]
        return_statement = [
            expression?:e ';'
            #new_return(_, e)
        ]
        goto_statement = [
            expression:e ';'
            #new_goto(_, e)
        ]
        range_expression = [
            constant_expression:>_
            [
                "..."
                constant_expression:r
                #new_range(_, r)
            ]?
        ]
        case_statement = [
            range_expression:e #new_case(_, e)
            ':'
        ]
        expression_statement = [
            [expression:e #new_expr(_, e)]?
            ';'
        ]
    """
@meta.hook(Statement)
def new_expr(self, ast, expr):
    """Wrap an expression into an ExprStmt node."""
    ast.set(nodes.ExprStmt(expr))
    return True

@meta.hook(Statement)
def new_if(self, ast, cond_expr, then_expr, else_expr):
    """Build an If node from condition, then-branch and optional else-branch."""
    ast.set(nodes.If(cond_expr, then_expr, else_expr))
    return True

@meta.hook(Statement)
def new_for(self, ast, init, cond, inc, body):
    """Build a For node from its three header clauses and body."""
    ast.set(nodes.For(init, cond, inc, body))
    return True

@meta.hook(Statement)
def new_while(self, ast, cond, body):
    """Build a While node."""
    ast.set(nodes.While(cond, body))
    return True

@meta.hook(Statement)
def new_switch(self, ast, cond, body):
    """Build a Switch node."""
    ast.set(nodes.Switch(cond, body))
    return True

@meta.hook(Statement)
def new_do(self, ast, cond, body):
    """Build a Do (do/while) node."""
    ast.set(nodes.Do(cond, body))
    return True

@meta.hook(Statement)
def new_return(self, ast, expr):
    """Build a Return node (expr may be unset for bare `return;`)."""
    ast.set(nodes.Return(expr))
    return True

@meta.hook(Statement)
def new_goto(self, ast, expr):
    """Build a Goto node."""
    ast.set(nodes.Goto(expr))
    return True

@meta.hook(Statement)
def new_range(self, ast, expr):
    """Build a GNU case-range node spanning the current value and ``expr``."""
    begin = Node()
    begin.set(ast)
    ast.set(nodes.Range(nodes.Raw('...'), [begin, expr]))
    return True

@meta.hook(Statement)
def new_case(self, ast, expr):
    """Build a Case node."""
    ast.set(nodes.Case(expr))
    return True

@meta.hook(Statement)
def new_break(self, ast):
    """Build a Break node."""
    ast.set(nodes.Break())
    return True

@meta.hook(Statement)
def new_continue(self, ast):
    """Build a Continue node."""
    ast.set(nodes.Continue())
    return True

@meta.hook(Statement)
def new_label(self, ast, ident):
    """Build a Label node from the captured identifier text."""
    ast.set(nodes.Label(self.value(ident)))
    return True

@meta.hook(Statement)
def new_blockstmt(self, ast, current_block):
    """Open a new BlockStmt and chain type scopes from the enclosing block."""
    ast.set(nodes.BlockStmt([]))
    current_block.ref = ast
    parent = self.rule_nodes.parents
    if (('current_block' in parent
        and hasattr(parent['current_block'].ref, 'types'))):
        current_block.ref.types = parent['current_block'].ref.types.new_child()
    return True

@meta.hook(Statement)
def end_loc(self, current_block, line):
    """Append a parsed statement to the current block's body."""
    current_block.ref.body.append(line)
    return True

@meta.hook(Statement)
def check_stmt(self, ident: Node, val: str) -> bool:
    """Return True when the captured identifier equals ``val``."""
    stmt = self.value(ident)
    return stmt == val
|
LionelAuroux/cnorm
|
cnorm/parsing/statement.py
|
Python
|
gpl-3.0
| 5,555
|
"""This module contains metadata about the project."""
title = 'hadaly'
version = '0.1a'
description = 'Presentation software for art historians'
author = 'Bogdan Cordie'
author_email = 'ooctogene@gmail.com'
license = 'GPLv3'
|
octogene/hadaly
|
hadaly/meta.py
|
Python
|
gpl-3.0
| 225
|
"""Utilities not related to a specific challenge."""
import base64
class Error(Exception):
    """Base class for errors raised by this module."""
    pass

class InvalidArgumentError(Error):
    """Raised when a conversion function receives malformed input."""
    pass

# These should not be accessed externally.
# Built once at import time: each lowercase hex digit maps to its 4-bit
# binary string, and the second map is the exact inverse of the first.
hex_to_bin_map = {'%x' % value: format(value, '04b') for value in range(16)}
bin_to_hex_map = {bits: digit for digit, bits in hex_to_bin_map.items()}

def hex_to_bin(hex_string):
    """Converts an hexadecimal string to the corresponding binary string."""
    groups = []
    for ch in hex_string:
        if ch not in hex_to_bin_map:
            raise InvalidArgumentError(
                "Called hex_to_bin with non-hex string: " + hex_string)
        groups.append(hex_to_bin_map[ch])
    return ''.join(groups)

def bin_to_hex(bin_string):
    """Converts a binary string to the corresponding hexadecimal one.

    The string is consumed in 4-bit groups; if its length is not a
    multiple of 4, the trailing leftover bits are ignored.
    """
    digits = []
    for start in range(0, len(bin_string) - 3, 4):
        group = bin_string[start:start + 4]
        if group not in bin_to_hex_map:
            raise InvalidArgumentError(
                "Called bin_to_hex with non-bin string: " + bin_string)
        digits.append(bin_to_hex_map[group])
    return ''.join(digits)
# TODO: enforce one naming convention.
def ReadableAscii(i):
    """Return True for printable ASCII codes (32..126) plus newline (10)."""
    return i == 10 or 32 <= i <= 126

def bin_to_ascii(bin_string):
    """Returns a tuple of 2 elements: first one is the ASCII decoding of
    the binary string, second one is False if the decoding includes non-readable
    characters.

    The string is consumed in 8-bit groups; trailing leftover bits are
    ignored.
    """
    chars = []
    is_readable = True
    for start in range(0, len(bin_string) - 7, 8):
        code = int(bin_string[start:start + 8], base=2)
        is_readable = is_readable and ReadableAscii(code)
        chars.append(chr(code))
    return (''.join(chars), is_readable)
def base64_to_bin(text64):
    """Converts a base64-encoded string into a binary string."""
    raw = base64.b64decode(text64)
    # format(byte, '08b') zero-pads each byte to exactly 8 binary digits,
    # preserving the full ASCII information.
    return ''.join(format(byte, '08b') for byte in raw)
def base64_to_ascii(text64):
    """Converts a base64-encoded string into an ASCII string."""
    raw = base64.b64decode(text64)
    # Each decoded byte value maps directly to the character with that
    # code point.
    return ''.join(chr(byte) for byte in raw)
def ByteToPaddedBin(byte):
"""Converts a byte to a binary string of length 8."""
bin_str = '{0:b}'.format(byte)
while len(bin_str) < 8:
bin_str = '0' + bin_str
return bin_str
def HammingDistance(b1, b2):
"""Computes the Hamming distance between two binary strings.
No verification is done on the strings (I am lazy), so use
at your own risk.
"""
assert len(b1) == len(b2)
distance = 0
for i in range(len(b1)):
if b1[i] != b2[i]:
distance += 1
return distance
def HammingDistanceAscii(s1, s2):
    """Computes the Hamming distance between two ASCII strings."""
    assert len(s1) == len(s2)
    # The number of differing bits between two 8-bit characters equals the
    # population count of their XOR, which is exactly what comparing the
    # two zero-padded binary expansions position by position yields.
    return sum(bin(ord(a) ^ ord(b)).count('1') for a, b in zip(s1, s2))
|
shainer/matasano
|
lib/utils.py
|
Python
|
gpl-3.0
| 4,226
|
"""
This is the program to write the complete works of William Shakespeare.
"""
import random
import string
import time
# The monkey's keyboard: letters, digits, newline and space.
CHARACTERS = string.ascii_letters + string.digits + '\n' + ' '
# Type forever: one random character at a time, with a random pause of up
# to 20 ms between keystrokes.
while True:
    print(random.choice(CHARACTERS), end='')
    time.sleep(random.random() / 50)
|
tetrau/dogwalker
|
test/monkey_typewriter.py
|
Python
|
gpl-3.0
| 278
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-06 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable integer column ``year`` to the ``topic`` model.
    dependencies = [
        ('tmv_app', '0009_auto_20170602_1101'),
    ]
    operations = [
        migrations.AddField(
            model_name='topic',
            name='year',
            # null=True so existing rows need no default value at migration time.
            field=models.IntegerField(null=True),
        ),
    ]
|
mcallaghan/tmv
|
BasicBrowser/tmv_app/migrations/0010_topic_year.py
|
Python
|
gpl-3.0
| 445
|
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Render the paginated index page.

    GET renders the page without a search filter; POST additionally picks
    up ``search_filter`` from the request body.  The shared context
    construction lives in ``_render_index`` so the two handlers cannot
    drift apart (previously the code was duplicated verbatim).
    """
    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass
    async def _render_index(self, search_filter):
        """Build the common render context and emit index.html.

        :param search_filter: free-text filter or None (GET requests).
        """
        db = self.application.db
        page_count = await db.get_page_count(self.application.rows_per_page)
        render_params = {
            'genre_filter': self.get_argument('genre_filter', None),
            'search_filter': search_filter,
            'page_num': int(self.get_argument('page_num', 1)),
            'page_count': int(page_count),
        }
        self.render('index.html', **render_params)
    async def get(self, *args, **kwargs):
        # GET never carries a search filter (matches previous behavior).
        await self._render_index(None)
    async def post(self, *args, **kwargs):
        await self._render_index(self.get_argument('search_filter', None))
|
denissmirnov/spider2
|
application/handlers/main_handler.py
|
Python
|
gpl-3.0
| 1,222
|
##########################################################################
#
# This file is part of OCEMR.
#
# OCEMR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OCEMR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OCEMR. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################
# Copyright 2011-8 Philip Freeman <elektron@halo.nu>
##########################################################################
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.db.models import Q
@login_required
def lab_queue(request):
    """
    Render the lab work queue: all labs still open (ordered 'ORD' or
    pending 'PEN') plus labs already resolved today (canceled, completed
    or failed since local midnight).

    NOTE: the template context is built from locals(), so every local
    variable name in this function is effectively part of the template
    interface — do not rename them casually.
    """
    from datetime import datetime, timedelta
    d_today = datetime.today()
    # Start of the current local day, used to limit the "resolved" list.
    d_midnight = datetime(d_today.year,d_today.month,d_today.day,0,0,0)
    from ocemr.models import Lab
    active_q = Q(status='ORD' ) | Q(status='PEN' )
    inactive_q = Q(orderedDateTime__gte=d_midnight) & ( Q(status='CAN' ) | Q(status='COM' ) | Q(status='FAI') )
    labs_active = Lab.objects.filter(active_q).order_by('orderedDateTime', '-id')
    labs_inactive = Lab.objects.filter(inactive_q).order_by('orderedDateTime', '-id')
    return render(request, 'lab_queue.html', locals())
@login_required
def lab_start(request,id):
    """
    Mark lab ``id`` as pending ('PEN'), record an audit note, and close
    the popup window.
    """
    from ocemr.models import Lab, LabNote
    lab = Lab.objects.get(pk=id)
    lab.status = 'PEN'
    lab.save()
    LabNote(lab=lab, addedBy=request.user, note="Lab Started").save()
    return render(request, 'close_window.html', {})
@login_required
def lab_cancel(request,id):
    """
    Mark lab ``id`` as canceled ('CAN'), record an audit note, and close
    the popup window.
    """
    from ocemr.models import Lab, LabNote
    lab = Lab.objects.get(pk=id)
    lab.status = 'CAN'
    lab.save()
    LabNote(lab=lab, addedBy=request.user, note="Lab Canceled").save()
    return render(request, 'close_window.html', {})
@login_required
def lab_fail(request,id):
    """
    Mark lab ``id`` as failed ('FAI'), record an audit note, and close
    the popup window.
    """
    from ocemr.models import Lab, LabNote
    lab = Lab.objects.get(pk=id)
    lab.status = 'FAI'
    lab.save()
    LabNote(lab=lab, addedBy=request.user, note="Lab Failed").save()
    return render(request, 'close_window.html', {})
@login_required
def lab_reorder(request,id):
    """
    Create a fresh 'ORD' copy of lab ``id`` for the same patient and
    visit, ordered by the current user, then close the popup window.
    """
    from ocemr.models import Lab
    original = Lab.objects.get(pk=id)
    Lab(type=original.type, patient=original.patient, visit=original.visit,
        orderedBy=request.user, status='ORD').save()
    return render(request, 'close_window.html', {})
@login_required
def lab_notate(request, id):
    """
    Show (GET) or process (POST) the popup form that attaches a free-form
    note to lab ``id``.
    """
    from ocemr.models import LabNote, Lab
    from ocemr.forms import NewLabNoteForm
    labid = int(id)
    l = Lab.objects.get(pk=labid)
    if request.method == 'POST':  # If the form has been submitted...
        # A form bound to the POST data
        form = NewLabNoteForm(l, request.user, request.POST)
        if form.is_valid():  # All validation rules pass
            form.save()
            return HttpResponseRedirect('/close_window/')
    else:
        form = NewLabNoteForm(l, request.user)  # An unbound form
    return render(request, 'popup_form.html', {
        # Grammar fix in the user-visible title: "Add an Lab Note" ->
        # "Add a Lab Note".
        'title': 'Add a Lab Note: %s' % (l.type.title),
        'form_action': '/lab/%d/notate/' % (l.id),
        'form': form,
    })
@login_required
def lab_complete(request, id):
    """
    Show (GET) or process (POST) the popup form that records the result
    of lab ``id`` and marks it completed ('COM').
    """
    from ocemr.models import Lab
    from ocemr.forms import CompleteLabForm
    lab = Lab.objects.get(pk=id)
    if request.method != 'POST':
        # First visit: present an unbound form.
        form = CompleteLabForm()
    else:
        form = CompleteLabForm(request.POST)
        if form.is_valid():
            lab.result = form.cleaned_data['result']
            lab.status = 'COM'
            lab.save()
            return HttpResponseRedirect('/close_window/')
    return render(request, 'popup_form.html', {
        'title': 'Complete Lab: %s' % (lab,),
        'form_action': '/lab/%s/complete/' % (lab.id),
        'form': form,
    })
|
ph1l/ocemr
|
ocemr/views/lab.py
|
Python
|
gpl-3.0
| 4,143
|
# -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to show the output of the svn proplist command
process.
"""
from __future__ import unicode_literals
try:
str = unicode
except NameError:
pass
from PyQt5.QtCore import QTimer, QProcess, QProcessEnvironment, QRegExp, Qt
from PyQt5.QtWidgets import QWidget, QHeaderView, QDialogButtonBox, \
QTreeWidgetItem
from E5Gui import E5MessageBox
from .Ui_SvnPropListDialog import Ui_SvnPropListDialog
import Preferences
class SvnPropListDialog(QWidget, Ui_SvnPropListDialog):
    """
    Class implementing a dialog to show the output of the svn proplist command
    process.
    """
    def __init__(self, vcs, parent=None):
        """
        Constructor
        @param vcs reference to the vcs object
        @param parent parent widget (QWidget)
        """
        super(SvnPropListDialog, self).__init__(parent)
        self.setupUi(self)
        self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)
        self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)
        self.process = QProcess()
        # Force the C locale so svn's output is untranslated and the
        # regular expressions below keep matching.
        env = QProcessEnvironment.systemEnvironment()
        env.insert("LANG", "C")
        self.process.setProcessEnvironment(env)
        self.vcs = vcs
        self.propsList.headerItem().setText(self.propsList.columnCount(), "")
        self.propsList.header().setSortIndicator(0, Qt.AscendingOrder)
        self.process.finished.connect(self.__procFinished)
        self.process.readyReadStandardOutput.connect(self.__readStdout)
        self.process.readyReadStandardError.connect(self.__readStderr)
        # Matches the "Properties on 'PATH':" header line of svn proplist.
        self.rx_path = QRegExp(r"Properties on '([^']+)':\s*")
        # Matches a "  NAME : VALUE" property line of --verbose output.
        self.rx_prop = QRegExp(r"  (.*) *: *(.*)[\r\n]")
        # Stream-parsing state: current path, current property name, and
        # the (possibly multi-line) value accumulated so far.
        self.lastPath = None
        self.lastProp = None
        self.propBuffer = ""
    def __resort(self):
        """
        Private method to resort the tree.
        """
        self.propsList.sortItems(
            self.propsList.sortColumn(),
            self.propsList.header().sortIndicatorOrder())
    def __resizeColumns(self):
        """
        Private method to resize the list columns.
        """
        self.propsList.header().resizeSections(QHeaderView.ResizeToContents)
        self.propsList.header().setStretchLastSection(True)
    def __generateItem(self, path, propName, propValue):
        """
        Private method to generate a properties item in the properties list.
        @param path file/directory name the property applies to (string)
        @param propName name of the property (string)
        @param propValue value of the property (string)
        """
        QTreeWidgetItem(self.propsList, [path, propName, propValue.strip()])
    def closeEvent(self, e):
        """
        Protected slot implementing a close event handler.
        @param e close event (QCloseEvent)
        """
        if self.process is not None and \
           self.process.state() != QProcess.NotRunning:
            # Ask the process to terminate; if it has not done so after
            # 2 s, kill it outright.
            self.process.terminate()
            QTimer.singleShot(2000, self.process.kill)
            self.process.waitForFinished(3000)
        e.accept()
    def start(self, fn, recursive=False):
        """
        Public slot to start the svn status command.
        @param fn filename(s) (string or list of string)
        @param recursive flag indicating a recursive list is requested
        """
        self.errorGroup.hide()
        # Discard any still-running previous invocation.
        self.process.kill()
        args = []
        args.append('proplist')
        self.vcs.addArguments(args, self.vcs.options['global'])
        args.append('--verbose')
        if recursive:
            args.append('--recursive')
        if isinstance(fn, list):
            dname, fnames = self.vcs.splitPathList(fn)
            self.vcs.addArguments(args, fnames)
        else:
            dname, fname = self.vcs.splitPath(fn)
            args.append(fname)
        self.process.setWorkingDirectory(dname)
        self.process.start('svn', args)
        procStarted = self.process.waitForStarted(5000)
        if not procStarted:
            E5MessageBox.critical(
                self,
                self.tr('Process Generation Error'),
                self.tr(
                    'The process {0} could not be started. '
                    'Ensure, that it is in the search path.'
                ).format('svn'))
    def __finish(self):
        """
        Private slot called when the process finished or the user pressed the
        button.
        """
        if self.process is not None and \
           self.process.state() != QProcess.NotRunning:
            self.process.terminate()
            QTimer.singleShot(2000, self.process.kill)
            self.process.waitForFinished(3000)
        self.buttonBox.button(QDialogButtonBox.Close).setEnabled(True)
        self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(False)
        self.buttonBox.button(QDialogButtonBox.Close).setDefault(True)
        self.process = None
        # Flush the property that was still being accumulated when the
        # output stream ended.
        if self.lastProp:
            self.__generateItem(self.lastPath, self.lastProp, self.propBuffer)
        self.__resort()
        self.__resizeColumns()
    def on_buttonBox_clicked(self, button):
        """
        Private slot called by a button of the button box clicked.
        @param button button that was clicked (QAbstractButton)
        """
        if button == self.buttonBox.button(QDialogButtonBox.Close):
            self.close()
        elif button == self.buttonBox.button(QDialogButtonBox.Cancel):
            self.__finish()
    def __procFinished(self, exitCode, exitStatus):
        """
        Private slot connected to the finished signal.
        @param exitCode exit code of the process (integer)
        @param exitStatus exit status of the process (QProcess.ExitStatus)
        """
        if self.lastPath is None:
            # No output was parsed at all: show a placeholder row.
            self.__generateItem('', 'None', '')
        self.__finish()
    def __readStdout(self):
        """
        Private slot to handle the readyReadStandardOutput signal.
        It reads the output of the process, formats it and inserts it into
        the contents pane.
        """
        self.process.setReadChannel(QProcess.StandardOutput)
        while self.process.canReadLine():
            s = str(self.process.readLine(),
                    Preferences.getSystem("IOEncoding"),
                    'replace')
            if self.rx_path.exactMatch(s):
                # A new path header: flush the property of the previous path.
                if self.lastProp:
                    self.__generateItem(
                        self.lastPath, self.lastProp, self.propBuffer)
                self.lastPath = self.rx_path.cap(1)
                self.lastProp = None
                self.propBuffer = ""
            elif self.rx_prop.exactMatch(s):
                # A new property line: flush the previous property, then
                # start buffering the new one (values may span lines).
                if self.lastProp:
                    self.__generateItem(
                        self.lastPath, self.lastProp, self.propBuffer)
                self.lastProp = self.rx_prop.cap(1)
                self.propBuffer = self.rx_prop.cap(2)
            else:
                # Continuation line of a multi-line property value.
                self.propBuffer += ' '
                self.propBuffer += s
    def __readStderr(self):
        """
        Private slot to handle the readyReadStandardError signal.
        It reads the error output of the process and inserts it into the
        error pane.
        """
        if self.process is not None:
            self.errorGroup.show()
            s = str(self.process.readAllStandardError(),
                    Preferences.getSystem("IOEncoding"),
                    'replace')
            self.errors.insertPlainText(s)
            self.errors.ensureCursorVisible()
|
paulmadore/Eric-IDE
|
6-6.0.9/eric/Plugins/VcsPlugins/vcsSubversion/SvnPropListDialog.py
|
Python
|
gpl-3.0
| 7,904
|
from traits.api import \
HasTraits, Float, Property, cached_property, Instance, \
Int
import numpy as np
from oricreate.api import \
YoshimuraCPFactory, fix, link, r_, s_, MapToSurface,\
GuConstantLength, GuDofConstraints, SimulationConfig, SimulationTask, \
FTV
from oricreate.forming_tasks.forming_task import FormingTask
from oricreate.fu import \
FuPotEngTotal
class BarrellVaultGravityFormingProcess(HasTraits):
    '''
    Define the simulation task prescribing the boundary conditions,
    target surfaces and configuration of the algorithm itself.
    '''
    # Sheet dimensions and crease-pattern subdivision counts.
    L_x = Float(2.0, auto_set=False, enter_set=True, input=True)
    L_y = Float(1.0, auto_set=False, enter_set=True, input=True)
    n_x = Int(2, auto_set=False, enter_set=True, input=True)
    n_y = Int(2, auto_set=False, enter_set=True, input=True)
    # Horizontal control displacement applied at the left/right edges.
    u_x = Float(0.1, auto_set=False, enter_set=True, input=True)
    n_steps = Int(10, auto_set=False, enter_set=True, input=True)
    ctf = Property(depends_on='+input')
    '''control target surface'''
    @cached_property
    def _get_ctf(self):
        # Shallow parabolic surface (depth 0.1 at mid-span) used to seed
        # the desired folding branch.
        return [r_, s_, -0.1 * (r_ * (1 - r_ / self.L_x))]
    factory_task = Property(Instance(FormingTask))
    '''Factory task generating the crease pattern.
    '''
    @cached_property
    def _get_factory_task(self):
        return YoshimuraCPFactory(L_x=self.L_x, L_y=self.L_y,
                                  n_x=self.n_x, n_y=self.n_y)
    init_displ_task = Property(Instance(FormingTask))
    '''Initialization to render the desired folding branch.
    '''
    @cached_property
    def _get_init_displ_task(self):
        cp = self.factory_task.formed_object
        return MapToSurface(previous_task=self.factory_task,
                            target_faces=[(self.ctf, cp.N)])
    fold_task = Property(Instance(FormingTask))
    '''Configure the simulation task.
    '''
    @cached_property
    def _get_fold_task(self):
        # Touching x_1 appears to force evaluation of the preceding task
        # before this one is assembled -- confirm against oricreate docs.
        self.init_displ_task.x_1
        cp = self.factory_task
        # Node index sets: left edge, right edge, both edges, and the
        # mid nodes whose y-displacement is fixed.
        n_l_h = cp.N_h[0, :].flatten()
        n_r_h = cp.N_h[-1, :].flatten()
        n_lr_h = cp.N_h[(0, -1), :].flatten()
        n_fixed_y = cp.N_h[(0, -1), 1].flatten()
        u_max = self.u_x
        # Push the two edges towards each other by u_max while pinning
        # vertical and lateral motion; link the interior node columns.
        dof_constraints = fix(n_l_h, [0], lambda t: t * u_max) + fix(n_lr_h, [2]) + \
            fix(n_fixed_y, [1]) + fix(n_r_h, [0], lambda t: t * -u_max) + \
            link(cp.N_v[0, :].flatten(), 0, 1.0,
                 cp.N_v[1, :].flatten(), 0, 1.0)
        gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
        gu_constant_length = GuConstantLength()
        sim_config = SimulationConfig(goal_function_type='gravity potential energy',
                                      gu={'cl': gu_constant_length,
                                          'dofs': gu_dof_constraints},
                                      acc=1e-5, MAX_ITER=500,
                                      debug_level=0)
        return SimulationTask(previous_task=self.init_displ_task,
                              config=sim_config, n_steps=self.n_steps)
    load_task = Property(Instance(FormingTask))
    '''Configure the simulation task.
    '''
    @cached_property
    def _get_load_task(self):
        # Force the fold simulation to run before loading starts.
        self.fold_task.x_1
        cp = self.factory_task
        # Clamp the four corner nodes in all three directions.
        n_l_h = cp.N_h[0, (0, -1)].flatten()
        n_r_h = cp.N_h[-1, (0, -1)].flatten()
        dof_constraints = fix(n_l_h, [0, 1, 2]) + fix(n_r_h, [0, 1, 2])
        gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
        gu_constant_length = GuConstantLength()
        sim_config = SimulationConfig(goal_function_type='total potential energy',
                                      gu={'cl': gu_constant_length,
                                          'dofs': gu_dof_constraints},
                                      use_f_du=True,
                                      acc=1e-4, MAX_ITER=1000,
                                      debug_level=0)
        # Vertical point loads on the middle node row.
        F_ext_list = [(n, 2, 100.0) for n in cp.N_h[1, :]]
        fu_tot_poteng = FuPotEngTotal(kappa=np.array([10]),
                                      F_ext_list=F_ext_list)
        sim_config._fu = fu_tot_poteng
        st = SimulationTask(previous_task=self.fold_task,
                            config=sim_config, n_steps=1)
        # Restart the loading step from the folded geometry with zero
        # initial displacement.
        cp = st.formed_object
        cp.x_0 = self.fold_task.x_1
        cp.u[:, :] = 0.0
        fu_tot_poteng.forming_task = st
        return st
class BikeShellterFormingProcessFTV(FTV):
    # Forming-task visualization bound to one forming-process instance.
    model = Instance(BarrellVaultGravityFormingProcess)
if __name__ == '__main__':
    bsf_process = BarrellVaultGravityFormingProcess(
        L_x=2.0, n_x=2, n_steps=1, u_x=0.1)
    it = bsf_process.init_displ_task
    ft = bsf_process.fold_task
    lt = bsf_process.load_task
    # Accessing u_1 triggers the computation of the respective tasks.
    it.u_1
    ft.u_1
    ftv = BikeShellterFormingProcessFTV(model=bsf_process)
    # ftv.add(it.target_faces[0].viz3d)
    # it.formed_object.viz3d.set(tube_radius=0.002)
    # ftv.add(it.formed_object.viz3d)
    # ftv.add(it.formed_object.viz3d_dict['node_numbers'], order=5)
    # Register the load-task geometry, node numbers, displacement field,
    # constraints and load arrows with the visualization.
    lt.formed_object.viz3d.set(tube_radius=0.002)
    ftv.add(lt.formed_object.viz3d_dict['node_numbers'], order=5)
    ftv.add(lt.formed_object.viz3d)
    ftv.add(lt.formed_object.viz3d_dict['displ'])
    lt.config.gu['dofs'].viz3d.scale_factor = 0.5
    ftv.add(lt.config.gu['dofs'].viz3d)
    ftv.add(lt.config.fu.viz3d)
    ftv.add(lt.config.fu.viz3d_dict['node_load'])
    print('ft_x1', ft.x_1)
    cp = lt.formed_object
    print('lt_x0', cp.x_0)
    print('lt_u', cp.u)
    # Small vertical perturbation to nudge the solver off the symmetric
    # equilibrium -- presumably intentional; confirm.
    cp.u[(2, 3), 2] = -0.001
    print('lt.u_1', lt.u_1)
    cp = lt.formed_object
    # Change of dihedral angles relative to the initial configuration.
    iL_phi = cp.iL_psi2 - cp.iL_psi_0
    print('phi', iL_phi)
    ftv.plot()
    ftv.update(vot=1, force=True)
    ftv.show()
    # n_cam_move = 40
    # fta = FTA(ftv=ftv)
    # fta.init_view(a=45, e=60, d=7, f=(0, 0, 0), r=-120)
    # fta.add_cam_move(a=60, e=70, n=n_cam_move, d=6, r=-120,
    #                  duration=10,
    #                  vot_fn=lambda cmt: np.linspace(0.01, 0.5, n_cam_move),
    #                  azimuth_move='damped',
    #                  elevation_move='damped',
    #                  distance_move='damped')
    # fta.add_cam_move(a=80, e=80, d=4, n=n_cam_move, r=-132,
    #                  duration=10,
    #                  vot_fn=lambda cmt: np.linspace(0.5, 1.0, n_cam_move),
    #                  azimuth_move='damped',
    #                  elevation_move='damped',
    #                  distance_move='damped')
    #
    # fta.plot()
    # fta.render()
    # fta.configure_traits()
|
simvisage/oricreate
|
apps/sandbox/rch/ex02_yoshimura_2x2_ff_min_poteng.py
|
Python
|
gpl-3.0
| 6,546
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Widens two NetSalary decimal fields to 7 digits / 3 decimal places.
    dependencies = [
        ('payroll', '0008_hours_overall_total_hours'),
    ]
    operations = [
        migrations.AlterField(
            model_name='netsalary',
            name='over_monthly_amount',
            field=models.DecimalField(default=0.0, max_digits=7, decimal_places=3),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='netsalary',
            name='over_monthly_hours',
            field=models.DecimalField(default=0.0, max_digits=7, decimal_places=3),
            preserve_default=True,
        ),
    ]
|
KenanBek/rslservices
|
app/payroll/migrations/0009_auto_20160722_1720.py
|
Python
|
gpl-3.0
| 729
|
#!/usr/bin/python
import os
import requests
import snowflake
import subprocess
import time
import sys
dirSelf = os.path.dirname(os.path.realpath(__file__))
# Make <project root>/lib importable.  The previous expression used
# rstrip("theBox"), which strips any trailing run of the *characters*
# 't','h','e','B','o','x' rather than the literal directory suffix; derive
# the parent directory explicitly instead (same result for the expected
# .../wtfBox/theBox layout, but no longer dependent on the letters in the
# parent path).
libDir = os.path.join(os.path.dirname(dirSelf.rstrip(os.sep)), "lib")
sys.path.append(libDir)
import constants
hostname = "google.com"  # example placeholder, not used below
headers = {}
headers['user-agent'] = "theBox-v1.0"
# Stable unique identifier for this box.
boxid = snowflake.snowflake()
serverHost = "http://" + constants.backendServer + "/ALIVE"
timeToWait = 30    # seconds between keep-alive pings
timeInformed = 0   # timestamp of the last successful ping
def getPublicIP():
    """Return this host's public IP (as bytes) via OpenDNS, or 0 on failure.

    Shells out to ``dig +short myip.opendns.com @resolver1.opendns.com``.
    """
    p = subprocess.Popen(
        ['dig', '+short', 'myip.opendns.com', '@resolver1.opendns.com'],
        stdout=subprocess.PIPE)
    out, _ = p.communicate()
    p.wait()
    # Bug fix: communicate() always returns a (stdout, stderr) tuple, so the
    # original ``if (t)`` was always true and failures were never detected.
    # Test the captured stdout itself instead.
    if out:
        return out
    return 0
def setBoxDetails():
    """Refresh the outgoing request headers with this box's identity."""
    headers.update({'ip': getPublicIP(), 'id': boxid})
def informServer():
    """Send a keep-alive GET to the backend and return the response body."""
    setBoxDetails()
    response = requests.get(serverHost, headers=headers)
    return response.content
# Keep-alive daemon loop: every timeToWait seconds, ping the backend if we
# can resolve our public IP (i.e. we appear to be online).
while True:
    try:
        if (time.time() - timeInformed) > timeToWait:
            if getPublicIP():
                informServer()
                timeInformed = time.time()
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt and SystemExit, making the daemon impossible
        # to stop cleanly with Ctrl-C.
        print(str(sys.exc_info()))
    time.sleep(5)
|
shrinidhi666/wtfBox
|
theBox/alive.py
|
Python
|
gpl-3.0
| 1,119
|
# -*- coding: UTF-8
""" nya.sh html parser
Classes
=======
Parser -- Parser implementation
Attributes
==========
NYA_SH_SOURCE_NAME -- Source name constant
"""
import html
from html.parser import HTMLParser
from lxml.html import HtmlElement
from pyquery import PyQuery
from ._base import *
__all__ = ['Parser', 'NYA_SH_SOURCE_NAME']
# Source name constant
NYA_SH_SOURCE_NAME = 'nya.sh'
class Parser(AbstractParser):
    """ nya.sh parser """
    def __init__(self):
        """ Init parser object, create html parser for entities decoding """
        super().__init__()
        # NOTE(review): this parser instance is stored but never used in
        # this class -- _parse_post decodes entities via html.unescape()
        # instead.  Confirm before removing.
        self.__html_parser = HTMLParser()
    @property
    def name(self) -> str:
        """ Get unique name
        :return str:
        """
        return NYA_SH_SOURCE_NAME
    def _parse_posts_collection(self, html_: PyQuery) -> list:
        """ Get posts HTMLElement[] collection
        :param PyQuery html_: Page PyQuery object
        :return list:
        """
        # Each quote lives in a <div class="q"> container.
        return html_('div.q')
    def _parse_pages_collection(self, html_: PyQuery) -> list:
        """ Get pages urls for indexation
        :param PyQuery html_: Page PyQuery object
        :return list:
        """
        # The current page number is rendered as <b>; collect the hrefs of
        # every element that follows it (the "next" pages).
        pages_links = []
        links_list = html_('div.pages *')
        is_next_link = False
        for link in links_list:
            if is_next_link:
                pages_links.append(link.attrib['href'])
            elif link.tag == 'b':
                is_next_link = True
        return pages_links
    def _parse_post(self, html_element: HtmlElement) -> RawPost:
        """ Parse post html element
        :param HTMLElement html_element:
        :return RawPost:
        """
        post_pyq_el = PyQuery(html_element)
        # Story id is rendered as "#NNN" inside the header link.
        story_id = int(post_pyq_el('div.sm a b')[0].text.lstrip('#'))
        story_text = html.unescape(parse_multiline_html(post_pyq_el('div.content')))
        # nya.sh posts carry no title and no tags.
        return RawPost(story_id, self.name, '', story_text, frozenset())
|
kawashiro/dewyatochka2
|
src/dewyatochka/plugins/cool_story/parser/nya_sh.py
|
Python
|
gpl-3.0
| 1,954
|
#### import the simple module from the paraview
from paraview.simple import *
# Output directory for the rendered screenshots.
pnglocation = 'D:\\figure_JFMcylinder\\temp1_CLV_finer\\'
# create a new 'XML Unstructured Grid Reader'
M_MODES = [0, 4, 16, 39]      # CLV mode indices to render
K_SEGMENTS = range(350,451)   # time-segment file indices averaged over
def plotAverage(m_mode):
    """Render the time-averaged momentum magnitude of CLV mode ``m_mode``
    on the z=0 plane and save it as a PNG screenshot.

    :param m_mode: one of the known mode indices (0, 4, 16, 39).
    :raises ValueError: for any other mode (no normalization is defined).
    """
    print('plotting mode '+ str(m_mode))
    files = ['D:\\figure_JFMcylinder\\temp1_CLV_finer\\CLV'+str(m_mode)+'\\z0_plane_seg.'+str(k)+'.vtu' for k in K_SEGMENTS]
    z0_plane = XMLUnstructuredGridReader(FileName=files)
    z0_plane.CellArrayStatus = ['RHOU']
    renderView1 = GetActiveViewOrCreate('RenderView')
    # normalize by a new 'Calculator'
    calculator1 = Calculator(Input=z0_plane)
    calculator1.AttributeMode = 'Cell Data'
    calculator1.ResultArrayName = 'rhou_mag'
    calculator1.Function = 'sqrt(RHOU_X^2 + RHOU_Y^2 + RHOU_Z^2)'
    # Per-mode normalization constants (empirically chosen).
    if m_mode == 0:
        calculator1.Function = 'sqrt(RHOU_X^2 + RHOU_Y^2 + RHOU_Z^2)/9'
    elif m_mode == 4:
        calculator1.Function = 'sqrt(RHOU_X^2 + RHOU_Y^2 + RHOU_Z^2)/0.01/1.4'
    elif m_mode == 16:
        calculator1.Function = 'sqrt(RHOU_X^2 + RHOU_Y^2 + RHOU_Z^2)/7/1.05'
    elif m_mode == 39:
        calculator1.Function = 'sqrt(RHOU_X^2 + RHOU_Y^2 + RHOU_Z^2)/9'
    else:
        # Bug fix: the original branch executed the undefined name
        # ``pause;`` (a MATLAB-ism), raising a confusing NameError.
        raise ValueError('no normalization defined for mode %d' % m_mode)
    # only average data
    temporalStatistics1 = TemporalStatistics(Input=calculator1)
    temporalStatistics1.ComputeMinimum = 0
    temporalStatistics1.ComputeMaximum = 0
    temporalStatistics1.ComputeStandardDeviation = 0
    # point data
    PointData = CellDatatoPointData(Input=temporalStatistics1)
    PointDataDisplay = Show(PointData, renderView1)
    ColorBy(PointDataDisplay, ('POINTS', 'rhou_mag_average', 'Magnitude'))
    PointDataDisplay.RescaleTransferFunctionToDataRange(True, False)
    # show color bar/color legend
    PointDataDisplay.SetScalarBarVisibility(renderView1, True)
    renderView1.OrientationAxesVisibility = 0
    newRHOULUT = GetColorTransferFunction('rhou_mag_average')
    # newRHOULUT.MapControlPointsToLogSpace()
    # newRHOULUT.UseLogScale = 1
    newRHOULUT.ApplyPreset('X Ray', True)
    newRHOULUT.RescaleTransferFunction(0.01, 1.00)
    # color bar
    rHOULUTColorBar = GetScalarBar(newRHOULUT, renderView1)
    rHOULUTColorBar.WindowLocation = 'AnyLocation'
    rHOULUTColorBar.TitleFontSize = 6
    rHOULUTColorBar.LabelFontSize = 6
    rHOULUTColorBar.Position = [0.03, 0.345]
    rHOULUTColorBar.ScalarBarLength = 0.3
    rHOULUTColorBar.LabelFormat = '%-#7.1f'
    rHOULUTColorBar.RangeLabelFormat = '%-#7.1f'
    # adjust lights
    rHOULUTColorBar.TitleColor = [0.0, 0.0, 0.0]
    rHOULUTColorBar.LabelColor = [0.0, 0.0, 0.0]
    renderView1.LightSwitch = 0
    renderView1.UseLight = 0
    # camera placement
    renderView1.CameraPosition = [0.0028, 0.0, 0.005]
    renderView1.CameraFocalPoint = [0.0028, 0.0, 0.0]
    renderView1.CameraParallelScale = 0.003
    renderView1.CameraParallelProjection = 1
    SaveScreenshot(pnglocation+'CLV_finer_averaged'+str(m_mode)+'.png', renderView1, ImageResolution=[1735, 1140])
# Render the averaged field for every requested CLV mode.
for m in M_MODES:
    plotAverage(m)
|
qiqi/fds
|
apps/charles_cylinder3D_Lyapunov/for_paraview/averageCLV.py
|
Python
|
gpl-3.0
| 3,150
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Transport properties config section
###############################################################################
from PyQt5 import QtCore, QtWidgets
from lib.compuestos import Componente
from lib.mezcla import Mezcla
class UI_confTransport_widget(QtWidgets.QWidget):
"""Transport properties widget, tu use in dialog, wizard..."""
    def __init__(self, config=None, parent=None):
        """Constructor, opcional config parameter with project config

        Builds a 4-column grid of combo boxes: pure-fluid correlations,
        their high-pressure corrections, mixture correlations, and their
        high-pressure corrections.  If *config* has a "Transport" section
        the current selections are loaded from it.
        """
        super(UI_confTransport_widget, self).__init__(parent)
        layout = QtWidgets.QGridLayout(self)
        # Horizontal separator lines above and below the column headers.
        line = QtWidgets.QFrame()
        line.setFrameShape(QtWidgets.QFrame.HLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(line, 1, 1, 1, 7)
        line = QtWidgets.QFrame()
        line.setFrameShape(QtWidgets.QFrame.HLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(line, 3, 1, 1, 7)
        # --- Column 1: pure-fluid correlations ---
        lbl_Pure = QtWidgets.QLabel()
        lbl_Pure.setAlignment(QtCore.Qt.AlignCenter)
        lbl_Pure.setText(QtWidgets.QApplication.translate(
            "pychemqt", "Pure Fluid Correlations"))
        layout.addWidget(lbl_Pure, 0, 1, 1, 3)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Liquid Density:")), 4, 0)
        self.RhoL = QtWidgets.QComboBox()
        for method in Componente.METHODS_RhoL:
            self.RhoL.addItem(method)
        layout.addWidget(self.RhoL, 4, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Liquid Viscosity:")), 5, 0)
        self.MuL = QtWidgets.QComboBox()
        for method in Componente.METHODS_MuL:
            self.MuL.addItem(method)
        layout.addWidget(self.MuL, 5, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Gas Viscosity:")), 6, 0)
        self.MuG = QtWidgets.QComboBox()
        for method in Componente.METHODS_MuG:
            self.MuG.addItem(method)
        layout.addWidget(self.MuG, 6, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Surface Tension:")), 7, 0)
        self.Tension = QtWidgets.QComboBox()
        for method in Componente.METHODS_Tension:
            self.Tension.addItem(method)
        layout.addWidget(self.Tension, 7, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Liquid Thermal Conductivity:")), 8, 0)
        self.ThCondL = QtWidgets.QComboBox()
        for method in Componente.METHODS_ThL:
            self.ThCondL.addItem(method)
        layout.addWidget(self.ThCondL, 8, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Gas Thermal Conductivity:")), 9, 0)
        self.ThCondG = QtWidgets.QComboBox()
        for method in Componente.METHODS_ThG:
            self.ThCondG.addItem(method)
        layout.addWidget(self.ThCondG, 9, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Vapor Pressure:")), 10, 0)
        self.Pv = QtWidgets.QComboBox()
        for method in Componente.METHODS_Pv:
            self.Pv.addItem(method)
        layout.addWidget(self.Pv, 10, 1)
        layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
            "pychemqt", "Acentric factor:")), 11, 0)
        self.w = QtWidgets.QComboBox()
        for method in Componente.METHODS_facent:
            self.w.addItem(method)
        layout.addWidget(self.w, 11, 1)
        # Vertical separator before the pure-fluid high-pressure column.
        line = QtWidgets.QFrame()
        line.setFrameShape(QtWidgets.QFrame.VLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(line, 1, 2, 11, 1)
        # --- Column 2: high-pressure corrections (pure fluid) ---
        lbl_hP = QtWidgets.QLabel()
        lbl_hP.setAlignment(QtCore.Qt.AlignCenter)
        lbl_hP.setText(QtWidgets.QApplication.translate(
            "pychemqt", "High Pressure Corrections"))
        layout.addWidget(lbl_hP, 2, 3)
        self.Corr_RhoL = QtWidgets.QComboBox()
        for method in Componente.METHODS_RhoLP:
            self.Corr_RhoL.addItem(method)
        layout.addWidget(self.Corr_RhoL, 4, 3)
        self.Corr_MuL = QtWidgets.QComboBox()
        for method in Componente.METHODS_MuLP:
            self.Corr_MuL.addItem(method)
        layout.addWidget(self.Corr_MuL, 5, 3)
        self.Corr_MuG = QtWidgets.QComboBox()
        for method in Componente.METHODS_MuGP:
            self.Corr_MuG.addItem(method)
        layout.addWidget(self.Corr_MuG, 6, 3)
        self.Corr_ThCondL = QtWidgets.QComboBox()
        for method in Componente.METHODS_ThLP:
            self.Corr_ThCondL.addItem(method)
        layout.addWidget(self.Corr_ThCondL, 8, 3)
        self.Corr_ThCondG = QtWidgets.QComboBox()
        for method in Componente.METHODS_ThGP:
            self.Corr_ThCondG.addItem(method)
        layout.addWidget(self.Corr_ThCondG, 9, 3)
        line = QtWidgets.QFrame()
        line.setFrameShape(QtWidgets.QFrame.VLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(line, 0, 4, 12, 1)
        # --- Column 3: mixture correlations ---
        lbl_Mix = QtWidgets.QLabel()
        lbl_Mix.setAlignment(QtCore.Qt.AlignCenter)
        lbl_Mix.setText(QtWidgets.QApplication.translate(
            "pychemqt", "Mixture Fluid Correlations"))
        layout.addWidget(lbl_Mix, 0, 5, 1, 3)
        self.RhoLMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_RhoL:
            self.RhoLMix.addItem(method)
        layout.addWidget(self.RhoLMix, 4, 5)
        self.MuLMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_MuL:
            self.MuLMix.addItem(method)
        layout.addWidget(self.MuLMix, 5, 5)
        self.MuGMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_MuG:
            self.MuGMix.addItem(method)
        layout.addWidget(self.MuGMix, 6, 5)
        self.ThLMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_ThL:
            self.ThLMix.addItem(method)
        layout.addWidget(self.ThLMix, 8, 5)
        self.ThGMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_ThG:
            self.ThGMix.addItem(method)
        layout.addWidget(self.ThGMix, 9, 5)
        line = QtWidgets.QFrame()
        line.setFrameShape(QtWidgets.QFrame.VLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addWidget(line, 1, 6, 11, 1)
        # --- Column 4: high-pressure corrections (mixture) ---
        lbl_hPMix = QtWidgets.QLabel()
        lbl_hPMix.setAlignment(QtCore.Qt.AlignCenter)
        lbl_hPMix.setText(QtWidgets.QApplication.translate(
            "pychemqt", "High Pressure Corrections"))
        layout.addWidget(lbl_hPMix, 2, 7)
        self.Corr_RhoLMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_RhoLP:
            self.Corr_RhoLMix.addItem(method)
        layout.addWidget(self.Corr_RhoLMix, 4, 7)
        self.Corr_MuGMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_MuGP:
            self.Corr_MuGMix.addItem(method)
        layout.addWidget(self.Corr_MuGMix, 6, 7)
        self.Corr_ThGMix = QtWidgets.QComboBox()
        for method in Mezcla.METHODS_ThGP:
            self.Corr_ThGMix.addItem(method)
        layout.addWidget(self.Corr_ThGMix, 9, 7)
        layout.addItem(QtWidgets.QSpacerItem(
            10, 10, QtWidgets.QSizePolicy.Fixed,
            QtWidgets.QSizePolicy.Fixed), 12, 0)
        self.rhoLEoS = QtWidgets.QCheckBox(QtWidgets.QApplication.translate(
            "pychemqt", "Use liquid density from EoS if available"))
        layout.addWidget(self.rhoLEoS, 13, 0, 1, 8)
        layout.addItem(QtWidgets.QSpacerItem(
            10, 10, QtWidgets.QSizePolicy.Expanding,
            QtWidgets.QSizePolicy.Expanding), 15, 8)
        # Load the previously saved selections, if any.
        if config and config.has_section("Transport"):
            self.RhoL.setCurrentIndex(config.getint("Transport", "RhoL"))
            self.Corr_RhoL.setCurrentIndex(
                config.getint("Transport", "Corr_RhoL"))
            self.MuL.setCurrentIndex(config.getint("Transport", "MuL"))
            self.Corr_MuL.setCurrentIndex(
                config.getint("Transport", "Corr_MuL"))
            self.Corr_MuG.setCurrentIndex(
                config.getint("Transport", "Corr_MuG"))
            self.MuG.setCurrentIndex(config.getint("Transport", "MuG"))
            self.Tension.setCurrentIndex(config.getint("Transport", "Tension"))
            self.ThCondL.setCurrentIndex(config.getint("Transport", "ThCondL"))
            self.Corr_ThCondL.setCurrentIndex(
                config.getint("Transport", "Corr_ThCondL"))
            self.Corr_ThCondG.setCurrentIndex(
                config.getint("Transport", "Corr_ThCondG"))
            self.ThCondG.setCurrentIndex(config.getint("Transport", "ThCondG"))
            self.Pv.setCurrentIndex(config.getint("Transport", "Pv"))
            self.w.setCurrentIndex(config.getint("Transport", "f_acent"))
            self.RhoLMix.setCurrentIndex(config.getint("Transport", "RhoLMix"))
            self.Corr_RhoLMix.setCurrentIndex(
                config.getint("Transport", "Corr_RhoLMix"))
            self.MuLMix.setCurrentIndex(config.getint("Transport", "MuLMix"))
            self.MuGMix.setCurrentIndex(config.getint("Transport", "MuGMix"))
            self.Corr_MuGMix.setCurrentIndex(
                config.getint("Transport", "Corr_MuGMix"))
            self.ThLMix.setCurrentIndex(
                config.getint("Transport", "ThCondLMix"))
            self.ThGMix.setCurrentIndex(
                config.getint("Transport", "ThCondGMix"))
            self.Corr_ThGMix.setCurrentIndex(
                config.getint("Transport", "Corr_ThCondGMix"))
            self.rhoLEoS.setChecked(config.getboolean("Transport", "RhoLEoS"))
def value(self, config):
"""Function to wizard result"""
if not config.has_section("Transport"):
config.add_section("Transport")
config.set("Transport", "RhoL", str(self.RhoL.currentIndex()))
config.set("Transport", "Corr_RhoL",
str(self.Corr_RhoL.currentIndex()))
config.set("Transport", "MuL", str(self.MuL.currentIndex()))
config.set("Transport", "Corr_MuL", str(self.Corr_MuL.currentIndex()))
config.set("Transport", "MuG", str(self.MuG.currentIndex()))
config.set("Transport", "Corr_MuG", str(self.Corr_MuG.currentIndex()))
config.set("Transport", "Tension", str(self.Tension.currentIndex()))
config.set("Transport", "ThCondL", str(self.ThCondL.currentIndex()))
config.set("Transport", "Corr_ThCondL",
str(self.Corr_ThCondL.currentIndex()))
config.set("Transport", "ThCondG", str(self.ThCondG.currentIndex()))
config.set("Transport", "Corr_ThCondG",
str(self.Corr_ThCondG.currentIndex()))
config.set("Transport", "Pv", str(self.Pv.currentIndex()))
config.set("Transport", "f_acent", str(self.w.currentIndex()))
config.set("Transport", "RhoLMix", str(self.RhoLMix.currentIndex()))
config.set("Transport", "Corr_RhoLMix",
str(self.Corr_RhoLMix.currentIndex()))
config.set("Transport", "MuGMix", str(self.MuGMix.currentIndex()))
config.set("Transport", "Corr_MuGMix",
str(self.Corr_MuGMix.currentIndex()))
config.set("Transport", "MuLMix", str(self.MuLMix.currentIndex()))
config.set("Transport", "ThCondLMix", str(self.ThLMix.currentIndex()))
config.set("Transport", "ThCondGMix", str(self.ThGMix.currentIndex()))
config.set("Transport", "Corr_ThCondGMix",
str(self.Corr_ThGMix.currentIndex()))
config.set("Transport", "RhoLEoS", str(self.rhoLEoS.isChecked()))
return config
@classmethod
def default(cls, config):
config.add_section("Transport")
config.set("Transport", "RhoL", "0")
config.set("Transport", "Corr_RhoL", "0")
config.set("Transport", "MuL", "0")
config.set("Transport", "Corr_MuL", "0")
config.set("Transport", "MuG", "0")
config.set("Transport", "Tension", "0")
config.set("Transport", "ThCondL", "0")
config.set("Transport", "Corr_ThCondL", "0")
config.set("Transport", "ThCondG", "0")
config.set("Transport", "Pv", "0")
config.set("Transport", "f_acent", "0")
config.set("Transport", "RhoLMix", "0")
config.set("Transport", "Corr_RhoLMix", "0")
config.set("Transport", "MuLMix", "0")
config.set("Transport", "MuGMix", "0")
config.set("Transport", "Corr_MuGMix", "0")
config.set("Transport", "ThCondLMix", "0")
config.set("Transport", "ThCondGMix", "0")
config.set("Transport", "Corr_ThCondGMix", "0")
config.set("Transport", "RhoLEoS", "False")
return config
class Dialog(QtWidgets.QDialog):
    """Modal dialog wrapping the transport-properties configuration widget."""

    def __init__(self, config=None, parent=None):
        super(Dialog, self).__init__(parent)
        self.setWindowTitle(QtWidgets.QApplication.translate(
            "pychemqt", "Transport Properties Methods"))
        # The actual configuration UI lives in the reusable widget.
        self.datos = UI_confTransport_widget(config)
        buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Cancel |
            QtWidgets.QDialogButtonBox.Ok)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(self.datos)
        vbox.addWidget(buttons)

    def value(self, config):
        """Return *config* updated with the inner widget's selection."""
        return self.datos.value(config)
if __name__ == "__main__":
    # Standalone smoke test: show the dialog on its own.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # Fix: the original did ``Dialog = Dialog()``, shadowing the class
    # with its instance; use a distinct lowercase name instead.
    dialog = Dialog()
    dialog.show()
    sys.exit(app.exec_())
|
jjgomera/pychemqt
|
tools/UI_confTransport.py
|
Python
|
gpl-3.0
| 14,642
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.pool import Pool
from .product import *
from .move import *
def register():
    """Register this module's model classes in the Tryton pool."""
    models = (
        Template,
        Product,
        Move,
    )
    Pool.register(*models, module='product_cost_fifo', type_='model')
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/modules/product_cost_fifo/__init__.py
|
Python
|
gpl-3.0
| 354
|