code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
'''
Support for Postfix
This module is currently little more than a config file viewer and editor. It
is able to read the master.cf file (which is one style) and files in the style
of main.cf (which is a different style, that is used in multiple postfix
configuration files).
The design of this module is such that when files are edited, a minimum of
changes are made to them. Each file should look as if it has been edited by
hand; order, comments and whitespace are all preserved.
'''
from __future__ import absolute_import
# Import python libs
import re
import logging
# Import salt libs
import salt.utils
# Matches a line that begins with whitespace, i.e. a continuation line in
# main.cf-style files ("Starts With WhiteSpace").
SWWS = re.compile(r'^\s')

log = logging.getLogger(__name__)

# Default locations of the Postfix configuration files.
MAIN_CF = '/etc/postfix/main.cf'
MASTER_CF = '/etc/postfix/master.cf'
def __virtual__():
    '''
    Only load the module if the ``postfix`` binary is present on the minion.
    '''
    # salt.utils.which returns the path (truthy) or None, so a bool cast
    # yields exactly the True/False contract salt expects here.
    return bool(salt.utils.which('postfix'))
def _parse_master(path=MASTER_CF):
    '''
    Parse the master.cf file. This file is essentially a whitespace-delimited
    columnar file. The columns are: service, type, private (yes), unpriv (yes),
    chroot (yes), wakeup (never), maxproc (100), command + args.

    This function parses out the columns, leaving empty lines and comments
    intact. Where the value doesn't detract from the default, a dash (-) will
    be used.

    Returns a dict of the active config lines, and a list of the entire file,
    in order. These complement each other.
    '''
    with salt.utils.fopen(path, 'r') as fh_:
        full_conf = fh_.read()

    # Condense the file based on line continuations, but keep order, comments
    # and whitespace
    conf_list = []  # every line in file order: str for blanks/comments, dict for entries
    conf_dict = {}  # '<service> <conn_type>' -> entry dict (active entries only)
    for line in full_conf.splitlines():
        if not line.strip() or line.strip().startswith('#'):
            # Preserve blank and comment lines verbatim.
            conf_list.append(line)
            continue
        comps = line.strip().split()
        # NOTE(review): an active line with fewer than 7 columns (e.g. a
        # wrapped/continuation entry) would raise IndexError below -- confirm
        # the managed master.cf files never wrap entries across lines.
        conf_line = {
            'service': comps[0],
            'conn_type': comps[1],
            'private': comps[2],
            'unpriv': comps[3],
            'chroot': comps[4],
            'wakeup': comps[5],
            'maxproc': comps[6],
            'command': ' '.join(comps[7:]),
        }
        dict_key = '{0} {1}'.format(comps[0], comps[1])
        conf_list.append(conf_line)
        conf_dict[dict_key] = conf_line
    return conf_dict, conf_list
def show_master(path=MASTER_CF):
    '''
    Return a dict of active config values. This does not include comments,
    spacing or order.

    The data returned from this function should not be used for direct
    modification of the master.cf file; other functions are available for
    that.

    CLI Examples:

        salt <minion> postfix.show_master
        salt <minion> postfix.show_master path=/path/to/master.cf
    '''
    # Only the dict of active entries is needed; the ordered line list that
    # _parse_master also returns is discarded here.
    return _parse_master(path)[0]
def set_master(service,
               conn_type,
               private='y',
               unpriv='y',
               chroot='y',
               wakeup='n',
               maxproc='100',
               command='',
               write_conf=True,
               path=MASTER_CF):
    '''
    Set a single config value in the master.cf file. If the value does not
    already exist, it will be appended to the end.

    Because of shell parsing issues, '-' cannot be set as a value, as is normal
    in the master.cf file; either 'y', 'n' or a number should be used when
    calling this function from the command line. If the value used matches the
    default, it will internally be converted to a '-'. Calling this function
    from the Python API is not affected by this limitation

    The settings and their default values, in order, are: service (required),
    conn_type (required), private (y), unpriv (y), chroot (y), wakeup (n),
    maxproc (100), command (required).

    By default, this function will write out the changes to the master.cf file,
    and then returns the full contents of the file. By setting the
    ``write_conf`` option to ``False``, it will skip writing the file.

    CLI Example:

        salt <minion> postfix.set_master smtp inet n y n n 100 smtpd
    '''
    conf_dict, conf_list = _parse_master(path)

    target_key = '{0} {1}'.format(service, conn_type)
    replacement = _format_master(service, conn_type, private, unpriv,
                                 chroot, wakeup, maxproc, command)

    new_conf = []
    for entry in conf_list:
        if not isinstance(entry, dict):
            # Comment or blank line: pass through untouched.
            new_conf.append(entry)
        elif (entry['service'], entry['conn_type']) == (service, conn_type):
            # This is the one entry being changed.
            new_conf.append(replacement)
        else:
            # Unchanged entry, but it still needs to be rendered back into
            # the canonical column layout.
            new_conf.append(_format_master(**entry))

    if target_key not in conf_dict:
        # The service/type pair did not exist yet; append it at the end.
        new_conf.append(replacement)

    if write_conf:
        _write_conf(new_conf, path)

    return '\n'.join(new_conf)
def _format_master(service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command):
'''
Format the given values into the style of line normally used in the
master.cf file.
'''
#==========================================================================
#service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
#==========================================================================
#smtp inet n - n - - smtpd
if private == 'y':
private = '-'
if unpriv == 'y':
unpriv = '-'
if chroot == 'y':
chroot = '-'
if wakeup == 'n':
wakeup = '-'
maxproc = str(maxproc)
if maxproc == '100':
maxproc = '-'
conf_line = '{0:9s} {1:5s} {2:7s} {3:7s} {4:7s} {5:7s} {6:7s} {7}'.format(
service,
conn_type,
private,
unpriv,
chroot,
wakeup,
maxproc,
command,
)
#print(conf_line)
return conf_line
def _parse_main(path=MAIN_CF):
    '''
    Parse files in the style of main.cf. This is not just a "name = value" file;
    there are other rules:

    * Comments start with #
    * Any whitespace at the beginning of a line denotes that that line is a
      continuation from the previous line.
    * The whitespace rule applies to comments.
    * Keys defined in the file may be referred to as variables further down in
      the file.

    Returns a tuple of (pairs, conf_list): pairs maps each key to its (string)
    value, and conf_list is the full file in order with continuation lines
    joined onto their parent entry.
    '''
    with salt.utils.fopen(path, 'r') as fh_:
        full_conf = fh_.read()

    # Condense the file based on line continuations, but keep order, comments
    # and whitespace
    conf_list = []
    for line in full_conf.splitlines():
        if not line.strip():
            # Blank lines are kept as-is.
            conf_list.append(line)
            continue
        if re.match(SWWS, line):
            if not conf_list:
                # This should only happen at the top of the file
                conf_list.append(line)
                continue
            if not isinstance(conf_list[-1], str):
                # NOTE(review): conf_list only ever holds strings in this
                # function, so this branch looks unreachable -- and if it did
                # fire it would silently *discard* the previous entry.
                # Confirm the intent before relying on it.
                conf_list[-1] = ''
            # This line is a continuation of the previous line
            conf_list[-1] = '\n'.join([conf_list[-1], line])
        else:
            conf_list.append(line)

    # Extract just the actual key/value pairs
    pairs = {}
    for line in conf_list:
        if not line.strip():
            continue
        if line.startswith('#'):
            continue
        # Re-join on '=' so values containing '=' survive intact.
        comps = line.split('=')
        pairs[comps[0].strip()] = '='.join(comps[1:]).strip()

    # Return both sets of data, they complement each other elsewhere
    return pairs, conf_list
def show_main(path=MAIN_CF):
    '''
    Return a dict of active config values. This does not include comments,
    spacing or order. Bear in mind that order is functionally important in the
    main.cf file, since keys can be referred to as variables. This means that
    the data returned from this function should not be used for direct
    modification of the main.cf file; other functions are available for that.

    CLI Examples:

        salt <minion> postfix.show_main
        salt <minion> postfix.show_main path=/path/to/main.cf
    '''
    # Discard the ordered line list; only the key/value mapping is exposed.
    return _parse_main(path)[0]
def set_main(key, value, path=MAIN_CF):
    '''
    Set a single config value in the main.cf file. If the value does not already
    exist, it will be appended to the end.

    CLI Example:

        salt <minion> postfix.set_main mailq_path /usr/bin/mailq
    '''
    pairs, conf_list = _parse_main(path)

    new_conf = []
    if key in pairs:
        for line in conf_list:
            # Only rewrite the line whose left-hand side is exactly ``key``.
            # The previous startswith() test also clobbered any longer key
            # sharing the prefix (e.g. setting ``mydomain`` would rewrite
            # ``mydomain_fallback`` as well).
            if '=' in line and line.split('=')[0].strip() == key:
                new_conf.append('{0} = {1}'.format(key, value))
            else:
                new_conf.append(line)
    else:
        # Key not present yet: append it to the end of the file.
        conf_list.append('{0} = {1}'.format(key, value))
        new_conf = conf_list

    _write_conf(new_conf, path)
    return new_conf
def _write_conf(conf, path=MAIN_CF):
    '''
    Write out a configuration file.

    ``conf`` is a list of lines; strings are written verbatim, while dicts
    (unmodified master.cf entries coming from _parse_master) are rendered
    back into the canonical column layout.
    '''
    with salt.utils.fopen(path, 'w') as fh_:
        for line in conf:
            if isinstance(line, dict):
                # Render the entry's values in master.cf column order.
                # The previous ``' '.join(line)`` joined the dict *keys*,
                # which would have corrupted the file.
                line = _format_master(**line)
            fh_.write(line)
            fh_.write('\n')
def show_queue():
    '''
    Show contents of the mail queue

    Parses ``mailq`` output into a list of dicts with keys ``queue_id``,
    ``size``, ``timestamp``, ``sender`` and ``recipient``.

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.show_queue
    '''
    out = __salt__['cmd.run']('mailq').splitlines()
    queue = []

    # Wrapper/noise lines: the column header and the size footer (both start
    # with '-'), postqueue diagnostics, and "Mail queue is empty".  The old
    # pattern '^[-|postqueue:|Mail]' was a *character class*, so it also
    # silently discarded any queue entry whose ID starts with 'M'.
    wrapper_pattern = re.compile(r'^(-|postqueue:|Mail queue)')
    queue_pattern = re.compile(r"(?P<queue_id>^[A-Z0-9]+)\s+(?P<size>\d+)\s(?P<timestamp>\w{3}\s\w{3}\s\d{1,2}\s\d{2}\:\d{2}\:\d{2})\s+(?P<sender>.+)")
    recipient_pattern = re.compile(r"^\s+(?P<recipient>.+)")

    # Initialize the accumulator fields so a stray blank line before the
    # first record cannot raise NameError.
    queue_id = size = timestamp = sender = recipient = None
    for line in out:
        if wrapper_pattern.match(line):
            # discard in-queue wrapper
            continue
        match = queue_pattern.match(line)
        if match:
            queue_id = match.group('queue_id')
            size = match.group('size')
            timestamp = match.group('timestamp')
            sender = match.group('sender')
            continue
        match = recipient_pattern.match(line)
        if match:  # recipient/s
            recipient = match.group('recipient')
        elif not line and queue_id is not None:  # end of record
            queue.append({'queue_id': queue_id, 'size': size,
                          'timestamp': timestamp, 'sender': sender,
                          'recipient': recipient})

    return queue
def delete(queue_id):
    '''
    Delete message(s) from the mail queue

    Returns a dict with a human-readable ``message`` and a boolean ``result``.

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.delete 5C33CA0DEA
        salt '*' postfix.delete ALL
    '''
    ret = {'message': '',
           'result': True
           }

    if not queue_id:
        # Bail out instead of running a bare `postsuper -d` with no ID.
        log.error('Require argument queue_id')
        ret['message'] = 'Require argument queue_id'
        ret['result'] = False
        return ret

    if queue_id != 'ALL':
        queue = show_queue()
        _message = None
        for item in queue:
            if item['queue_id'] == queue_id:
                _message = item

        if not _message:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret

    cmd = 'postsuper -d {0}'.format(queue_id)
    result = __salt__['cmd.run_all'](cmd)

    if result['retcode'] == 0:
        if queue_id == 'ALL':
            ret['message'] = 'Successfully removed all messages'
        else:
            ret['message'] = 'Successfully removed message with queue id {0}'.format(queue_id)
    else:
        if queue_id == 'ALL':
            # (typo fix: was "Unable to removed all messages")
            ret['message'] = 'Unable to remove all messages'
        else:
            ret['message'] = 'Unable to remove message with queue id {0}: {1}'.format(queue_id, result['stderr'])

    return ret
def hold(queue_id):
    '''
    Put message(s) on hold from the mail queue

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.hold 5C33CA0DEA
        salt '*' postfix.hold ALL
    '''
    ret = {'message': '', 'result': True}

    if not queue_id:
        log.error('Require argument queue_id')

    if queue_id != 'ALL':
        # Verify the message actually exists before shelling out.
        matches = [item for item in show_queue()
                   if item['queue_id'] == queue_id]
        if not matches:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret

    result = __salt__['cmd.run_all']('postsuper -h {0}'.format(queue_id))
    succeeded = result['retcode'] == 0

    if queue_id == 'ALL':
        ret['message'] = ('Successfully placed all messages on hold'
                          if succeeded
                          else 'Unable to place all messages on hold')
    else:
        ret['message'] = ('Successfully placed message on hold with queue id {0}'.format(queue_id)
                          if succeeded
                          else 'Unable to place message on hold with queue id {0}: {1}'.format(queue_id, result['stderr']))
    return ret
def unhold(queue_id):
    '''
    Set held message(s) in the mail queue to unheld

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.unhold 5C33CA0DEA
        salt '*' postfix.unhold ALL
    '''
    ret = {'message': '', 'result': True}

    if not queue_id:
        log.error('Require argument queue_id')

    if queue_id != 'ALL':
        # Look the message up first so we can report a clear error.
        target = None
        for entry in show_queue():
            if entry['queue_id'] == queue_id:
                target = entry
        if target is None:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret

    result = __salt__['cmd.run_all']('postsuper -H {0}'.format(queue_id))
    succeeded = result['retcode'] == 0

    if succeeded:
        ret['message'] = ('Successfully set all message as unheld'
                          if queue_id == 'ALL'
                          else 'Successfully set message as unheld with queue id {0}'.format(queue_id))
    else:
        ret['message'] = ('Unable to set all message as unheld.'
                          if queue_id == 'ALL'
                          else 'Unable to set message as unheld with queue id {0}: {1}'.format(queue_id, result['stderr']))
    return ret
def requeue(queue_id):
    '''
    Requeue message(s) in the mail queue

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.requeue 5C33CA0DEA
        salt '*' postfix.requeue ALL
    '''
    ret = {'message': '', 'result': True}

    if not queue_id:
        log.error('Required argument queue_id')

    if queue_id != 'ALL':
        # Confirm the message exists before invoking postsuper.
        found = [entry for entry in show_queue()
                 if entry['queue_id'] == queue_id]
        if not found:
            ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
            ret['result'] = False
            return ret

    result = __salt__['cmd.run_all']('postsuper -r {0}'.format(queue_id))

    if queue_id == 'ALL':
        if result['retcode'] == 0:
            ret['message'] = 'Successfully requeued all messages'
        else:
            ret['message'] = 'Unable to requeue all messages'
    else:
        if result['retcode'] == 0:
            ret['message'] = 'Successfully requeued message with queue id {0}'.format(queue_id)
        else:
            ret['message'] = 'Unable to requeue message with queue id {0}: {1}'.format(queue_id, result['stderr'])
    return ret
| smallyear/linuxLearn | salt/salt/modules/postfix.py | Python | apache-2.0 | 15,966 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
import pooler
from osv import osv, fields
class check_paid_wizard(osv.osv_memory):
    '''Transient (memory-only) wizard that launches the "check paid"
    verification of payment lines in a background thread.'''
    _name = 'check.paid.wizard'
    _description = 'Check payments in orders'

    def _check_paid(self, cr, uid, ids, context=None):
        """
        Worker body executed in a separate thread by check_paid().

        @param self: The object pointer.
        @param cr: A database cursor (only its dbname is used here)
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        line_obj = self.pool.get('payment.line')
        # As this function runs in a new thread, open a new cursor because
        # the caller's cursor may already be closed.
        new_cr = pooler.get_db(cr.dbname).cursor()
        for line in self.browse(new_cr, uid, ids, context=context):
            # NOTE(review): `line` is never used inside the loop, so
            # check_paid() is invoked once per selected wizard record with
            # identical arguments -- confirm this repetition is intended.
            line_obj.check_paid(new_cr, uid, automatic=False,
                                use_new_cursor=new_cr.dbname,
                                context=context)
        # close the new cursor
        new_cr.close()
        return {}

    def check_paid(self, cr, uid, ids, context=None):
        """
        Button handler: spawn the verification in a thread and close the
        wizard window immediately.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(target=self._check_paid, args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}

# Instantiate to register the wizard with the OpenERP ORM (pre-v7 style).
check_paid_wizard()
| pablocm-aserti/l10n_es_gestion_comercial_v8_WIP | l10n_es_gestion_comercial/wizard/check_paid.py | Python | agpl-3.0 | 2,454 |
#!/usr/bin/env python
from app import app
from flask_script import Manager
# Expose the Flask application through Flask-Script's CLI manager.
manager = Manager(app)

if __name__ == "__main__":
    # Dispatch command-line commands (e.g. `python manage.py runserver`).
    manager.run()
| aitoehigie/gidimagic | manage.py | Python | mit | 143 |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import binascii
import unittest
from nose.tools import *
import struct
from ryu import exception
from ryu.ofproto import ofproto_common, ofproto_parser
from ryu.ofproto import ofproto_v1_0, ofproto_v1_0_parser
import logging
LOG = logging.getLogger(__name__)
class TestOfproto_Parser(unittest.TestCase):
    """Run ofproto_parser.header()/msg() against canned OpenFlow 1.0 frames.

    NOTE: this is Python 2 style test code (nose ``eq_``/``ok_`` helpers).
    """

    def setUp(self):
        LOG.debug('setUp')
        # OFPT_HELLO header: version=1, type=0, length=8, xid=1.
        self.bufHello = binascii.unhexlify('0100000800000001')
        # OFPT_FEATURES_REPLY (0xb0 bytes) with three ports: 1, 2 and
        # 0xfffe (OFPP_LOCAL).
        fr = '010600b0000000020000000000000abc' \
            + '00000100010000000000008700000fff' \
            + '0002aefa39d2b9177472656d61302d30' \
            + '00000000000000000000000000000000' \
            + '000000c0000000000000000000000000' \
            + 'fffe723f9a764cc87673775f30786162' \
            + '63000000000000000000000100000001' \
            + '00000082000000000000000000000000' \
            + '00012200d6c5a1947472656d61312d30' \
            + '00000000000000000000000000000000' \
            + '000000c0000000000000000000000000'
        self.bufFeaturesReply = binascii.unhexlify(fr)
        # OFPT_PACKET_IN carrying a small UDP-in-IPv4 Ethernet frame.
        pi = '010a005200000000000001010040' \
            + '00020000000000000002000000000001' \
            + '080045000032000000004011f967c0a8' \
            + '0001c0a8000200010001001e00000000' \
            + '00000000000000000000000000000000' \
            + '00000000'
        self.bufPacketIn = binascii.unhexlify(pi)

    def tearDown(self):
        LOG.debug('tearDown')
        pass

    def testHello(self):
        # header() splits the fixed 8-byte OpenFlow header into its fields.
        (version,
         msg_type,
         msg_len,
         xid) = ofproto_parser.header(self.bufHello)
        eq_(version, 1)
        eq_(msg_type, 0)
        eq_(msg_len, 8)
        eq_(xid, 1)

    def testFeaturesReply(self):
        # msg() should dispatch to the OF1.0 OFPSwitchFeatures parser and
        # index the ports by port number.
        (version,
         msg_type,
         msg_len,
         xid) = ofproto_parser.header(self.bufFeaturesReply)
        msg = ofproto_parser.msg(self,
                                 version,
                                 msg_type,
                                 msg_len,
                                 xid,
                                 self.bufFeaturesReply)
        LOG.debug(msg)
        ok_(isinstance(msg, ofproto_v1_0_parser.OFPSwitchFeatures))
        LOG.debug(msg.ports[65534])
        ok_(isinstance(msg.ports[1], ofproto_v1_0_parser.OFPPhyPort))
        ok_(isinstance(msg.ports[2], ofproto_v1_0_parser.OFPPhyPort))
        ok_(isinstance(msg.ports[65534], ofproto_v1_0_parser.OFPPhyPort))

    def testPacketIn(self):
        (version,
         msg_type,
         msg_len,
         xid) = ofproto_parser.header(self.bufPacketIn)
        msg = ofproto_parser.msg(self,
                                 version,
                                 msg_type,
                                 msg_len,
                                 xid,
                                 self.bufPacketIn)
        LOG.debug(msg)
        ok_(isinstance(msg, ofproto_v1_0_parser.OFPPacketIn))

    @raises(AssertionError)
    def test_check_msg_len(self):
        # A length field disagreeing with the buffer size must be rejected.
        (version,
         msg_type,
         msg_len,
         xid) = ofproto_parser.header(self.bufPacketIn)
        msg_len = len(self.bufPacketIn) + 1
        ofproto_parser.msg(self,
                           version,
                           msg_type,
                           msg_len,
                           xid,
                           self.bufPacketIn)

    @raises(exception.OFPUnknownVersion)
    def test_check_msg_parser(self):
        # An unsupported protocol version must raise OFPUnknownVersion.
        (version,
         msg_type,
         msg_len,
         xid) = ofproto_parser.header(self.bufPacketIn)
        version = 0xff
        ofproto_parser.msg(self,
                           version,
                           msg_type,
                           msg_len,
                           xid,
                           self.bufPacketIn)
class TestMsgBase(unittest.TestCase):
    """ Test case for ofproto_parser.MsgBase
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        pass

    def test_set_xid(self):
        xid = 3841413783
        c = ofproto_parser.MsgBase(object)
        c.set_xid(xid)
        eq_(xid, c.xid)

    @raises(AssertionError)
    def test_set_xid_check_xid(self):
        # set_xid() asserts that an xid has not already been assigned.
        xid = 2160492514
        c = ofproto_parser.MsgBase(object)
        c.xid = xid
        c.set_xid(xid)

    def _test_parser(self, msg_type=ofproto_v1_0.OFPT_HELLO):
        # Build a raw OF1.0 header (+ trailing payload bytes) and feed it to
        # the OFPHello parser; header fields must round-trip unchanged.
        version = ofproto_v1_0.OFP_VERSION
        msg_len = ofproto_v1_0.OFP_HEADER_SIZE
        xid = 2183948390
        data = '\x00\x01\x02\x03'

        fmt = ofproto_v1_0.OFP_HEADER_PACK_STR
        buf = struct.pack(fmt, version, msg_type, msg_len, xid) \
            + data

        res = ofproto_v1_0_parser.OFPHello.parser(
            object, version, msg_type, msg_len, xid, bytearray(buf))

        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        # NOTE: buffer() is Python 2 only.
        eq_(buffer(buf), res.buf)

        # test __str__()
        # Scan the string representation for 'field value' pairs and check
        # each value is the hex form of the expected field.
        list_ = ('version:', 'msg_type', 'xid')
        check = {}
        str_ = str(res)
        str_ = str_.rsplit()
        i = 0
        for s in str_:
            if s in list_:
                check[str_[i]] = str_[i + 1]
            i += 1

        eq_(hex(ofproto_v1_0.OFP_VERSION).find(check['version:']), 0)
        eq_(hex(ofproto_v1_0.OFPT_HELLO).find(check['msg_type']), 0)
        eq_(hex(xid).find(check['xid']), 0)

        return True

    def test_parser(self):
        ok_(self._test_parser())

    @raises(AssertionError)
    def test_parser_check_msg_type(self):
        # Parsing with a mismatched message type must be rejected.
        self._test_parser(ofproto_v1_0.OFPT_ERROR)

    def _test_serialize(self):

        class Datapath(object):
            ofproto = ofproto_v1_0
            ofproto_parser = ofproto_v1_0_parser

        c = ofproto_v1_0_parser.OFPHello(Datapath)
        c.serialize()
        eq_(ofproto_v1_0.OFP_VERSION, c.version)
        eq_(ofproto_v1_0.OFPT_HELLO, c.msg_type)
        eq_(0, c.xid)
        return True

    def test_serialize(self):
        ok_(self._test_serialize())
class TestMsgStrAttr(unittest.TestCase):
    """ Test case for ofproto_parser.msg_str_attr
    """

    def test_msg_str_attr(self):
        # msg_str_attr() should append "attr_name attr_value" to the buffer.
        class Check(object):
            check = 'msg_str_attr_test'

        c = Check()
        buf = ''

        res = ofproto_parser.msg_str_attr(c, buf, ('check',))
        str_ = str(res)
        str_ = str_.rsplit()
        eq_('check', str_[0])
        eq_('msg_str_attr_test', str_[1])
| haniehrajabi/ryu | ryu/tests/unit/ofproto/test_ofproto_parser.py | Python | apache-2.0 | 7,100 |
"""Module grouping tests for the bodemclassificatie search module."""
from owslib.fes import PropertyIsEqualTo
from pydov.search.bodemclassificatie import BodemclassificatieSearch
from pydov.types.bodemclassificatie import Bodemclassificatie
from pydov.util.dovutil import build_dov_url
from tests.abstract import AbstractTestSearch
# Canned DOV service responses; these module-level names are picked up by the
# shared AbstractTestSearch fixtures to mock the remote WFS/metadata calls.
location_md_metadata = 'tests/data/types/bodemclassificatie/md_metadata.xml'
location_fc_featurecatalogue = \
    'tests/data/types/bodemclassificatie/fc_featurecatalogue.xml'
location_wfs_describefeaturetype = \
    'tests/data/types/bodemclassificatie/wfsdescribefeaturetype.xml'
location_wfs_getfeature = 'tests/data/types/bodemclassificatie/wfsgetfeature.xml'
location_wfs_feature = 'tests/data/types/bodemclassificatie/feature.xml'
location_dov_xml = 'tests/data/types/bodemclassificatie/bodemclassificatie.xml'
location_xsd_base = 'tests/data/types/bodemclassificatie/xsd_*.xml'
class TestBodemclassificatieSearch(AbstractTestSearch):
    """Class grouping tests for the bodemclassificatie search module.

    All test logic lives in AbstractTestSearch; this subclass only supplies
    the type-specific parameters it exercises.
    """

    # Search object and DOV datatype under test.
    search_instance = BodemclassificatieSearch()
    datatype_class = Bodemclassificatie

    # A query known to match exactly one record.
    valid_query_single = PropertyIsEqualTo(
        propertyname='pkey_bodemclassificatie',
        literal=build_dov_url('data/belgischebodemclassificatie/2018-000146'))

    # Field names used by the negative/positive return-field tests.
    inexistent_field = 'onbestaand'
    wfs_field = 'bodemtype'
    xml_field = None

    valid_returnfields = ('pkey_bodemclassificatie', 'bodemtype',
                          'classificatietype')
    valid_returnfields_subtype = None
    valid_returnfields_extra = None

    # Columns expected in the default result dataframe.
    df_default_columns = [
        'pkey_bodemclassificatie', 'pkey_bodemlocatie', 'x', 'y', 'mv_mtaw',
        'classificatietype', 'bodemtype', 'auteurs'
    ]
| DOV-Vlaanderen/pydov | tests/test_search_bodemclassificatie.py | Python | mit | 1,677 |
# pyca.simple
# One dimensional Cellular Automata
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Jan 31 10:49:41 2014 -0500
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: simple.py [] benjamin@bengfort.com $
"""
Space-Time animation for one dimensional cellular automata.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
##########################################################################
## Automata
##########################################################################
class Automata(object):
    """One-dimensional elementary cellular automaton (Wolfram rule numbers).

    The space-time diagram is stored as a (height, width) numpy array where
    row 0 is the initial generation and each iteration fills the next row.
    The row topology is periodic (the two ends wrap around).
    """

    def __init__(self, rule, width=100, height=100, randstart=False):
        """
        :param rule: Wolfram rule number (int or numeric string) or a
            precomputed 8-entry transition table.
        :param width: number of cells per generation.
        :param height: number of generations (rows) to compute.
        :param randstart: seed row 0 randomly instead of a single center cell.
        """
        self.width = width
        self.height = height
        self.rule = rule          # goes through the property setter below
        self.time = 0
        self.init_world(randstart)

    @property
    def rule(self):
        return self._rule

    @rule.setter
    def rule(self, rule):
        if isinstance(rule, str):
            rule = int(rule)
        if isinstance(rule, int):
            # Bit i of the rule number is the successor state for the
            # neighborhood with value i.  Bit operations replace the old
            # `rule / pow(2, i) % 2`, which yields wrong float values
            # (e.g. 1.5) under Python 3's true division.
            self._rule = [(rule >> i) & 1 for i in range(8)]
        else:
            # Assume an already-expanded transition table.
            self._rule = rule

    @property
    def shape(self):
        """Numpy-style (rows, columns) shape of the space-time grid."""
        return (self.height, self.width)

    def init_world(self, randstart=False):
        """Zero the grid and seed generation 0; returns the grid."""
        self.world = np.zeros(shape=self.shape)
        if randstart:
            self.world[0] = np.random.choice((0, 1), self.width, p=(0.2, 0.8))
        else:
            # Single live cell in the middle (// keeps the index an int
            # on Python 3; `/` would raise for odd float indexing).
            self.world[0, self.width // 2] = 1
        return self.world

    def compute_states(self, state):
        """Yield the next state of every cell in ``state`` (periodic ends)."""
        size = self.width
        for j in range(size):
            left = state[(j - 1) % size]
            cell = state[j]
            right = state[(j + 1) % size]
            # Neighborhood value 0..7 indexes the transition table.
            yield self.rule[int(4 * left + 2 * cell + right)]

    def __len__(self):
        return self.width * self.height

    def __iter__(self):
        return self

    def __next__(self):
        # Get state at current time, then increment time
        state = self.world[self.time]
        self.time += 1

        # Halting condition: the grid is full.
        if self.time >= self.height:
            raise StopIteration()

        # Calculate the world at this timestep
        self.world[self.time] = np.array(list(self.compute_states(state)))
        return self.world[self.time]

    next = __next__  # Python 2 iterator protocol
if __name__ == '__main__':
    from animation import AutomataAnimation

    # Rule 110 demo with a random starting row; the commented 1280x720
    # variant renders a full-HD space-time diagram from a single seed cell.
    #automata = Automata(110, width=1280, height=720, randstart=False)
    automata = Automata(110, width=100, height=100, randstart=True)
    animation = AutomataAnimation(automata)
    animation.show()
| bbengfort/cellular-automata | pyca/simple.py | Python | mit | 2,656 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import pymongo
import json
import bson.json_util as bju
import pandas as pd
from uuid import UUID
# Our imports
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.tests.common as etc
class TestFilterAccuracy(unittest.TestCase):
    """Exercise emission's accuracy-filtering intake step against a canned
    location trace (205 points from a tablet)."""

    def setUp(self):
        # We need to access the database directly sometimes in order to
        # forcibly insert entries for the tests to pass. But we put the import
        # in here to reduce the temptation to use the database directly elsewhere.
        import emission.core.get_database as edb
        import uuid

        self.testUUID = UUID('079e0f1a-c440-3d7c-b0e7-de160f748e35')
        with open("emission/tests/data/smoothing_data/tablet_2015-11-03") as fp:
            self.entries = json.load(fp,
                                     object_hook=bju.object_hook)
        tsdb = edb.get_timeseries_db()
        for entry in self.entries:
            entry["user_id"] = self.testUUID
            tsdb.insert_one(entry)
        self.ts = esta.TimeSeries.get_time_series(self.testUUID)

    def tearDown(self):
        import emission.core.get_database as edb
        edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
        edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID})

    def testEmptyCallToPriorDuplicate(self):
        """check_prior_duplicate must handle an empty dataframe gracefully."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)

        # Check call to check duplicate with a zero length dataframe
        entry = unfiltered_points_df.iloc[5]
        self.assertEqual(eaicf.check_prior_duplicate(pd.DataFrame(), 0, entry), False)

    def testEmptyCall(self):
        """filter_accuracy must not throw on a user with no data."""
        # Check call to the entire filter accuracy with a zero length timeseries
        import emission.core.get_database as edb
        # delete_many for consistency with tearDown; Collection.remove is
        # deprecated in pymongo.
        edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
        # We expect that this should not throw
        eaicf.filter_accuracy(self.testUUID)
        self.assertEqual(len(self.ts.get_data_df("background/location")), 0)

    def testCheckPriorDuplicate(self):
        """A point repeated earlier in the frame is flagged as a duplicate."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)

        entry = unfiltered_points_df.iloc[5]
        # pd.concat replaces DataFrame.append (removed in pandas 2.0).
        unfiltered_appended_df = pd.concat(
            [pd.DataFrame([entry] * 5), unfiltered_points_df]).reset_index()
        logging.debug("unfiltered_appended_df = %s" % unfiltered_appended_df[["fmt_time"]].head())

        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 0, entry), False)
        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_appended_df, 5, entry), True)
        self.assertEqual(eaicf.check_prior_duplicate(unfiltered_points_df, 5, entry), False)

    def testConvertToFiltered(self):
        """Converting strips _id and rewrites the metadata key."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)

        entry_from_df = unfiltered_points_df.iloc[5]
        entry_copy = eaicf.convert_to_filtered(self.ts.get_entry_at_ts("background/location",
                                                                      "metadata.write_ts",
                                                                      entry_from_df.metadata_write_ts))
        self.assertNotIn("_id", entry_copy)
        self.assertEqual(entry_copy["metadata"]["key"], "background/filtered_location")

    def testExistingFilteredLocation(self):
        """Detection of an already-converted filtered_location entry."""
        time_query = epq.get_time_range_for_accuracy_filtering(self.testUUID)
        unfiltered_points_df = self.ts.get_data_df("background/location", time_query)
        self.assertEqual(len(unfiltered_points_df), 205)

        entry_from_df = unfiltered_points_df.iloc[5]
        logging.debug("entry_from_df: data.ts = %s, metadata.ts = %s" %
                      (entry_from_df.ts, entry_from_df.metadata_write_ts))
        self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), False)

        entry_copy = self.ts.get_entry_at_ts("background/location", "metadata.write_ts",
                                             entry_from_df.metadata_write_ts)
        self.ts.insert(eaicf.convert_to_filtered(entry_copy))
        self.assertEqual(eaicf.check_existing_filtered_location(self.ts, entry_from_df), True)

    def testFilterAccuracy(self):
        """End-to-end run: 205 raw points reduce to 124 filtered points."""
        unfiltered_points_df = self.ts.get_data_df("background/location", None)
        self.assertEqual(len(unfiltered_points_df), 205)
        pre_filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
        self.assertEqual(len(pre_filtered_points_df), 0)

        eaicf.filter_accuracy(self.testUUID)
        filtered_points_df = self.ts.get_data_df("background/filtered_location", None)
        self.assertEqual(len(filtered_points_df), 124)
if __name__ == '__main__':
    # Configure emission's logging before handing control to unittest.
    etc.configLogging()
    unittest.main()
| sunil07t/e-mission-server | emission/tests/analysisTests/intakeTests/TestFilterAccuracy.py | Python | bsd-3-clause | 5,514 |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for adding tags to instances."""
import copy
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.compute.lib import base_classes
class InstancesAddTags(base_classes.InstanceTagsMutatorMixin,
                       base_classes.ReadWriteCommand):
  """Add tags to Google Compute Engine virtual machine instances."""

  @staticmethod
  def Args(parser):
    """Register the --tags flag on top of the mixin's common arguments."""
    base_classes.InstanceTagsMutatorMixin.Args(parser)
    tags = parser.add_argument(
        '--tags',
        required=True,
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        help='A list of tags to attach to the instance.',
        metavar='TAG')
    tags.detailed_help = """\
        Specifies strings to be attached to the instance for later
        identifying the instance when adding network firewall rules.
        Multiple tags can be attached by repeating this flag.
        """

  def Modify(self, args, existing):
    """Return a copy of ``existing`` with ``args.tags`` merged into its tags."""
    new_object = copy.deepcopy(existing)
    # Do not re-order the items if the object won't change, or the objects
    # will not be considered equal and an unnecessary API call will be made.
    new_tags = set(new_object.tags.items + args.tags)
    if new_tags != set(new_object.tags.items):
      new_object.tags.items = sorted(new_tags)
    return new_object
InstancesAddTags.detailed_help = {
'brief': 'Add tags to Google Compute Engine virtual machine instances',
'DESCRIPTION': """\
*{command}* is used to add tags to Google Compute Engine virtual
machine instances. For example, running:
$ {command} example-instance --tags tag-1 tag-2
will add tags ``tag-1'' and ``tag-2'' to 'example-instance'.
Tags can be used to identify the instances when adding network
firewall rules. Tags can also be used to get firewall rules that
already exist to be applied to the instance. See
gcloud_compute_firewall-rules_create(1) for more details.
""",
}
| wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/instances/add_tags.py | Python | apache-2.0 | 2,058 |
# Copyright 2013-2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bottle Server Module."""
import logging
import os
import bottle
LOG = logging.getLogger(__name__)
class EventletLogFilter(object):  # pylint: disable=R0903
    """Route eventlet's log ``write()`` calls to the right destinations.

    Eventlet funnels three kinds of messages through ``write``:

    - thread messages, e.g. ``"(47001) accepted ('127.0.0.1', 53046)"``
    - wsgi lifecycle messages, e.g. ``"wsgi exiting"``
    - apache-style access lines, e.g.
      ``'127.0.0.1 - - [07/Jul/2015 16:16:31] "GET /version HTTP/1.1" 200'``

    Thread/wsgi chatter (first character ``(`` or ``w``) is written at
    DEBUG level only.  Everything else is treated as an access-log entry:
    it is appended verbatim to ``access_log`` (when one was supplied) and
    logged at INFO.  An instance can be passed to ``bottle.run`` via the
    ``log`` keyword.
    """

    def __init__(self, log, access_log=None):
        """Store the target logger and optional access-log file handle.

        :param log: logger instance (e.g. ``logging.getLogger()``).
        :keyword access_log: writable file object that receives an
            apache-style entry for each call and response.
        """
        self.log = log
        self.access_log = access_log

    def write(self, text):
        """Dispatch one eventlet log line to the appropriate target."""
        if not text:
            return
        if text.startswith(('(', 'w')):
            # Thread and wsgi messages: debug only, trailing newline
            # stripped for the logger.
            self.log.debug(text[:-1])
            return
        if self.access_log:
            self.access_log.write(text)
        self.log.info(text[:-1])
class XEventletServer(bottle.ServerAdapter):
    r"""Eventlet Bottle Server Adapter with extensions.

    Supports SSL. Accepts additional tuning parameters:

    * `backlog` adjust the eventlet backlog parameter which is the maximum
      number of queued connections. Should be at least 1; the maximum
      value is system-dependent.
    * `family`: (default is 2) socket family, optional. See socket
      documentation for available families.
    * `**kwargs`: directly map to python's ssl.wrap_socket arguments from
      https://docs.python.org/2/library/ssl.html#ssl.wrap_socket and
      wsgi.server arguments from
      http://eventlet.net/doc/modules/wsgi.html#wsgi-wsgi-server

    To create a self-signed key and start the eventlet server using SSL::

        openssl genrsa -des3 -out server.orig.key 2048
        openssl rsa -in server.orig.key -out test.key
        openssl req -new -key test.key -out server.csr
        openssl x509 -req -days 365 -in server.csr -signkey test.key -out \
            test.crt

        bottle.run(server='eventlet', keyfile='test.key', certfile='test.crt')
    """

    def get_socket(self):
        """Create listener socket based on bottle server parameters.

        Pops socket- and ssl-related keys out of ``self.options`` (so the
        remainder can later be forwarded to ``wsgi.server`` by ``run``)
        and returns the listening socket, wrapped in SSL when any ssl
        argument was supplied.
        """
        import eventlet
        # Separate out socket.listen arguments
        socket_args = {}
        for arg in ('backlog', 'family'):
            try:
                socket_args[arg] = self.options.pop(arg)
            except KeyError:
                pass
        # Separate out wrap_ssl arguments
        ssl_args = {}
        for arg in ('keyfile', 'certfile', 'server_side', 'cert_reqs',
                    'ssl_version', 'ca_certs', 'do_handshake_on_connect',
                    'suppress_ragged_eofs', 'ciphers'):
            try:
                ssl_args[arg] = self.options.pop(arg)
            except KeyError:
                pass
        address = (self.host, self.port)
        try:
            sock = eventlet.listen(address, **socket_args)
        except TypeError:
            # Fallback, if we have old version of eventlet
            sock = eventlet.listen(address)
        if ssl_args:
            sock = eventlet.wrap_ssl(sock, **ssl_args)
        return sock

    def run(self, handler):
        """Start bottle server.

        Raises RuntimeError unless the process was monkey-patched by
        eventlet before this module was imported.
        """
        import eventlet.patcher
        if not eventlet.patcher.is_monkey_patched(os):
            msg = ("%s requires eventlet.monkey_patch() (before "
                   "import)" % self.__class__.__name__)
            raise RuntimeError(msg)
        # Separate out wsgi.server arguments
        wsgi_args = {}
        for arg in ('log', 'environ', 'max_size', 'max_http_version',
                    'protocol', 'server_event', 'minimum_chunk_size',
                    'log_x_forwarded_for', 'custom_pool', 'keepalive',
                    'log_output', 'log_format', 'url_length_limit', 'debug',
                    'socket_timeout', 'capitalize_response_headers'):
            try:
                wsgi_args[arg] = self.options.pop(arg)
            except KeyError:
                pass
        if 'log_output' not in wsgi_args:
            # Mirror bottle's `quiet` flag unless the caller overrode it.
            wsgi_args['log_output'] = not self.quiet
        import eventlet.wsgi
        # Allow a pre-created socket to be handed in (e.g. for graceful
        # restarts); otherwise build one from our options.
        sock = self.options.pop('shared_socket', None) or self.get_socket()
        eventlet.wsgi.server(sock, handler, **wsgi_args)

    def __repr__(self):
        """Show class name, even if subclassed."""
        return self.__class__.__name__


# Register the adapter for bottle.run(server='xeventlet').
bottle.server_names['xeventlet'] = XEventletServer
class XTornadoServer(bottle.ServerAdapter):  # pylint: disable=R0903
    """The Tornado Server Adapter with xheaders enabled.

    ``xheaders=True`` makes Tornado honor ``X-Real-Ip`` /
    ``X-Forwarded-For`` style headers set by a fronting proxy.
    """

    def run(self, handler):
        """Wrap ``handler`` in a WSGI container and serve it with Tornado."""
        import tornado.httpserver
        import tornado.ioloop
        import tornado.wsgi

        wsgi_container = tornado.wsgi.WSGIContainer(handler)
        http_server = tornado.httpserver.HTTPServer(wsgi_container, xheaders=True)
        http_server.listen(port=self.port, address=self.host)
        tornado.ioloop.IOLoop.instance().start()


# Register the adapter for bottle.run(server='xtornado').
bottle.server_names['xtornado'] = XTornadoServer
| ryandub/simpl | simpl/server.py | Python | apache-2.0 | 6,254 |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train models on MNIST data."""
from lingvo import model_registry
from lingvo.core import base_model_params
from lingvo.tasks.image import classifier
from lingvo.tasks.image import input_generator
class Base(base_model_params.SingleTaskModelParams):
    """Input params for MNIST."""

    @property
    def path(self):
        """Path prefix of the MNIST checkpoint data.

        Generated using lingvo/tools:keras2ckpt.
        """
        return '/tmp/mnist/mnist'

    def Train(self):
        # Training-split input generator, reading from `self.path`.
        p = input_generator.MnistTrainInput.Params()
        p.ckpt = self.path
        return p

    def Test(self):
        # Held-out test-split input generator.
        p = input_generator.MnistTestInput.Params()
        p.ckpt = self.path
        return p

    def Dev(self):
        # No separate dev split is configured; reuse the test set.
        return self.Test()
@model_registry.RegisterSingleTaskModel
class LeNet5(Base):
    """LeNet params for MNIST classification."""

    # Whether to enable batch normalization in the classifier params.
    BN = False
    # Dropout probability passed to the classifier params.
    DROP = 0.2

    def Task(self):
        """Build the ModelV1 classifier params for a LeNet-style network."""
        p = classifier.ModelV1.Params()
        p.name = 'lenet5'
        # Overall architecture:
        # conv, maxpool, conv, maxpool, fc, softmax.
        p.filter_shapes = [(5, 5, 1, 20), (5, 5, 20, 50)]
        p.window_shapes = [(2, 2), (2, 2)]
        p.batch_norm = self.BN
        p.dropout_prob = self.DROP
        p.softmax.input_dim = 300
        p.softmax.num_classes = 10  # One class per MNIST digit.
        p.train.save_interval_seconds = 10  # More frequent checkpoints.
        p.eval.samples_per_summary = 0  # Eval the whole set.
        return p
| tensorflow/lingvo | lingvo/tasks/image/params/mnist.py | Python | apache-2.0 | 1,996 |
from waitress import serve
from pyramid.config import Configurator
from pyramid.view import view_config
from pyramid.response import Response
from pyramid_socketio.io import SocketIOContext, socketio_manage
@view_config(route_name="root", renderer="root/index.mak")
def index(request):
return dict(title="insights visualizer")
class ConnectIOContext(SocketIOContext):
    """Socket.IO context that handles the client 'connect' message."""

    def msg_connect(self, msg):
        # Acknowledge the handshake by echoing a 'connected' event back.
        print "Connect message received", msg
        self.msg('connected', hello='world')
@view_config(route_name='socket.io')
def socketio_service(request):
    """Hand an incoming Socket.IO request over to ConnectIOContext."""
    print('Socket.IO request running')
    print request
    # NOTE(review): retval is never used; confirm whether socketio_manage's
    # return value matters here before removing the binding.
    retval = socketio_manage(ConnectIOContext(request))
    return Response('')
if __name__ == '__main__':
    import os

    # Resolve the template directory relative to this file so the app can
    # be launched from any working directory.
    here = os.path.dirname(__file__)
    settings = {
        'mako.directories': [
            os.path.abspath(os.path.join(here, 'templates')),
        ],
    }
    config = Configurator(settings=settings)
    config.add_route('root', '/')
    # Socket.IO uses a wildcard remainder for its transport URLs.
    config.add_route('socket.io', 'socket.io/*remaining')
    config.add_static_view(name="static", path='frontend/static')
    config.scan()  # Pick up the @view_config-decorated views in this module.
    app = config.make_wsgi_app()
    serve(app, host='0.0.0.0', port=8888)
# encoding: utf-8
import sys
sys.path.append('/home/zjd/jmm/JPPCF/')
import os
import numpy as np
import util
from JPPCF import *
import logging
argvs = sys.argv
# We fix the num of latent feature
k = 100
lambd = 0.5
# Optional CLI override: trm.py <k> <lambda>
if len(argvs) == 3:
    k = int(float(argvs[1]))
    lambd = float(argvs[2])
print 'k: ', k, '\tlambda: ', lambd, '\n'

# Data was pre-split upstream into fixed-length time windows; users/docs
# with short like-lists were filtered out by `filter_threshold`.
time_interval = 360
filter_threshold = 10
origin_data_path = './data/preprocessed_data/'
data_path = origin_data_path + 'data_divided_by_' + str(time_interval) + '_days/'
filter_data_path = data_path + 'filtered_by_user_doc_like_list_len_' + str(filter_threshold) +'/'

# Id maps (two columns each) and per-time-step user/doc counts.
user_id_map = np.loadtxt(filter_data_path + 'user_id_map.dat.txt', int)
doc_id_map = np.loadtxt(filter_data_path + 'doc_id_map.dat.txt', int)
user_time_dist = np.loadtxt(filter_data_path + 'user_time_distribute.dat.txt', int)
doc_time_dist = np.loadtxt(filter_data_path + 'doc_time_distribute.dat.txt', int)
user_time_dict = dict(zip(user_time_dist[:,0], user_time_dist[:,1]))
doc_time_dict = dict(zip(doc_time_dist[:,0], doc_time_dist[:,1]))
user_id_dict = dict(zip(user_id_map[:,0], user_id_map[:,1]))
# The r*-prefixed dicts are the reverse mappings (col1 -> col0).
ruser_id_dict = dict(zip(user_id_map[:,1], user_id_map[:,0]))
doc_id_dict = dict(zip(doc_id_map[:,0], doc_id_map[:,1]))
rdoc_id_dict = dict(zip(doc_id_map[:,1], doc_id_map[:,0]))
user_num = user_id_map.shape[0]
doc_num = doc_id_map.shape[0]
print 'user_num: ', user_num, '\n'
print 'doc_num: ', doc_num, '\n'

# Ratings: columns are (user_id, doc_id, time_step).  Remap the id
# columns in place through the reverse dictionaries.
R = np.loadtxt(filter_data_path + 'rating_file.dat.txt', int)
time_step_num = R[:, 2].max()
row = R.shape[0]
for i in range(row):
    user_id = R[i, 0]
    doc_id = R[i, 1]
    R[i, 0] = ruser_id_dict[user_id]
    R[i, 1] = rdoc_id_dict[doc_id]
#exit(0)

# Regularization / convergence hyper-parameters for the solvers.
regl1nmf = 0.05
regl1jpp = 0.05
epsilon = 1
maxiter = 100
#recall_num = 100
fold_num = 5
# Log everything (DEBUG and up) to a per-run file named after the
# hyper-parameters so concurrent runs don't collide.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d]\
%(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='./log/trm_k_' + str(k) + '_lambda_' + \
                    str(lambd) + '_alpha_' + str(regl1jpp) + '.log',
                    filemode='w')
##################################################################
# Define a StreamHandler that prints INFO-or-higher records to stderr
# and attach it to the root logger.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
##################################################################
#logging.debug('This is debug message')
#logging.info('This is info message')
#logging.warning('This is warning message')
# Create the result directory tree: one subdirectory per metric.
time_filter_dir = './result/time_interval_' + str(time_interval) + '_filter_by_' + \
    str(filter_threshold)
if not os.path.isdir(time_filter_dir):
    os.mkdir(time_filter_dir)
result_dir = time_filter_dir + '/trm_fold_' + str(fold_num) + \
    '_k_' + str(k) + '_lambda_' + str(lambd) + '_alpha_' + str(regl1jpp)
recall_result_dir = result_dir + '/recall'
ndcg_result_dir = result_dir + '/ndcg'
ap_result_dir = result_dir + '/ap'
if not os.path.isdir(result_dir):
    os.mkdir(result_dir)
if not os.path.isdir(recall_result_dir):
    os.mkdir(recall_result_dir)
if not os.path.isdir(ndcg_result_dir):
    os.mkdir(ndcg_result_dir)
if not os.path.isdir(ap_result_dir):
    os.mkdir(ap_result_dir)
logging.info('user num: ' + str(user_num) + '\n')
logging.info('doc num: ' + str(doc_num) + '\n')
logging.info('time step num: ' + str(time_step_num) + '\n')

# the start time period used for init of W(1) and H(1), using normal NMF
start = 1
Rt = util.generate_matrice_between_time(R, user_time_dict[start], doc_time_dict[start], start, start)
logging.info('non zero cell num: ' + str(len(np.nonzero(Rt)[0])))
logging.info('start nmf:\n')
# Warm-start the factor matrices P and Q on the first time step's
# rating matrix; later steps are regularized by the previous P.
(P, Q) = util.nmf(Rt, k, maxiter, regl1nmf, epsilon)
logging.info('[ok]\n')
logging.info(str(P.shape) + '\t' + str(Q.shape) + '\n')

# number of period we consider
finT = time_step_num
# For all the consecutive periods: factorize each time step's rating
# matrix with JPPCF, regularized by the previous step's user factors Po.
for current_time_step in range(start+1, finT+1):
    logging.info('\n=========================\n')
    # FIX: the '%i' placeholder was never substituted (the step number was
    # string-concatenated after it); fill the placeholder properly.
    logging.info('time_step number %i:\t' % current_time_step)
    logging.info('----------------\n')
    Po = P
    # Per-metric accumulators for this time step, keyed by cutoff.
    jrecall_dict = {}
    jndcg_dict = {}
    jap_dict = {}
    current_user_num = user_time_dict[current_time_step]
    current_doc_num = doc_time_dict[current_time_step]
    # Ground truth: the doc indices each user liked at this time step.
    current_user_like_dict = {}
    like_file = open(filter_data_path + 'user_like_list_at_time_step' + \
                     str(current_time_step) + '.dat.txt')
    try:
        for user in like_file.readlines():
            splits = user.split()
            like_list = []
            for i in range(2, len(splits)):
                like_list.append(rdoc_id_dict[int(splits[i])])
            current_user_like_dict[ruser_id_dict[int(splits[0])]] = like_list
    finally:
        # FIX: the file handle was previously never closed.
        like_file.close()
    for fold_id in range(fold_num):
        current_data_path = filter_data_path + 'time_step_' + \
            str(current_time_step) + '/data_' + \
            str(fold_id)
        train_data_path = current_data_path + '/train.dat.txt'
        Rt = util.generate_matrice_for_file2(train_data_path,
                                             current_user_num,
                                             current_doc_num,
                                             ruser_id_dict,
                                             rdoc_id_dict
                                             )
        logging.info('non zero cell num: ' + str(len(np.nonzero(Rt)[0])))
        logging.info('computing JPP decomposition...')
        # The previous step's factors may cover a different user count;
        # pad/trim them to this step's dimensions.
        Po = util.reshape_matrix(Po, current_user_num, k)
        P, Q, S = JPPCF(Rt, Po, k, lambd, regl1jpp, epsilon, maxiter, True)
        PredictR = np.dot(P, Q)
        # Normalize predictions to [0, 1] for the ranking metrics.
        NormPR = PredictR / PredictR.max()
        logging.info('[ok]\n')
        logging.info('\t fold_id:' + str(fold_id) + '\n')
        for recall_num in [3, 5, 10, 20, 50, 100, 150, 200, 250, 300]:
            # recall@N
            logging.info('\trecall at ' + str(recall_num) + ':')
            jppcf_recall = util.performance_cross_validate_recall2(
                NormPR, current_data_path, recall_num,
                ruser_id_dict, rdoc_id_dict, current_user_like_dict)
            # FIX: dict.has_key() is deprecated; setdefault handles both
            # the first fold and subsequent folds.
            jrecall_dict.setdefault(recall_num, []).append(jppcf_recall)
            logging.info('\t\tJPPCF : ' + str(jppcf_recall) + '\n')
            # ndcg performance
            logging.info('\nndcg at ' + str(recall_num) + ':')
            jppcf_ndcg = util.performance_ndcg(
                NormPR, current_data_path, recall_num,
                ruser_id_dict, rdoc_id_dict, current_user_like_dict)
            jndcg_dict.setdefault(recall_num, []).append(jppcf_ndcg)
            logging.info('\t\tJPPCF : ' + str(jppcf_ndcg) + '\n')
            # ap performance
            logging.info('\nap at ' + str(recall_num) + ':')
            jppcf_ap = util.performance_ap(
                NormPR, current_data_path, recall_num,
                ruser_id_dict, rdoc_id_dict, current_user_like_dict)
            jap_dict.setdefault(recall_num, []).append(jppcf_ap)
            logging.info('\t\tJPPCF : ' + str(jppcf_ap) + '\n')
    logging.info('current_time_step: ' + str(current_time_step) + '\n')
    # Average each metric over the folds and append to the per-cutoff
    # result files (writing a header line only on first creation).
    for recall_num in [3, 5, 10, 20, 50, 100, 150, 200, 250, 300]:
        # recall
        logging.info('\trecall at ' + str(recall_num) + ':')
        avg_jppcf_recall = util.avg_of_list(jrecall_dict[recall_num])
        logging.info('\t\tavg JPPCF : ' + str(avg_jppcf_recall) + '\n')
        recall_path = recall_result_dir + '/recall_at_' + str(recall_num) + '.txt'
        exist = os.path.isfile(recall_path)
        result_file = open(recall_path, 'a')
        if not exist:
            result_file.write('jppcf\n')
        result_file.write(str(avg_jppcf_recall) + '\n')
        result_file.close()
        # ndcg
        logging.info('\tndcg at ' + str(recall_num) + ':')
        avg_jppcf_ndcg = util.avg_of_list(jndcg_dict[recall_num])
        logging.info('\t\tavg JPPCF : ' + str(avg_jppcf_ndcg) + '\n')
        ndcg_path = ndcg_result_dir + '/ndcg_at_' + str(recall_num) + '.txt'
        exist = os.path.isfile(ndcg_path)
        result_file = open(ndcg_path, 'a')
        if not exist:
            result_file.write('jppcf\n')
        result_file.write(str(avg_jppcf_ndcg) + '\n')
        result_file.close()
        # ap
        logging.info('\tap at ' + str(recall_num) + ':')
        avg_jppcf_ap = util.avg_of_list(jap_dict[recall_num])
        logging.info('\t\tavg JPPCF : ' + str(avg_jppcf_ap) + '\n')
        ap_path = ap_result_dir + '/ap_at_' + str(recall_num) + '.txt'
        exist = os.path.isfile(ap_path)
        result_file = open(ap_path, 'a')
        if not exist:
            result_file.write('jppcf\n')
        result_file.write(str(avg_jppcf_ap) + '\n')
        result_file.close()
    logging.info('=========================\n')
logging.info('\n all process done! exit now...')
| bit-jmm/ttarm | demo/trm.py | Python | gpl-2.0 | 10,034 |
import functools
import ipaddress
import itertools
from django import http
from django.contrib import admin, messages
from django.contrib.admin.utils import unquote
from django.db.models import Count, F, Q
from django.db.utils import IntegrityError
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponseNotAllowed,
HttpResponseRedirect,
)
from django.template.response import TemplateResponse
from django.urls import re_path, reverse
from django.utils.encoding import force_str
from django.utils.html import format_html, format_html_join
from django.utils.translation import gettext, gettext_lazy as _
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.access import acl
from olympia.activity.models import ActivityLog, IPLog, UserLog
from olympia.addons.models import Addon, AddonUser
from olympia.amo.admin import CommaSearchInAdminMixin
from olympia.api.models import APIKey, APIKeyConfirmation
from olympia.bandwagon.models import Collection
from olympia.ratings.models import Rating
from olympia.zadmin.admin import related_content_link, related_single_content_link
from . import forms
from .models import (
DeniedName,
DisposableEmailDomainRestriction,
EmailUserRestriction,
GroupUser,
IPNetworkUserRestriction,
UserProfile,
UserRestrictionHistory,
)
class GroupUserInline(admin.TabularInline):
    """Inline editor for group memberships, embedded in UserAdmin."""

    model = GroupUser
    raw_id_fields = ('user',)
@admin.register(UserProfile)
class UserAdmin(CommaSearchInAdminMixin, admin.ModelAdmin):
list_display = ('__str__', 'email', 'last_login', 'is_public', 'deleted')
extra_list_display_for_ip_searches = (
'last_login_ip',
# Those fields don't exist, and the admin doesn't know how to traverse
# relations, especially reverse ones, so these are actually methods
# defined below that match the exact relation string, so the
# annotations and filter expressions needed are built directly from
# the strings defined here.
'restriction_history__last_login_ip',
'restriction_history__ip_address',
'_ratings_all__ip_address',
# FIXME: IPLog makes this query too slow in production, need to
# fix #17504 to enable.
# 'activitylog__iplog__ip_address',
)
# A custom ip address search is also implemented in get_search_results()
search_fields = ('=id', '^email', '^username')
# A custom field used in search json in zadmin, not django.admin.
search_fields_response = 'email'
inlines = (GroupUserInline,)
show_full_result_count = False # Turn off to avoid the query.
readonly_fields = (
'id',
'created',
'modified',
'picture_img',
'banned',
'deleted',
'is_public',
'last_login',
'last_login_ip',
'known_ip_adresses',
'last_known_activity_time',
'ratings_authorship',
'collections_authorship',
'addons_authorship',
'activity',
'abuse_reports_by_this_user',
'abuse_reports_for_this_user',
'has_active_api_key',
'restriction_history_for_this_user',
)
fieldsets = (
(
None,
{
'fields': (
'id',
'created',
'modified',
'email',
'fxa_id',
'username',
'display_name',
'biography',
'homepage',
'location',
'occupation',
'picture_img',
),
},
),
(
'Flags',
{
'fields': ('display_collections', 'deleted', 'is_public'),
},
),
(
'Content',
{
'fields': (
'addons_authorship',
'collections_authorship',
'ratings_authorship',
)
},
),
(
'Abuse Reports',
{'fields': ('abuse_reports_by_this_user', 'abuse_reports_for_this_user')},
),
(
'Admin',
{
'fields': (
'last_login',
'last_known_activity_time',
'activity',
'restriction_history_for_this_user',
'last_login_ip',
'known_ip_adresses',
'banned',
'notes',
'bypass_upload_restrictions',
'has_active_api_key',
)
},
),
)
actions = ['ban_action', 'reset_api_key_action', 'reset_session_action']
class Media:
js = ('js/admin/userprofile.js',)
css = {'all': ('css/admin/userprofile.css',)}
def get_list_display(self, request):
search_term = request.GET.get('q')
if search_term and self.ip_addresses_if_query_is_all_ip_addresses(search_term):
return (*self.list_display, *self.extra_list_display_for_ip_searches)
return self.list_display
def ip_addresses_if_query_is_all_ip_addresses(self, search_term):
    """Return the list of IPs when every comma-separated term is one.

    Each term must be a *canonical* IP string (the round-trip through
    ``ipaddress.ip_address`` must reproduce it exactly).  Returns None
    as soon as any term fails, signalling that the regular text search
    should be used instead.  Note: callers rely on the IPs being
    returned as strings.
    """
    canonical = []
    for candidate in search_term.split(','):
        try:
            normalized = str(ipaddress.ip_address(candidate))
        except ValueError:
            return None
        if normalized != candidate:
            # Reject non-canonical spellings (e.g. leading zeros).
            return None
        canonical.append(normalized)
    return canonical
def get_search_results(self, request, queryset, search_term):
    """Search by id/email/username, or by one or more exact IP addresses.

    When every comma-separated term is a canonical IP, switch to the
    custom IP search: OR the IPs over every IP-bearing relation listed in
    ``extra_list_display_for_ip_searches`` and annotate each row with the
    matching relation values so the changelist columns can display them.
    """
    ips = self.ip_addresses_if_query_is_all_ip_addresses(search_term)
    if ips:
        q_objects = Q()
        annotations = {}
        for arg in self.extra_list_display_for_ip_searches:
            q_objects |= Q(**{f'{arg}__in': ips})
            if '__' in arg:
                # Surface the related field's value under the same name
                # so the list_display helper methods can read it.
                annotations[arg] = F(arg)
        queryset = queryset.filter(q_objects).annotate(**annotations)
        # We force the distinct() ourselves and tell Django there are no
        # duplicates, otherwise the admin de-duplication logic, which
        # doesn't use distinct() after Django 3.1, would break our
        # annotations.
        # This can cause some users to show up multiple times, but that's
        # a feature: it will happen when the IPs returned are different
        # (so technically the rows are not duplicates), since the
        # annotations are part of the distinct().
        queryset = queryset.distinct()
        may_have_duplicates = False
    else:
        queryset, may_have_duplicates = super().get_search_results(
            request,
            queryset,
            search_term,
        )
    return queryset, may_have_duplicates
# The four methods below back the extra changelist columns used for IP
# searches: each reads the annotation of the same name added by
# get_search_results() and falls back to '-' when absent or empty.
def restriction_history__last_login_ip(self, obj):
    return getattr(obj, 'restriction_history__last_login_ip', '-') or '-'

restriction_history__last_login_ip.short_description = (
    'Restriction History Last Login IP'
)

def restriction_history__ip_address(self, obj):
    return getattr(obj, 'restriction_history__ip_address', '-') or '-'

restriction_history__ip_address.short_description = 'Restriction History IP'

def activitylog__iplog__ip_address(self, obj):
    return getattr(obj, 'activitylog__iplog__ip_address', '-') or '-'

activitylog__iplog__ip_address.short_description = 'Activity IP'

def _ratings_all__ip_address(self, obj):
    return getattr(obj, '_ratings_all__ip_address', '-') or '-'

_ratings_all__ip_address.short_description = 'Rating IP'
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return functools.update_wrapper(wrapper, view)
urlpatterns = super().get_urls()
custom_urlpatterns = [
re_path(
r'^(?P<object_id>.+)/ban/$',
wrap(self.ban_view),
name='users_userprofile_ban',
),
re_path(
r'^(?P<object_id>.+)/reset_api_key/$',
wrap(self.reset_api_key_view),
name='users_userprofile_reset_api_key',
),
re_path(
r'^(?P<object_id>.+)/reset_session/$',
wrap(self.reset_session_view),
name='users_userprofile_reset_session',
),
re_path(
r'^(?P<object_id>.+)/delete_picture/$',
wrap(self.delete_picture_view),
name='users_userprofile_delete_picture',
),
]
return custom_urlpatterns + urlpatterns
def get_actions(self, request):
    """Hide the destructive bulk actions from admins without Users:Edit."""
    actions = super().get_actions(request)
    if not acl.action_allowed(request, amo.permissions.USERS_EDIT):
        # Banning users and resetting API keys / sessions all require
        # the Users:Edit permission.
        for action_name in ('ban_action', 'reset_api_key_action',
                            'reset_session_action'):
            actions.pop(action_name)
    return actions
def change_view(self, request, object_id, form_url='', extra_context=None):
    """Admin change page, additionally reachable by email instead of pk.

    When ``object_id`` resolves to an email lookup, redirect permanently
    to the canonical pk-based URL, preserving the query string.
    """
    extra_context = extra_context or {}
    extra_context['has_users_edit_permission'] = acl.action_allowed(
        request, amo.permissions.USERS_EDIT
    )
    lookup_field = UserProfile.get_lookup_field(object_id)
    if lookup_field != 'pk':
        try:
            if lookup_field == 'email':
                user = UserProfile.objects.get(email=object_id)
            # NOTE(review): if get_lookup_field ever returned something
            # other than 'pk'/'email', `user` would be unbound below —
            # presumably it never does; confirm.
        except UserProfile.DoesNotExist:
            raise http.Http404
        url = request.path.replace(object_id, str(user.id), 1)
        if request.GET:
            url += '?' + request.GET.urlencode()
        return http.HttpResponsePermanentRedirect(url)
    return super().change_view(
        request,
        object_id,
        form_url,
        extra_context=extra_context,
    )
def delete_model(self, request, obj):
# Deleting a user through the admin also deletes related content
# produced by that user.
ActivityLog.create(amo.LOG.ADMIN_USER_ANONYMIZED, obj)
obj.delete()
def save_model(self, request, obj, form, change):
changes = {
k: (form.initial.get(k), form.cleaned_data.get(k))
for k in form.changed_data
}
ActivityLog.create(amo.LOG.ADMIN_USER_EDITED, obj, details=changes)
obj.save()
def ban_view(self, request, object_id, extra_context=None):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404()
if not acl.action_allowed(request, amo.permissions.USERS_EDIT):
return HttpResponseForbidden()
ActivityLog.create(amo.LOG.ADMIN_USER_BANNED, obj)
UserProfile.ban_and_disable_related_content_bulk([obj], move_files=True)
kw = {'user': force_str(obj)}
self.message_user(request, gettext('The user "%(user)s" has been banned.' % kw))
return HttpResponseRedirect(
reverse('admin:users_userprofile_change', args=(obj.pk,))
)
def reset_api_key_view(self, request, object_id, extra_context=None):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404()
if not acl.action_allowed(request, amo.permissions.USERS_EDIT):
return HttpResponseForbidden()
self.reset_api_key_action(request, UserProfile.objects.filter(pk=obj.pk))
return HttpResponseRedirect(
reverse('admin:users_userprofile_change', args=(obj.pk,))
)
def reset_session_view(self, request, object_id, extra_context=None):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404()
if not acl.action_allowed(request, amo.permissions.USERS_EDIT):
return HttpResponseForbidden()
self.reset_session_action(request, UserProfile.objects.filter(pk=obj.pk))
return HttpResponseRedirect(
reverse('admin:users_userprofile_change', args=(obj.pk,))
)
def delete_picture_view(self, request, object_id, extra_context=None):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404()
if not acl.action_allowed(request, amo.permissions.USERS_EDIT):
return HttpResponseForbidden()
ActivityLog.create(amo.LOG.ADMIN_USER_PICTURE_DELETED, obj)
obj.delete_picture()
kw = {'user': force_str(obj)}
self.message_user(
request,
gettext('The picture belonging to user "%(user)s" has been deleted.' % kw),
)
return HttpResponseRedirect(
reverse('admin:users_userprofile_change', args=(obj.pk,))
)
def ban_action(self, request, qs):
users = []
UserProfile.ban_and_disable_related_content_bulk(qs)
for obj in qs:
ActivityLog.create(amo.LOG.ADMIN_USER_BANNED, obj)
users.append(force_str(obj))
kw = {'users': ', '.join(users)}
self.message_user(
request, gettext('The users "%(users)s" have been banned.' % kw)
)
ban_action.short_description = _('Ban selected users')
def reset_session_action(self, request, qs):
users = []
qs.update(auth_id=None) # A new value will be generated at next login.
for obj in qs:
ActivityLog.create(amo.LOG.ADMIN_USER_SESSION_RESET, obj)
users.append(force_str(obj))
kw = {'users': ', '.join(users)}
self.message_user(
request, gettext('The users "%(users)s" had their session(s) reset.' % kw)
)
reset_session_action.short_description = _('Reset session')
def reset_api_key_action(self, request, qs):
users = []
APIKeyConfirmation.objects.filter(user__in=qs).delete()
APIKey.objects.filter(user__in=qs).update(is_active=None)
for user in qs:
ActivityLog.create(amo.LOG.ADMIN_API_KEY_RESET, user)
users.append(force_str(user))
kw = {'users': ', '.join(users)}
self.message_user(
request, gettext('The users "%(users)s" had their API Key reset.' % kw)
)
reset_api_key_action.short_description = _('Reset API Key')
def picture_img(self, obj):
return format_html('<img src="{}" />', obj.picture_url)
picture_img.short_description = _('Profile Photo')
def known_ip_adresses(self, obj):
    """Render an HTML list of all distinct IPs recorded for this user.

    Sources: rating submissions, restriction-history entries (both the
    last-login IP and the event IP), activity-log IPs, and the profile's
    own last_login_ip.  (The method name keeps its historical spelling
    because it is referenced by name in readonly_fields.)
    """
    ip_adresses = set(
        Rating.objects.filter(user=obj)
        .values_list('ip_address', flat=True)
        .order_by()
        .distinct()
    )
    ip_adresses.update(
        # Each history row contributes two IP columns; flatten them.
        itertools.chain(
            *UserRestrictionHistory.objects.filter(user=obj)
            .values_list('last_login_ip', 'ip_address')
            .order_by()
            .distinct()
        )
    )
    ip_adresses.update(
        IPLog.objects.filter(activity_log__user=obj)
        .values_list('ip_address', flat=True)
        .order_by()
        .distinct()
    )
    ip_adresses.add(obj.last_login_ip)
    contents = format_html_join('', '<li>{}</li>', ((ip,) for ip in ip_adresses))
    return format_html('<ul>{}</ul>', contents)
def last_known_activity_time(self, obj):
from django.contrib.admin.utils import display_for_value
# We sort by -created by default, so first() gives us the last one, or
# None.
user_log = (
UserLog.objects.filter(user=obj).values_list('created', flat=True).first()
)
return display_for_value(user_log, '')
def has_active_api_key(self, obj):
return obj.api_keys.filter(is_active=True).exists()
has_active_api_key.boolean = True
def collections_authorship(self, obj):
return related_content_link(obj, Collection, 'author')
collections_authorship.short_description = _('Collections')
def addons_authorship(self, obj):
counts = (
AddonUser.unfiltered.filter(user=obj)
.order_by()
.aggregate(
active_role=Count('role', filter=~Q(role=amo.AUTHOR_ROLE_DELETED)),
deleted_role=Count('role', filter=Q(role=amo.AUTHOR_ROLE_DELETED)),
)
)
return related_content_link(
obj,
Addon,
'authors',
text=format_html(
'{} (active role), {} (deleted role)',
counts['active_role'],
counts['deleted_role'],
),
)
addons_authorship.short_description = _('Addons')
def ratings_authorship(self, obj):
return related_content_link(obj, Rating, 'user')
ratings_authorship.short_description = _('Ratings')
def activity(self, obj):
return related_content_link(obj, ActivityLog, 'user')
activity.short_description = _('Activity Logs')
def abuse_reports_by_this_user(self, obj):
return related_content_link(obj, AbuseReport, 'reporter')
def abuse_reports_for_this_user(self, obj):
return related_content_link(obj, AbuseReport, 'user')
def restriction_history_for_this_user(self, obj):
return related_content_link(obj, UserRestrictionHistory, 'user')
@admin.register(DeniedName)
class DeniedNameAdmin(admin.ModelAdmin):
    """Admin for the denied-name list, with a bulk add form."""

    list_display = search_fields = ('name',)
    view_on_site = False
    model = DeniedName
    model_add_form = forms.DeniedNameAddForm

    class Media:
        js = ('js/i18n/en-US.js',)

    def add_view(self, request, form_url='', extra_context=None):
        """Override the default admin add view for bulk add."""
        form = self.model_add_form()
        if request.method == 'POST':
            form = self.model_add_form(request.POST)
            if form.is_valid():
                inserted = 0
                duplicates = 0
                for name in form.cleaned_data['names'].splitlines():
                    # The cache-backed check catches most duplicates
                    # without hitting the database.
                    if self.model.blocked(name):
                        duplicates += 1
                        continue
                    try:
                        self.model.objects.create(name=name.lower())
                        inserted += 1
                    except IntegrityError:
                        # Unlikely, but another request could have added
                        # the same value concurrently.  Without manual
                        # transaction management a primary id is lost here.
                        duplicates += 1
                msg = f'{inserted} new values added to the deny list.'
                if duplicates:
                    msg += f' {duplicates} duplicates were ignored.'
                messages.success(request, msg)
                form = self.model_add_form()
        context = {
            'form': form,
            'add': True,
            'change': False,
            'has_view_permission': self.has_view_permission(request, None),
            'has_add_permission': self.has_add_permission(request),
            'app_label': 'DeniedName',
            'opts': self.model._meta,
            'title': 'Add DeniedName',
            'save_as': False,
        }
        return TemplateResponse(
            request, 'admin/users/denied_name/add_form.html', context
        )
@admin.register(IPNetworkUserRestriction)
class IPNetworkUserRestrictionAdmin(admin.ModelAdmin):
    """Admin for IP-network-based user restrictions."""

    list_display = ('network', 'restriction_type')
    list_filter = ('restriction_type',)
    search_fields = ('=network',)  # Exact-match search on the network.
    form = forms.IPNetworkUserRestrictionForm
@admin.register(EmailUserRestriction)
class EmailUserRestrictionAdmin(admin.ModelAdmin):
    """Admin for email-pattern-based user restrictions."""

    list_display = ('email_pattern', 'restriction_type')
    list_filter = ('restriction_type',)
    search_fields = ('^email_pattern',)  # Prefix search on the pattern.
@admin.register(DisposableEmailDomainRestriction)
class DisposableEmailDomainRestrictionAdmin(admin.ModelAdmin):
    """Admin for disposable-email-domain restrictions."""

    list_display = ('domain', 'restriction_type')
    list_filter = ('restriction_type',)
    search_fields = ('^domain',)  # Prefix search on the domain.
@admin.register(UserRestrictionHistory)
class UserRestrictionHistoryAdmin(admin.ModelAdmin):
    """Read-only admin listing of user restriction history entries."""

    raw_id_fields = ('user',)
    readonly_fields = (
        'restriction',
        'ip_address',
        'user_link',
        'last_login_ip',
        'created',
    )
    list_display = (
        'created',
        'user_link',
        'restriction',
        'ip_address',
        'last_login_ip',
    )
    extra = 0
    can_delete = False
    view_on_site = False

    def user_link(self, obj):
        """Render a link to the related user's admin page."""
        return related_single_content_link(obj, 'user')

    user_link.short_description = _('User')

    def has_add_permission(self, request):
        """History entries are never created through the admin."""
        return False

    def get_queryset(self, request):
        """Prefetch users to avoid per-row queries in the changelist."""
        return UserRestrictionHistory.objects.all().prefetch_related('user')
| wagnerand/addons-server | src/olympia/users/admin.py | Python | bsd-3-clause | 22,013 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add boolean column 'in_adoption_process' to dogs_dog."""
        # Adding field 'Dog.in_adoption_process'
        db.add_column(u'dogs_dog', 'in_adoption_process',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Unapply: drop the 'in_adoption_process' column again."""
        # Deleting field 'Dog.in_adoption_process'
        db.delete_column(u'dogs_dog', 'in_adoption_process')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dogs.address': {
'Meta': {'object_name': 'Address'},
'apartment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'neighbourhood': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'dogs.breed': {
'Meta': {'object_name': 'Breed'},
'breed_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'dogs.dog': {
'Meta': {'object_name': 'Dog'},
'adopted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'adopted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adopted_by'", 'null': 'True', 'to': u"orm['dogs.Person']"}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'breed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dogs.Breed']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_adoption_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'in_adoption_by'", 'to': u"orm['dogs.Person']"}),
'in_adoption_process': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'dogs.inadoption': {
'Meta': {'object_name': 'InAdoption'},
'adopter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'adopter'", 'to': u"orm['dogs.Person']"}),
'dog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dogs.Dog']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'dogs.message': {
'Meta': {'object_name': 'Message'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sender'", 'to': u"orm['dogs.Person']"}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dogs.MessageThread']"})
},
u'dogs.messagethread': {
'Meta': {'object_name': 'MessageThread'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person1'", 'to': u"orm['dogs.Person']"}),
'person2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person2'", 'to': u"orm['dogs.Person']"}),
'related_dog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_dog'", 'null': 'True', 'to': u"orm['dogs.Dog']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'dogs.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dogs.Address']", 'null': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['dogs'] | brenolf/myfriend | dogs/migrations/0010_auto__add_field_dog_in_adoption_process.py | Python | apache-2.0 | 8,592 |
#!/usr/bin/python3
import numpy as np
from scipy.sparse.linalg import svds
from scipy import sparse
from math import sqrt
def vector_to_diagonal(vector):
    """Place a vector on the diagonal of a square matrix.

    :param vector: plain list or 1-d numpy array of values.
    :return: 2-d float numpy array with ``vector`` on the main diagonal,
        or ``None`` if ``vector`` is neither a list nor a 1-d array.
    """
    if (isinstance(vector, np.ndarray) and vector.ndim == 1) or \
            isinstance(vector, list):
        # np.diag builds the (len x len) diagonal matrix in one call;
        # cast to float to match the original np.zeros-based behavior.
        return np.diag(np.asarray(vector, dtype=float))
    return None
# Module-level state: nested ratings dict plus ordered id lists whose
# positions define the row/column indices of the rating matrix.
data = {}        # {user_id: {movie_id: rating}}
users = []       # user ids, in order of first appearance
movies = []      # movie ids, in order of first appearance
user_avgs = []   # filled later: mean rating per user
movie_avgs = []  # filled later: mean rating per movie
# Load data
# Expected format: CSV with a header row, columns user,movie,rating.
train_file = 'train.txt'
with open(train_file, 'r') as f:
    next(f)  # skip the header line
    for line in f:
        fields = line.strip().split(',')
        user = fields[0]
        movie = fields[1]
        rating = int(fields[2])
        # NOTE(review): `in` / `index` on lists are O(n); for large inputs
        # a dict of id -> index would make loading much faster.
        if user not in users:
            users.append(user)
        if movie not in movies:
            movies.append(movie)
        data.setdefault(user, {})
        data[user][movie] = rating
# Generate original rating matrix
# R[u][m] holds the rating, 0 where no rating was given.
user_count = len(users)
movie_count = len(movies)
R = np.zeros((user_count, movie_count))
for (user, ratings) in data.items():
    for (movie, rating) in ratings.items():
        user_idx = users.index(user)
        movie_idx = movies.index(movie)
        R[user_idx][movie_idx] = rating
# Calculate user average rating
# Averages are taken over non-zero (actually rated) entries only. Division
# is safe: every user/movie in the lists was added because it has at least
# one rating, so n >= 1.
for user_idx in range(user_count):
    user_ratings = R[user_idx, :]
    n = 0
    sum_rating = 0.0
    for rating in user_ratings:
        if rating != 0:
            n += 1
            sum_rating += rating
    user_avgs.append(sum_rating / n)
# Calculate movie average rating
for movie_idx in range(movie_count):
    movie_ratings = R[:, movie_idx]
    n = 0
    sum_rating = 0.0
    for rating in movie_ratings:
        if rating != 0:
            n += 1
            sum_rating += rating
    movie_avgs.append(sum_rating / n)
# Normalize rating matrix R
# NOTE(review): this is an alias, not a copy — R itself is mutated below.
# R is not used again after this section, so the aliasing is harmless here.
R_normal = R
# Fill missing entries with the user's average rating...
for i in range(user_count):
    for j in range(movie_count):
        if R_normal[i][j] == 0:
            R_normal[i][j] = user_avgs[i]
# ...then center every column on the movie's average rating.
for i in range(user_count):
    for j in range(movie_count):
        R_normal[i][j] -= movie_avgs[j]
# SVD of the normalized rating matrix; k=2 latent topics
# (translated from the original Chinese comment).
R_normal = R_normal.astype('float')
U, S, VT = svds(sparse.csr_matrix(R_normal), k=2, maxiter=200)
S = vector_to_diagonal(S)
# BUG FIX: the original did `S_Sqrt = S`, which aliases S, so taking the
# square root below also mutated S and R_red ended up computed from
# sqrt(S) instead of S. Work on an independent copy.
S_Sqrt = S.copy()
# BUG FIX: iterate over the k diagonal entries (shape[0]), not S.ndim,
# which is always 2 for a matrix and only matched by accident since k == 2.
for i in range(S_Sqrt.shape[0]):
    S_Sqrt[i][i] = sqrt(S[i][i])
# Rank-k reconstruction of the normalized rating matrix.
R_red = np.dot(np.dot(U, S), VT)
# Calculate movie similarity matrix
# Movie feature vectors in latent space: columns of sqrt(S) * VT.
S_Sqrt_VT = np.dot(S_Sqrt, VT)
# Pairwise cosine similarity between movies in the latent feature space.
# sim[i] is a list of (similarity, other_movie_index) tuples.
sim = {}
for i in range(movie_count - 1):
    sim.setdefault(i, [])
    for j in range(i + 1, movie_count):
        sim.setdefault(j, [])
        # BUG FIX: the original summed over range(S.ndim), which is always
        # 2 for a matrix and only matched k by accident; use the full
        # latent-feature columns instead.
        col_i = S_Sqrt_VT[:, i]
        col_j = S_Sqrt_VT[:, j]
        sum_xy = np.dot(col_i, col_j)
        sum_x2 = np.dot(col_i, col_i)
        sum_y2 = np.dot(col_j, col_j)
        # Cosine similarity: dot(i, j) / (|i| * |j|).
        simij = sum_xy / sqrt(sum_x2 * sum_y2)
        # Record the similarity symmetrically for both movies.
        sim[i].append((simij, j))
        sim[j].append((simij, i))
# Sort each movie's neighbours by similarity, most similar first.
# BUG FIX: the original passed `reversed=True`, which raises
# "TypeError: 'reversed' is an invalid keyword argument"; the keyword
# accepted by list.sort() is `reverse`.
for (key, val) in sim.items():
    val.sort(key=lambda tup: tup[0], reverse=True)
def predict_rating(user_idx, movie_idx, l):
    """Predict a rating from the ``l`` most similar movies.

    Computes a similarity-weighted average of the user's de-normalized
    reduced ratings over the top-``l`` neighbours of ``movie_idx``;
    falls back to 3 when the weights sum to zero.
    """
    numerator = 0
    denominator = 0
    for k in range(l):
        similarity, neighbour = sim[movie_idx][k]
        numerator += similarity * (R_red[user_idx][neighbour] + user_avgs[user_idx])
        denominator += abs(similarity)
    if denominator == 0:
        return 3
    return numerator / denominator
# Predict a rating for every row of the test file (same CSV format as
# training data, header skipped).
test_file = 'test.txt'
result = ''
with open(test_file, 'r') as f:
    next(f)  # skip the header line
    for line in f:
        fields = line.strip().split(',')
        user = fields[0]
        movie = fields[1]
        if user in users and movie in movies:
            # Both ids known: use the collaborative-filtering prediction,
            # clamped into the valid 1..5 rating range.
            user_idx = users.index(user)
            movie_idx = movies.index(movie)
            rating = predict_rating(user_idx, movie_idx, 3)
            if rating < 1:
                rating = 1
            elif rating > 5:
                rating = 5
        elif user in users:
            # Unknown movie: fall back to the user's average rating.
            rating = int(round(user_avgs[users.index(user)]))
        else:
            # Unknown user: fall back to the global midpoint.
            rating = 3
        # NOTE(review): predict_rating can return a float, so the
        # concatenated output mixes multi-character values without any
        # separator — presumably a newline or comma was intended; confirm
        # against the expected output format.
        result += str(rating)
print(result)
| ryanorz/code-camp | data_mining/collaborative_filtering/nmf_co_filter.py | Python | apache-2.0 | 4,137 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
class GeneralSuffixElement(Model):
    """Binding for the xNL 2.0 ``GeneralSuffix`` element.

    ``MODEL_MAP`` describes the element to the :class:`scap.Model.Model`
    base class: ``Type`` and ``Code`` are recognised attributes, and the
    ``'*'`` entry accepts any additional attribute.
    """
    MODEL_MAP = {
        'tag_name': 'GeneralSuffix',
        'attributes': {
            'Type': {},
            'Code': {},
            '*': {},
        }
    }
| cjaymes/pyscap | src/scap/model/xnl_2_0/GeneralSuffixElement.py | Python | gpl-3.0 | 962 |
import math
def sieve(limit):
    """Sieve of Eratosthenes: return the set of all primes below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    found = set()
    for candidate, flag in enumerate(is_prime):
        if not flag:
            continue
        found.add(candidate)
        # Strike out every multiple, starting at candidate**2 (smaller
        # multiples were already struck by smaller primes).
        for composite in range(candidate * candidate, limit, candidate):
            is_prime[composite] = False
    return found
def P035():
    """Count the circular primes below one million (Project Euler 35)."""
    limit = 1000000
    primes = sieve(limit)
    non_circular = 0
    for p in primes:
        digits = int(math.log10(p))  # number of rotations to try
        rotated = p
        for _ in range(digits):
            # Move the last digit to the front.
            rotated, last = divmod(rotated, 10)
            rotated += last * 10 ** digits
            # One composite rotation disqualifies the prime.
            if rotated not in primes:
                non_circular += 1
                break
    return len(primes) - non_circular
print P035() | brendanzhao/ProjectEuler | src/P035.py | Python | mit | 719 |
import RPi.GPIO as GPIO
from lib_nrf24 import NRF24
import time
import spidev
import sys
# Use Broadcom (BCM) pin numbering for the Raspberry Pi GPIO.
GPIO.setmode(GPIO.BCM)
# Build up the radio transmitter
pipe = [0xE8, 0xE8, 0xF0, 0xF0, 0xE1]  # 5-byte address of the writing pipe
radio = NRF24(GPIO, spidev.SpiDev())
radio.begin(0, 17)  # SPI bus/device 0, chip-enable on GPIO 17
radio.setPayloadSize(32)
radio.setChannel(0x76)
radio.setDataRate(NRF24.BR_1MBPS)
radio.setPALevel(NRF24.PA_MIN)
radio.setAutoAck(True)
radio.enableDynamicPayloads()
radio.openWritingPipe(pipe)
#DEV ONLY: radio.printDetails()
###########################
### TRANSMISSION SCRIPT ###
###########################
# What's the beacon status? [given as first CLI argument by main script]
beaconStatus = str(sys.argv[1])
# Here the message is defined for the output
message = list("stat:"+beaconStatus);
# If the message has fewer than 32 chars, fill it up with 0s
# NOTE(review): this appends the integer 0, not the character '0' —
# presumably the NRF24 library accepts mixed char/int payload lists; verify.
while len(message) < 32:
    message.append(0)
# Publish the message
radio.write(message)
# Give us a note in the console
print("Sent the message")
# Clean the channel up to prevent 'buffering'
GPIO.cleanup()
| d3221/urban | urbanBeacon/sendArduinoOnce.py | Python | gpl-3.0 | 1,005 |
# -*- coding: utf-8 -*-
import argparse
import yaml
from os import getcwd
from os.path import (
abspath,
dirname,
exists,
join,
realpath,
isabs
)
from s3deploy.s3 import init as init_s3
from s3deploy.core import (
sync_static_site,
overwrite_static_site
)
# Command-line interface: one required positional argument plus two
# options. Definition order determines the --help output order.
parser = argparse.ArgumentParser(
    description='Deploy to S3 with alacrity.'
)
parser.add_argument(
    'target_directory',
    help='The top-level directory containing your static site.'
)
parser.add_argument(
    '--config',
    help='The path to your config file.'
)
parser.add_argument(
    '--overwrite',
    help='Forcibly overwrite the target bucket. Defaults to false.',
    action='store_true'
)
def _scrub_path(path):
if isabs(path):
path = realpath(path)
else:
cwd = abspath(getcwd())
path = realpath(join(cwd, path))
return path
def _read_config(path):
    """Load the YAML config file at ``path``.

    Returns an empty dict when the file does not exist. May return
    ``None`` for an empty file (unchanged from the original).

    Uses ``yaml.safe_load``: the config only needs plain YAML types,
    plain ``yaml.load`` is unsafe on untrusted input, and calling it
    without an explicit Loader is an error as of PyYAML 6.
    """
    o = {}
    path = _scrub_path(path)
    if exists(path):
        with open(path, 'r') as f:
            o = yaml.safe_load(f)
    return o
def main(*args, **kwargs):
    """CLI entry point: load config, init S3, then sync or overwrite."""
    cli_args = vars(parser.parse_args())
    if cli_args['config']:
        # Explicit config path given on the command line.
        config = _read_config(cli_args['config'])
    else:
        # Fall back to s3deploy.config.yaml in the current directory.
        config = None
        default_path = join(getcwd(), 's3deploy.config.yaml')
        if exists(default_path):
            config = _read_config(default_path)
    if not config:
        raise RuntimeError(
            'S3 Deploy cannot function without a config file :(')
    init_s3(config['aws_key'], config['aws_secret'])
    if cli_args['target_directory']:
        target = _scrub_path(cli_args['target_directory'])
        bucket = config['bucket_name']
        if cli_args['overwrite']:
            overwrite_static_site(target, bucket)
        else:
            sync_static_site(target, bucket)
| petermelias/s3deploy | s3deploy/__init__.py | Python | mit | 1,825 |
#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2015 Christian Bruns |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Christian Bruns <christian.bruns1 (a) stud.uni-goettingen.de> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
import random
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Models.Grammar.States.State import State
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
@NetzobLogger
class PrismaState(State):
    """ Performs like original State; incorporates features of invalidating a Transitions Symbol (if it is to faulty)
    and even the Transition itself, if it has no Symbols left. Also removes itself, if no Transitions are left.
    """
    def __init__(self, name=None):
        super(PrismaState, self).__init__(name=name)
        # True only while executeAsInitiator is running.
        self.active = False
        # Outgoing transitions; shrinks as transitions become invalid.
        self.trans = []
        # Transitions already taken in the current cycle (for cyclic picking).
        self.usedTransitions = []
        # Set once this state has no usable transitions left.
        self.invalid = False
    def executeAsInitiator(self, abstractionLayer):
        """Pick and execute one outgoing transition; return the next state."""
        if abstractionLayer is None:
            raise TypeError("AbstractionLayer cannot be None")
        self.active = True
        # Pick the next transition
        nextTransition = self.pickNextTransition()
        if nextTransition is None:
            self.active = False
            raise Exception("No transition to execute, we stop here.")
        # Execute picked transition as an initiator
        try:
            nextState = nextTransition.executeAsInitiator(abstractionLayer)
        except Exception, e:
            # Always clear the active flag before propagating the error.
            self.active = False
            raise e
        if nextState is None:
            self.active = False
            raise Exception("The execution of transition {0} on state {1} did not return the next state.".format(str(nextTransition), self.name))
        self.active = False
        return nextState
    def pickNextTransition(self):
        """ Advanced picking method; incorporates features of deleting Symbols from Transitions, Transitions from
        current State and current State itself. Picks Transitions cyclically.
        :return: the Transition to be executed
        """
        flag = True
        while flag:
            # Candidates: transitions not yet used in the current cycle.
            # NOTE(review): random.choice raises IndexError when pos is
            # empty (e.g. every remaining transition leads to an invalid
            # state) — confirm callers guard against this.
            pos = list(set(self.trans)-set(self.usedTransitions))
            c = random.choice(pos)
            # is endState invalid?
            if c.endState.invalid:
                # remove transition to it
                self.trans.remove(c)
            else:
                flag = False
                self.usedTransitions.append(c)
                if c.invalid:
                    # Drop the invalid transition; if none remain, this state
                    # becomes invalid itself (terminate on the START state).
                    self.trans.remove(c)
                    if len(self.trans) == 0:
                        self.invalid = True
                        if self.name.split('|')[-1] == 'START':
                            exit()
                # if c in self.trans:
                # All transitions used: start a new picking cycle.
                if len(self.trans) <= len(self.usedTransitions):
                    self.usedTransitions = []
        return c
    def setTransitions(self, transitions):
        # Replace the full list of outgoing transitions.
        self.trans = transitions
    @property
    def transitions(self):
        return self.trans
| dasbruns/netzob | src/netzob/Common/Models/Grammar/States/PrismaState.py | Python | gpl-3.0 | 5,647 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" I/O Lock for inter-process synchronization.
Leverages the (presumed?) atomicity of mkdir on unices.
"""
__docformat__ = 'restructuredtext en'
__all__ = ['LockFile', 'open_exclusive']
from contextlib import contextmanager
class LockFile(object):
    """ Gets an advisory lock for a file.

        Creates a lock directory named after the file. Relies on the
        presumably atomic nature of creating a directory on unices.

        *Beware* of mpi problems! `LockFile` is (purposefully) not mpi aware.
        If used unwisely, processes will lock each other out.
    """

    def __init__(self, filename, timeout=None, sleep=0.05):
        """ Creates a lock object.

            :Parameters:
              timeout
                will raise a RuntimeError when calling `lock` if
                the lock could not be aquired within this time.
              sleep
                Time to sleep between checks when waiting to acquire lock.

            Does not acquire lock at this stage.
        """
        from os.path import abspath
        self.filename = abspath(filename)
        """ Name of file to lock. """
        self.timeout = timeout
        """ Maximum amount of time to wait when acquiring lock. """
        self.sleep = sleep
        """ Sleep time between checks on lock. """
        self._owns_lock = False
        """ True if this object owns the lock. """

    def lock(self):
        """ Waits until the lock is acquired. """
        from os import makedirs, error, mkdir
        from os.path import exists
        import time
        # Create the parent directory first, if necessary.
        if not exists(self._parent_directory):
            try:
                makedirs(self._parent_directory)
            except error:
                # Another process may have created it concurrently.
                pass
        # Loop until the lock is acquired: mkdir succeeds for exactly one
        # process, so whoever creates the directory owns the lock.
        while not self._owns_lock:
            try:
                self._owns_lock = True
                mkdir(self.lock_directory)
            except error:
                # Directory already exists: someone else holds the lock.
                self._owns_lock = False
                # 2013-11-11: timeout check disabled to make this try
                # forever, since timeouts were causing large runs to fail.
                time.sleep(self.sleep)

    def __enter__(self):
        """ Enters context: acquires the lock. """
        self.lock()
        return self

    def __exit__(self, *args):
        """ Exits context: releases the lock. """
        self.release()

    @property
    def lock_directory(self):
        """ Name of the lock directory. """
        from os.path import join, basename
        return join(self._parent_directory,
                    "." + basename(self.filename) + "-pylada_lockdir")

    @property
    def _parent_directory(self):
        # Directory containing the locked file.
        from os.path import abspath, dirname
        return dirname(abspath(self.filename))

    @property
    def is_locked(self):
        """ True if a lock for this file exists (held by anyone). """
        from os.path import exists
        return exists(self.lock_directory)

    @property
    def owns_lock(self):
        """ True if this object owns the lock. """
        return self._owns_lock

    def release(self):
        """ Releases the lock.

            It is an error to release a lock not owned by this object.
            It is also an error to release a lock which is not locked.
        """
        from os import rmdir
        # NOTE: asserts are stripped under ``python -O``; kept as asserts
        # for backward compatibility (callers see AssertionError).
        assert self._owns_lock, IOError("Filelock object does not own lock.")
        assert self.is_locked, IOError("Filelock object owns an unlocked lock.")
        self._owns_lock = False
        rmdir(self.lock_directory)

    def __del__(self):
        """ Releases the lock if still held.

            BUG FIX: the original defined ``__del__`` twice; the earlier,
            laxer definition was silently shadowed by this one, so the
            duplicate has been removed.
        """
        if self.owns_lock and self.is_locked:
            self.release()

    def remove_stale(self):
        """ Removes a stale lock, best-effort. """
        from os import rmdir, error
        from os.path import exists
        if exists(self.lock_directory):
            try:
                rmdir(self.lock_directory)
            except error:
                # Lock vanished or is busy; ignore (best-effort cleanup).
                pass
def acquire_lock(filename, sleep=0.5, timeout=None):
    """ Alias for a `LockFile` context.

        *Beware* of mpi problems! `LockFile` is (purposefully) not mpi aware.
        Only the root node should use this method.
    """
    lock = LockFile(filename, sleep=sleep, timeout=timeout)
    return lock
@contextmanager
def open_exclusive(filename, mode="r", sleep=0.5, timeout=None):
    """ Opens a file while holding an advisory lock on it.

        This context uses `LockFile` to first obtain a lock.
        *Beware* of mpi problems! `LockFile` is (purposefully) not mpi aware.
        Only the root node should use this method.
    """
    with LockFile(filename, sleep=sleep, timeout=timeout):
        # BUG FIX: the original yielded ``open(filename, mode)`` directly
        # and never closed the handle; close it before releasing the lock.
        with open(filename, mode) as handle:
            yield handle
| pylada/pylada-light | src/pylada/misc/lockfile.py | Python | gpl-3.0 | 6,381 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from hurry.filesize import size
import numpy
from PIL import Image
def read_pixels(filename):
    """Read the image `filename` and return the pixels.

    A sibling annotation image named ``<stem>-annotated.gif`` is read
    alongside it.

    Parameters
    ----------
    filename : string
        A path to an image which will be read.

    Returns
    -------
    tuple of numpy array
        (height x width) pixel data for a grayscale ('L') image, or
        (height x width x 3) for an RGB image, plus the 2-d annotation.

    Raises
    ------
    ValueError
        If the image mode is neither 'L' nor 'RGB'.
    """
    im = Image.open(filename)
    pix = im.getdata()
    annotation = "%s-annotated.gif" % filename[:-4]
    ima = Image.open(annotation)
    ann = ima.getdata()
    print(ima.mode)
    # BUG FIX: Image.size is (width, height), but getdata() is row-major,
    # so the data must be reshaped to (height, width[, channels]); the
    # original reshaped to im.size and silently transposed / garbled any
    # non-square image.
    width, height = im.size
    ann_width, ann_height = ima.size
    if im.mode == 'L':
        return (numpy.array(pix).reshape((height, width)),
                numpy.array(ann).reshape((ann_height, ann_width)))
    elif im.mode == 'RGB':
        return (numpy.array(pix).reshape((height, width, 3)),
                numpy.array(ann).reshape((ann_height, ann_width)))
    else:
        raise ValueError("Mode '%s' not known." % im.mode)
def patchify(pixels, annotation, target_folder='.', patch_width=3, patch_height=3):
    """Cut the image into overlapping patches with per-center labels.

    BUG FIX: the original accepted patch_width/patch_height parameters
    (defaulting to 20) but immediately overwrote both with 3, so callers
    could never change the patch size. The hard-coded override has been
    removed and the defaults changed to 3, preserving the effective
    behavior of existing calls while honoring explicit arguments.

    Parameters
    ----------
    pixels : numpy array
        Two-dimensional image data.
    annotation : numpy array
        Two-dimensional label data, same layout as ``pixels``.
    target_folder : string
        Unused; kept for backward compatibility.
    patch_width, patch_height : int
        Half-extent of each patch around its center pixel.

    Returns
    -------
    tuple of numpy arrays
        ``X`` with one flattened (2*patch_width x 2*patch_height) patch
        per interior pixel, and ``labels`` with the annotation value at
        each patch center.
    """
    X = []
    labels = []
    for x in range(patch_width + 1, len(pixels) - patch_width):
        for y in range(patch_height + 1, len(pixels[0]) - patch_height):
            context = pixels[x - patch_width:x + patch_width,
                             y - patch_height:y + patch_height]
            X.append(context.flatten())
            labels.append(annotation[x][y])
    return numpy.array(X), numpy.array(labels)
def main():
    """Load a hard-coded KITTI frame, patchify it and print label stats."""
    # NOTE(review): absolute developer-specific path; will fail on any
    # other machine — should come from CLI arguments or config.
    filename = ("/home/moose/GitHub/informatik-2011/Master/Wahlfach/"
                "ML-Praktikum/KITTI/2011_09_26_drive_0001_sync/"
                "2011_09_26_drive_0001_sync/image_00/data/0000000000.png")
    pixels, annotation = read_pixels(filename)
    #print(pixels.shape)
    #print(pixels[0][0])
    X, y = patchify(pixels, annotation)
    # Print a small window of labels plus the overall count and memory use.
    for i in range(357000, 357500):
        print(y[i])
    print(len(y))
    print(size(y.nbytes))  # human-readable size via hurry.filesize
    # for el in pix:
    #    print(el)
if __name__ == '__main__':
    main()
| MartinThoma/datatools | datatools/images.py | Python | mit | 2,198 |
## {{{ http://code.activestate.com/recipes/577715/ (r3)
# Random 2D Slice Of 4D Mandelbrot Fractal
# FB - 201105231
"""Random 2D Slice Of 4D Mandelbrot Fractal.
Modified by Symion 2011
Now works with Visual Python v5.40.
Produce 2D slice of 4D Mandelbrot Fractal and Map it in 3D!
Visual Python Controls:
Click Left Mouse Key = Navigate
Press Right Mouse Key = Spin
Press Both Keys = Zoom
q = Point size - 1
w = Point size + 1
e = Point shape + 1
r = Scene Alignment
"""
# Output backend selector: 0 = Visual Python 3D point cloud,
# anything else = render to a PNG via PIL.
vp_flag = 0
if vp_flag == 0:
    from visual import *
else:
    import random
from math import *
from PIL import Image
imgx = 512   # image width in pixels
imgy = 512   # image height in pixels
forge = 0    # 0 = hyper-complex iteration, nonzero = quaternion iteration
pcc = 0      # accumulated iteration counts (for the average printed later)
pii = 0      # number of pixels processed
psize = 3    # point size for the Visual Python renderer
pshape = 0   # 0 = square points, 1 = round points
if vp_flag == 0:
    scene.width=imgx
    scene.height=imgy
    # Points objects are capped at ~10000 entries each; more are appended
    # on demand in the main loop.
    image = [points(size=psize, shape="square")]
    forward = vector(scene.forward)  # remembered for the 'r' (reset) key
    print "Number of Points objects: {0}".format(len(image))
else:
    image = Image.new("RGB", (imgx, imgy))
#
print __doc__
# drawing area (xa < xb & ya < yb)
xa = -2.0
xb = 2.0
ya = -2.0
yb = 2.0
maxIt = 32 # max number of iterations allowed
maxit = maxIt / 2.0
if True:
    # random rotation angles to convert 2d plane to 4d plane
    xy = random.random() * 2.0 * pi
    xz = random.random() * 2.0 * pi
    xw = random.random() * 2.0 * pi
    yz = random.random() * 2.0 * pi
    yw = random.random() * 2.0 * pi
    zw = random.random() * 2.0 * pi
else:
    # default rotation angles
    xy=1.3536589728
    xz=2.30808965705
    xw=3.50029464114
    yz=3.37449518258
    yw=4.23401560176
    zw=2.44695022478
# Precompute sines/cosines of the six plane-rotation angles.
sxy = sin(xy)
cxy = cos(xy)
sxz = sin(xz)
cxz = cos(xz)
sxw = sin(xw)
cxw = cos(xw)
syz = sin(yz)
cyz = cos(yz)
syw = sin(yw)
cyw = cos(yw)
szw = sin(zw)
czw = cos(zw)
# Center of the drawing area; rotations pivot around this point.
origx = (xa + xb) / 2.0
origy = (ya + yb) / 2.0
# Main render loop: for each pixel, rotate the 2D plane point into 4D,
# iterate the fractal formula and color by escape-iteration count.
for ky in range(imgy):
    b = ky * (yb - ya) / (imgy - 1) + ya
    for kx in range(imgx):
        a = kx * (xb - xa) / (imgx - 1) + xa
        x = a
        y = b
        z = 0 # c = 0
        w = 0 # d = 0
        # 4d rotation around center of the plane
        x = x - origx
        y = y - origy
        x0 = x * cxy - y * sxy
        y = x * sxy + y * cxy
        x = x0 # xy-plane rotation
        x0 = x * cxz - z * sxz
        z = x * sxz + z * cxz
        x = x0 # xz-plane rotation
        # NOTE(review): unlike the other plane rotations, these two lines
        # mix x with z rather than with w; by symmetry with the yw/zw
        # rotations one would expect `w * sxw` / `w * cxw` here. Possibly
        # intentional for the "random slice" effect — confirm before changing.
        x0 = x * cxw - z * sxw
        w = x * sxw + z * cxw
        x = x0 # xw-plane rotation
        y0 = y * cyz - z * syz
        z = y * syz + z * cyz
        y = y0 # yz-plane rotation
        y0 = y * cyw - w * syw
        w = y * syw + w * cyw
        y = y0 # yw-plane rotation
        z0 = z * czw - w * szw
        w = z * szw + w * czw
        z = z0 # zw-plane rotation
        x = x + origx
        y = y + origy
        if forge:
            for i in range(maxIt):
                # iteration using quaternion numbers
                x0 = x * x - y * y - z * z - w * w + a
                y = 2.0 * x * y + b
                z = 2.0 * x * z
                w = 2.0 * x * w
                x = x0
                s = x * x + y * y + z * z + w * w # 4d absolute value squared
                if s > 4.0:
                    break
        else:
            for i in range(maxIt):
                # iteration using hyper-complex numbers
                x0 = x * x - y * y - z * z - w * w + a
                y0 = 2.0 * x * y - 2.0 * z * w + b
                z0 = 2.0 * x * z - 2.0 * y * w
                w = 2.0 * x * w + 2.0 * z * y
                x = x0
                y = y0
                z = z0
                s = x * x + y * y + z * z + w * w # 4d absolute value squared
                if s > 4.0:
                    break
        pcc += i
        pii += 1
        # Only plot points that escaped before the iteration cap.
        if (i%maxIt) != 0:
            if vp_flag == 0:
                # Keep each Points object below ~10000 entries.
                if len(image[-1].pos) > 9999:
                    image.append(points(size=psize, shape="square"))
                    print "Number of Points objects: {0}".format(len(image))
                # Grayscale by escape time; z-coordinate encodes it too.
                c = (i/maxit, i/maxit, i/maxit)
                image[-1].append((kx-256, ky-256, i/maxit))
                image[-1].color[-1] = c
            else:
                image.putpixel((kx, ky), (i % 4 * 64, i % 8 * 32, i % 16 * 16))
# Finalize output: adjust the 3D scene, or save the PNG.
if vp_flag == 0:
    scene.autoscale=False
    scene.range=mag(scene.mouse.camera)/sqrt(3)
    scene.visible = True
else:
    image.save("4D_Mandelbrot_Fractal.png", "PNG")
print "Finished!"
# Average escape-iteration count over all pixels.
print "{0} / {1} = {2}".format(pcc, pii, pcc / pii)
mess = "Base Set:\nxy={0}, xz={1}, xw={2}, yz={3}, yw={4}, zw={5}"
print mess.format(xy, xz, xw, yz, yw, zw)
# Interactive event loop for the Visual Python viewer (see __doc__ for
# the key bindings: q/w = point size, e = shape, r = realign, x = quit).
if vp_flag == 0:
    while 1:
        if scene.mouse.events>0:
            mk = scene.mouse.getevent()
            if mk.release == "left":
                scene.center = mk.pos
        elif scene.kb.keys:
            km = scene.kb.getkey()
            if km in ["x", "X"]:
                break
            elif km in ["w"]:
                # Grow point size, wrapping from 50 back to 1.
                psize = psize%50
                psize += 1
                for a in image:
                    a.size = psize
            elif km in ["q"]:
                # Shrink point size, wrapping from 1 back to 50.
                psize -= 1
                if psize<1:
                    psize = 50
                for a in image:
                    a.size = psize
            elif km in ["e"]:
                # Toggle between square and round points.
                pshape = (pshape+1)%2
                for a in image:
                    a.shape = ["square","round"][pshape]
            elif km in ["r"]:
                # Restore the original camera direction.
                scene.forward = forward
| ActiveState/code | recipes/Python/577723_2D_slice_4D_Mandelbrot_Fractal_Map_it/recipe-577723.py | Python | mit | 5,397 |
"""Minimal TensorFlow 1.x linear-regression demo.

Fits y = k*x + b to 100 synthetic points drawn from y = 0.1*x + 0.2,
printing the learned slope and intercept every 20 training steps.
"""
import tensorflow as tf
import numpy as np

# Synthetic training data on the line y = 0.1*x + 0.2.
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2

# Trainable slope (k) and intercept (b), both starting at zero.
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b

# Mean-squared-error loss, minimised by plain gradient descent (lr=0.2).
loss = tf.reduce_mean(tf.square(y_data - y))
optimizer = tf.train.GradientDescentOptimizer(0.2)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Report progress: step index plus current [slope, intercept].
            print(step, sess.run([k, b]))
| hwangcc23/programming | tensorflow-hellowork.py | Python | gpl-2.0 | 473 |
"""Tkinter-based GUI for FileSorter."""
import os
import platform
import sorter
import sys
from Tkinter import *
import tkFileDialog
import util
__author__ = "Alex Cappiello"
__license__ = "See LICENSE.txt"
import sys
import util
# Workaround for Python's late-binding closures in widget commands:
# bind the callback and its argument at definition time so every widget
# does not end up sharing the loop variable's final value.
def lambda_wrapper(f, v):
    """Return a zero-argument callable that invokes f(v), with v bound now."""
    return lambda: f(v)


# Workaround for BooleanVar.get() returning an int rather than a bool.
def lambda_getbool_wrapper(f, v):
    """Return a zero-argument callable that invokes f(bool(v.get()))."""
    return lambda: f(bool(v.get()))
class Gui:
    """Tkinter front end for the FileSorter.

    Builds a single window with source/destination path fields, date/time
    component checkboxes, time-type and copy/move radio buttons, and a few
    extra options, all wired to a sorter.Sorter instance. Constructing a
    Gui() blocks in Tk's mainloop until the window is closed.
    """

    @staticmethod
    def browse_for_file(e):
        """Pull up a file browsing dialog and dump the resulting path into the
        Tkinter.Entry widget e."""
        # NOTE(review): filter is fixed to PNG; apparently unused by build().
        path = tkFileDialog.askopenfilename(filetypes=[("PNG", ".png")])
        if (path != ""):  # Unless they hit cancel.
            e.delete(0, END)
            e.insert(0, path)

    @staticmethod
    def browse_for_folder(e):
        """Pull up a folder browsing dialog and dump the resulting path into
        the Tkinter.Entry widget e."""
        path = tkFileDialog.askdirectory()
        if (path != ""):  # Unless they hit cancel.
            e.delete(0, END)
            e.insert(0, path)

    def init_extras_dict(self):
        """Extras is the category of other unrelated options.
        Dictionary format is:
        display text : (action on button press, checked on default)
        """
        d = dict()
        d["Recurse into subfolders."] = (self.sorter.set_recurse, True)
        d["Preserve directory structure."] = \
            (self.sorter.set_keep_directory, False)
        self.extras_dict = d

    def init_path_inputs(self):
        """Add an Entry field to the GUI for each file and one for the working
        directory. Each is in its own Frame with a browse Button."""
        Label(self.canvas, text="Source directory:").pack(anchor="w")
        container = Frame(self.canvas)
        source_path = Entry(container, width=80)
        source_path.pack(side=LEFT)
        # Kept as attributes so start() can read the typed paths later.
        self.source_path = source_path
        Button(container, text="Browse",
               command=
               lambda: Gui.browse_for_folder(source_path)).pack(side=LEFT)
        container.pack()
        Label(self.canvas,
              text="Destination directory (must not be in source):").pack(anchor="w")
        container = Frame(self.canvas)
        dest_path = Entry(container, width=80)
        dest_path.pack(side=LEFT)
        self.dest_path = dest_path
        Button(container, text="Browse",
               command=
               lambda: Gui.browse_for_folder(dest_path)).pack(side=LEFT)
        container.pack()

    def init_time_checkboxes(self, parent):
        """Add checkboxes to the GUI for each date/time component."""
        components = [
            ('Year', self.sorter.set_use_year),
            ('Month', self.sorter.set_use_month),
            ('Day', self.sorter.set_use_day),
            ('Day of Week', self.sorter.set_use_dow),
            ('Hour', self.sorter.set_use_hour),
            ('Minute', self.sorter.set_use_minute),
            ('Second', self.sorter.set_use_second)
        ]
        defaults = ['Year', 'Month', 'Day']
        checkboxes = Frame(parent)
        Label(checkboxes, text="Organize based on:").pack()
        for (component, action) in components:
            state = BooleanVar()
            box = Checkbutton(checkboxes, text=component, variable=state,
                              onvalue=True, offvalue=False,
                              command=lambda_getbool_wrapper(action, state))
            box.pack(anchor="w")
            if (component in defaults):
                # Pre-check the default boxes and push the state to the
                # sorter, since select() does not fire the command callback.
                box.select()
                action(bool(state.get()))
        checkboxes.pack(side=LEFT)

    def init_selectors(self, parent):
        """Add radio buttons to select which time to look at and copy/move."""
        options = [(sorter.MTIME, 'Modification Time'),
                   (sorter.ATIME, 'Access Time')]
        # st_ctime means creation time on Windows but metadata-change time
        # on POSIX, so the label differs by platform.
        if ('Windows' in platform.platform()):
            options += [(sorter.CTIME, 'Creation Time')]
        else:
            options += [(sorter.CTIME, 'Metadata Change Time')]
        radiobuttons = Frame(parent)
        Label(radiobuttons, text="Time Option:").pack()
        self.time_type = IntVar()
        for (short_name, long_name) in options:
            radio = Radiobutton(radiobuttons, text=long_name,
                                variable=self.time_type, value=short_name,
                                command=lambda_wrapper(self.sorter.set_time_type, short_name))
            radio.pack(anchor="w")
        self.time_type.set(sorter.MTIME)
        Label(radiobuttons, text="Operation Type:").pack()
        self.op_type = IntVar()
        Radiobutton(radiobuttons, text="Copy Files", variable=self.op_type,
                    command=lambda: self.sorter.set_op_type(sorter.COPY),
                    value=sorter.COPY).pack(anchor="w")
        Radiobutton(radiobuttons, text="Move Files", variable=self.op_type,
                    command=lambda: self.sorter.set_op_type(sorter.MOVE),
                    value=sorter.MOVE).pack(anchor="w")
        self.op_type.set(sorter.COPY)
        self.sorter.set_op_type(sorter.COPY)
        radiobuttons.pack(side=LEFT, anchor="n")

    def init_options(self):
        """Wrapper for calling everything in the Frame for various options."""
        opt_box = Frame(self.canvas)
        self.init_time_checkboxes(opt_box)
        self.init_selectors(opt_box)
        self.init_extras(opt_box)
        opt_box.pack(anchor="w")

    def init_extras(self, parent):
        """Various options that don't fall into any more broad category."""
        self.init_extras_dict()
        extras_box = Frame(parent)
        Label(extras_box, text="Other options:").pack(anchor="w")
        for item in self.extras_dict.keys():
            state = BooleanVar()
            (action, default) = self.extras_dict[item]
            button = Checkbutton(extras_box, text=item, variable=state,
                                 command=lambda_getbool_wrapper(action, state))
            if (default):
                button.select()
                # NOTE(review): unlike init_time_checkboxes this passes the
                # raw state.get() value (an int on Python 2) rather than
                # bool(...) -- presumably harmless, but worth confirming.
                action(state.get())
            button.pack(anchor="w")
        extras_box.pack(side=LEFT, anchor="nw")

    def build(self):
        """Wrapper for calling init functions for various other pieces of the
        GUI."""
        self.init_path_inputs()
        self.init_options()
        start = Button(self.canvas, text="Start",
                       command=lambda: self.start())
        start.pack(side=RIGHT)

    def start(self):
        """Run the configured sorter on the typed source/destination paths.

        The __dict__ dump looks like leftover debugging output.
        """
        print self.sorter.__dict__
        self.sorter.run(self.source_path.get(), self.dest_path.get())

    def __init__(self):
        """Create the root and canvas. Then, build the GUI and run."""
        root = Tk()
        self.sorter = sorter.Sorter()
        self.canvas = Canvas(root)
        self.canvas.pack()
        root.resizable(width=0, height=0)
        self.build()
        # and launch the app
        # This call BLOCKS (so your program waits until you close the window!)
        root.mainloop()
| acappiello/file-sorter | tkgui.py | Python | mit | 7,071 |
from fiona import crs
def test_proj_keys():
    """The registry of PROJ.4 parameter keys is populated as expected."""
    keys = crs.all_proj_keys
    assert len(keys) == 86
    for expected in ('init', 'proj', 'no_mayo'):
        assert expected in keys
def test_from_string():
    """Extra whitespace is tolerated and unknown parameters are dropped."""
    parsed = crs.from_string(
        " +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +foo ")
    assert len(parsed.items()) == 4
    assert parsed['proj'] == 'longlat'
    assert parsed['ellps'] == 'WGS84'
    assert parsed['datum'] == 'WGS84'
    assert parsed['no_defs'] == True
    assert 'foo' not in parsed
def test_from_string_utm():
    """An integer-valued parameter (the UTM zone) survives parsing as int."""
    parsed = crs.from_string(
        " +proj=utm +zone=13 +ellps=WGS84 +foo ")
    assert len(parsed.items()) == 3
    assert parsed['proj'] == 'utm'
    assert parsed['ellps'] == 'WGS84'
    assert parsed['zone'] == 13
    assert 'foo' not in parsed
def test_to_string():
    """Unknown and falsy items are left out of the generated string."""
    mapping = {
        'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
        'no_defs': True, 'foo': True, 'axis': False, 'belgium': [1, 2]}
    assert crs.to_string(mapping) == (
        "+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat")
def test_to_string_utm():
    """A UTM mapping with bogus items serialises to only the real params."""
    mapping = {
        'proj': 'utm', 'ellps': 'WGS84', 'zone': 13,
        'no_defs': True, 'foo': True, 'axis': False, 'belgium': [1, 2]}
    assert crs.to_string(mapping) == (
        "+ellps=WGS84 +no_defs +proj=utm +zone=13")
def test_to_string_epsg():
    """An init-style (EPSG) mapping serialises to a PROJ.4 string."""
    assert crs.to_string(
        {'init': 'epsg:4326', 'no_defs': True}) == "+init=epsg:4326 +no_defs"
def test_to_string_zeroval():
    """Parameters whose value is 0 (e.g. esri:102017) must not be dropped."""
    mapping = {'proj': 'laea', 'lat_0': 90, 'lon_0': 0, 'x_0': 0, 'y_0': 0,
               'ellps': 'WGS84', 'datum': 'WGS84', 'units': 'm',
               'no_defs': True}
    expected = ("+datum=WGS84 +ellps=WGS84 +lat_0=90 +lon_0=0 +no_defs "
                "+proj=laea +units=m +x_0=0 +y_0=0")
    assert crs.to_string(mapping) == expected
def test_from_epsg():
    """from_epsg builds an init-style mapping with no_defs set."""
    mapping = crs.from_epsg(4326)
    assert mapping['init'] == "epsg:4326"
    assert mapping['no_defs'] == True
def test_from_epsg_neg():
    """from_epsg must reject a negative EPSG code with ValueError.

    The original version silently passed when no exception was raised at
    all; the else-branch below makes that case a test failure. Any other
    exception type still propagates, as before.
    """
    try:
        crs.from_epsg(-1)
    except ValueError:
        pass
    else:
        raise AssertionError("from_epsg(-1) did not raise ValueError")
def test_to_string_unicode():
    """Unicode keys and values are handled (regression for issue #83)."""
    rendered = crs.to_string({
        u'units': u'm',
        u'no_defs': True,
        u'datum': u'NAD83',
        u'proj': u'utm',
        u'zone': 16})
    assert 'NAD83' in rendered
| johanvdw/Fiona | tests/test_crs.py | Python | bsd-3-clause | 2,552 |
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
# Make it so that you can get settings from django
sys.path += [os.path.abspath('../')]
import ndtilecache.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'ndtilecache.settings'
from django.conf import settings
from dataset import Dataset
import logging
logger=logging.getLogger("ndtilecache")
def main():
    """Command-line entry point: parse the dataset name from argv and
    remove that dataset from the tile cache."""
    arg_parser = argparse.ArgumentParser(description='Remove a dataset from the cache')
    arg_parser.add_argument('dataset_name', action="store")
    parsed_args = arg_parser.parse_args()
    Dataset(parsed_args.dataset_name).removeDataset()


if __name__ == "__main__":
    main()
| openconnectome/ocptilecache | tilecache/removeDataset.py | Python | apache-2.0 | 1,223 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Logging utility."""
import logging
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.util import log_util
class LogUtilTest(ForsetiTestCase):
    """Exercise the log_util logging helpers."""

    def test_set_logger_level_changes_existing_loggers(self):
        """A logger created *before* set_logger_level() picks up the change."""
        existing_logger = log_util.get_logger('test_module_1')
        self.assertEqual(existing_logger.level, logging.INFO, 'Expecting default level to be info')
        log_util.set_logger_level(logging.ERROR)
        self.assertEqual(existing_logger.level, logging.ERROR, 'Level should have changed to ERROR')
        log_util.set_logger_level(logging.INFO)
        self.assertEqual(existing_logger.level, logging.INFO, 'Level should have changed back to INFO')

    def test_set_logger_level_changes_future_loggers(self):
        """A logger created *after* set_logger_level() starts at the new level."""
        earlier_logger = log_util.get_logger('test_module_2')
        self.assertEqual(earlier_logger.level, logging.INFO, 'Expecting default level to be info')
        log_util.set_logger_level(logging.ERROR)
        later_logger = log_util.get_logger('test_module_3')
        self.assertEqual(later_logger.level, logging.ERROR, 'Level should have changed to ERROR')
        # Mirrors the original: the restore is re-checked on the *earlier*
        # logger; later_logger is not asserted again.
        log_util.set_logger_level(logging.INFO)
        self.assertEqual(earlier_logger.level, logging.INFO, 'Level should have changed back to INFO')
if __name__ == '__main__':
    # The module never imports unittest at the top level, so the original
    # bare `unittest.main()` raised NameError when this file was executed
    # directly; import it locally where it is needed.
    import unittest
    unittest.main()
| cschnei3/forseti-security | tests/common/util/log_util_test.py | Python | apache-2.0 | 2,137 |
from Tkinter import *
import mimetypes
class AreaVi(Text):
ACTIVE = None
    def __init__(self, default_filename, *args, **kwargs):
        """
        This class receives all Text widget arguments
        and one named default_filename which means
        the filename that is saved when no filename
        is specified.
        default_filename:
            The default path file where contents are saved.
        It implements a system of modes to handle
        tkinter keypresses events.
        The method hook can be used to bind events
        to callbacks mapped to specific modes.
        """
        Text.__init__(self, *args, **kwargs)
        # Maps mode id -> whether events should also reach the stock
        # Text-class bindings; see add_mode/chmode.
        self.setup = dict()
        # Maybe it should be?
        # abspath(default_filename)
        self.default_filename = default_filename
        # The file's path and name.
        self.filename = default_filename
        # Shouldn't it be LAST_COL and MSEL?
        # Mark remembering the column the cursor last occupied; up/down use
        # it so vertical movement keeps the column.
        self.last_col = '_last_col_'
        self.mark_set(self.last_col, '1.0')
        # This mark is used in AreaVi.replace_all.
        self.STOP_REPLACE_INDEX = '_stop_replace_index_'
        # Tags have name starting and ending with __
        # def cave(event):
        #     AreaVi.ACTIVE = event.widget
        # self.hook(-1, '<FocusIn>', cave)
        # The most recently created widget becomes the active one.
        AreaVi.ACTIVE = self
def active(self):
"""
It is used to create a model of target for plugins
defining python functions to run on fly.
With such an abstraction it is possible to define a AreaVi instance target
that python code will act on.
"""
AreaVi.ACTIVE = self
def chmode(self, id):
"""
This function is used to change the AreaVi instance's mode.
It receives one parameter named id which means the
mode number.
"""
opt = self.setup[id]
self.id = id
MODE_X = 'mode%s-1' % self
MODE_Y = 'mode%s%s' % (self, id)
if opt: self.bindtags((MODE_X, MODE_Y, self, 'Text', '.'))
else: self.bindtags((MODE_X, MODE_Y, self, '.'))
def add_mode(self, id, opt=False):
"""
It adds a new mode. The opt argument means whether
it should propagate the event to the internal text widget callbacks.
"""
self.setup[id] = opt
def del_mode(self, id):
"""
It performs the opposite of add_mode.
"""
del self.setup[id]
def hook(self, id, seq, callback):
"""
This method is used to hook a callback to a sequence
specified with its mode. The standard modes are insert and selection.
The insert mode prints the key character on the text area.
"""
MODE_Y = 'mode%s%s' % (self, id)
self.bind_class(MODE_Y, seq, callback, add=True)
    def unhook(self, id, seq, callback=None):
        """
        It performs the opposite of hook: it removes the binding for
        sequence seq in mode id.

        NOTE(review): the callback argument is accepted but ignored;
        unbind_class drops every callback bound to the sequence, not
        just the given one.
        """

        MODE_Y = 'mode%s%s' % (self, id)
        self.unbind_class(MODE_Y, seq)
def install(self, *args):
"""
It is like self.hook but accepts
a sequence of (id, seq, callback).
"""
for id, seq, callback in args:
self.hook(id, seq, callback)
    def uninstall(self, *args):
        """
        The inverse of install: it unhooks each
        (id, seq, callback) triple in args.
        """
        for id, seq, callback in args:
            self.unhook(id, seq, callback)
def tag_update(self, name, index0, index1, *args):
"""
It removes a given tag from index0 to index1 and re adds
the tag to the ranges of text delimited in args.
Example:
DATA_X = 'It is black.\n'
DATA_Y = 'It is blue.\n'
text = Text()
text.pack()
text.insert('1.0', DATA_X)
text.insert('2.0', DATA_Y)
text.tag_add('X', '1.0', '1.0 lineend')
text.tag_add('Y', '2.0', '2.0 lineend')
text.tag_config('X', background='black')
text.tag_config('Y', foreground='blue')
text.tag_update(text, 'X', '1.0', 'end', ('2.0', '2.0 lineend'))
It removes the X tag from '1.0' to 'end' then adds
the X tag to the range '2.0' '2.0 lineend'.
"""
self.tag_remove(name, index0, index1)
for indi, indj in args:
self.tag_add(name, indi, indj)
def indref(self, index):
"""
This is a short hand function.
It is used to convert a Text index
into two integers.
Ex:
a, b = area.indref('insert')
Now, a and b can be manipulated
as numbers.
"""
a, b = self.index(index).split('.')
return int(a), int(b)
def setcur(self, line, col):
"""
It is used to set the cursor position at
a given index using line and col.
line is a number which represents
a given line index in the AreaVi instance.
col is a column.
"""
self.mark_set('insert', '%s.%s' % (line, col))
self.see('insert')
def setcurl(self, line):
"""
set cursor line.
It is used to set the cursor position at a given
line. It sets the cursor at line.0 position.
"""
self.mark_set('insert', '%s.%s' % (line, '0'))
self.see('insert')
def indint(self, index):
"""
This method is used
when i can't use self.indref.
It seems self.indref returns
2.10 when the input is 2.34
it happens when the index col
is longer than the actual line col.
"""
a, b = index.split('.')
return int(a), int(b)
def indcol(self):
"""
This is a short hand method for getting
the last col in which the cursor was in.
It is useful when implementing functions to
select pieces of text.
"""
a, b = self.indref(self.last_col)
return int(a), int(b)
def setcol(self, line, col):
"""
It sets the mark used by the arrows
keys and selection state.
"""
self.mark_set(self.last_col, '%s.%s' % (line, col))
def indcur(self):
"""
It returns the actual line, col for the
cursor position. So, the values can be
manipulated with integers.
"""
a, b = self.indref('insert')
return int(a), int(b)
def seecur(self):
"""
Just a shorthand for area.see('insert')
which makes the cursor visible wherever it is in.
"""
self.see('insert')
def inset(self, index):
"""
Just a shorthand for area.mark_set('insert', index)
so we spare some typing.
"""
self.mark_set('insert', index)
    def is_end(self):
        """
        NOTE(review): despite its name, this returns True when the
        insert cursor is NOT on the last line, i.e. while moving one
        line down is still possible. Methods such as down() rely on
        exactly this behavior to keep the cursor from jumping to odd
        places when it reaches the end of the text region.
        """
        # I have to use 'end -1l linestart' since it seems the 'end' tag
        # corresponds to a one line after the last visible line.
        # So last line lineend != 'end'.
        return self.compare('insert linestart', '!=', 'end -1l linestart')
    def is_start(self):
        """
        NOTE(review): despite its name, this returns True when the
        insert cursor is NOT on the first line ('1.0'), i.e. while
        moving one line up is still possible. up() relies on this.
        """
        return self.compare('insert linestart', '!=', '1.0')
    def down(self):
        """
        It sets the cursor position one line down, keeping the column
        remembered by the last-col mark when possible.
        """
        if self.is_end():
            # is_end() is True only while a line exists below, so the
            # cursor never jumps to odd positions past the end.
            a, b = self.indcol()
            c, d = self.indcur()
            self.setcur(c + 1, b)
    def up(self):
        """
        It sets the cursor one line up, keeping the column remembered
        by the last-col mark when possible.
        """
        # is_start() is True only while a line exists above.
        if self.is_start():
            a, b = self.indcol()
            c, d = self.indcur()
            self.setcur(c - 1, b)
def left(self):
"""
It moves the cursor one character left.
"""
self.mark_set('insert', 'insert -1c')
# The mark used by self.down, self.up.
self.mark_set(self.last_col, 'insert')
def right(self):
"""
It moves the cursor one character right.
"""
self.mark_set('insert', 'insert +1c')
# The mark used by self.down, self.up.
self.mark_set(self.last_col, 'insert')
def start_selection(self):
"""
It sets the mark sel_start to the insert position.
So, when sel_up, sel_down, sel_right, sel_left are
called then they will select a region from this mark.
"""
self.mark_set('_sel_start_', 'insert')
def start_block_selection(self):
self.mark_set('_block_sel_start_', 'insert')
def is_add_up(self, index):
"""
It checks whether the selection must be
removed or added.
If it returns True then the selection must be
removed. True means that the '_sel_start_'
mark is positioned above the cursor position.
So, it must remove the selection instead of
adding it.
"""
return self.compare('%s linestart' % index, '<=', 'insert linestart')
def rmsel(self, index0, index1):
"""
This method is a short hand for area.tag_remove('sel', index0, index1)
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_remove('sel', index2, index3)
def addsel(self, index0, index1):
"""
It adds 'sel' to the range (AreaVi.min(index0, index1),
AreaVi.max(index0, index1))
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
self.tag_add('sel', index2, index3)
def min(self, index0, index1):
"""
It returns the min between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index0
else:
return index1
def max(self, index0, index1):
"""
It returns the max between index0 and index1.
"""
if self.compare(index0, '<=', index1):
return index1
else:
return index0
def sel_up(self):
"""
It adds 'sel' one line up the 'insert' position
and sets the cursor one line up.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.up()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_down(self, index):
"""
It returns True if the cursor is positioned below
the initial mark for selection.
It determins if the selection must be removed or added when
sel_down is called.
"""
return self.compare('%s linestart' % index, '>=', 'insert linestart')
def sel_down(self):
"""
It adds or removes selection one line down.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.down()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_right(self, index):
"""
It returns True if the cursor is positioned at the left
of the initial selection mark. It is useful for sel_right method.
"""
return self.compare(index, '>=', 'insert')
def sel_right(self):
"""
It adds or removes selection one character right.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.right()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def is_add_left(self, index):
"""
It returns True if the cursor is positioned at the right of
the initial mark selection.
"""
return self.compare(index, '<=', 'insert')
def sel_left(self):
"""
It adds or removes selection one character left.
"""
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.rmsel(index0, index1)
self.left()
index0 = self.min('_sel_start_', 'insert')
index1 = self.max('_sel_start_', 'insert')
self.addsel(index0, index1)
def indmsel(self):
"""
It is just a shorthand for getting the last selection mark.
"""
a, b = self.indref('_sel_start_')
return int(a), int(b)
def addblock(self, index0, index1):
"""
It adds block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.addsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def rmblock(self, index0, index1):
"""
It removes block selection from index0 to index1.
"""
index2 = self.min(index0, index1)
index3 = self.max(index0, index1)
a, b = self.indint(index2)
c, d = self.indint(index3)
for ind in xrange(a, c + 1):
e = min(b, d)
f = max(b, d)
self.rmsel('%s.%s' % (ind, e), '%s.%s' % (ind, f))
def block_down(self):
"""
It adds or removes block selection one line down.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.down()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def block_up(self):
"""
It adds or removes block selection one line up.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.up()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
    def is_line_start(self):
        """
        NOTE(review): despite its name, this returns True when the
        cursor is NOT at the start of its line, i.e. while moving left
        within the line is still possible.
        """
        return self.compare('insert', '!=', 'insert linestart')
def block_left(self):
"""
It adds block selection to the left.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.left()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
    def is_line_end(self):
        """
        NOTE(review): despite its name, this returns True when the
        cursor is NOT at the end of its line, i.e. while moving right
        within the line is still possible.
        """
        return self.compare('insert', '!=', 'insert lineend')
def block_right(self):
"""
It adds a block selection to the right.
"""
a, b = self.indcol()
c, d = self.indcur()
index = self.index('_block_sel_start_')
self.rmblock(index, '%s.%s' % (c, b))
self.right()
a, b = self.indcol()
c, d = self.indcur()
self.addblock(index, '%s.%s' % (c, b))
def clear_selection(self):
"""
It removes 'sel' tag from all ranges.
"""
try:
self.tag_remove('sel', 'sel.first', 'sel.last')
except Exception:
pass
def select_char(self):
"""
it adds 'sel' a char ahead the cursor position.
"""
self.addsel('insert', 'insert +1c')
def unselect_char(self):
"""
It removes 'sel' a char from the cursor position.
"""
self.rmsel('insert', 'insert +1c')
def clchar(self):
"""
It deletes a char from the cursor position.
"""
self.edit_separator()
self.delete('insert', 'insert +1c')
    def do_undo(self):
        """
        It undoes the last edit. The TclError that Tk raises when the
        undo stack is empty is deliberately swallowed.
        """
        try:
            self.edit_undo()
        except TclError:
            pass
    def do_redo(self):
        """
        It redoes the last undone edit. The TclError that Tk raises
        when the redo stack is empty is deliberately swallowed.
        """
        try:
            self.edit_redo()
        except TclError:
            pass
def sel_text_start(self):
"""
It selects all text from insert position to the start position
of the text.
"""
index = self.index('insert')
self.go_text_start()
self.addsel(index, 'insert')
def sel_text_end(self):
"""
It selects all text form the insert position to the end of the text.
"""
index = self.index('insert')
self.go_text_end()
self.addsel(index, 'insert')
def go_text_start(self):
"""
It goes to the first position in the text.
"""
self.mark_set('insert', '1.0')
self.see('insert')
def go_text_end(self):
"""
It goes to the end of the text.
"""
self.mark_set('insert', 'end linestart')
self.see('insert')
def sel_line_start(self):
"""
It adds selection from the insert position to the
start of the line.
"""
index = self.index('insert')
self.go_line_start()
self.addsel(index, 'insert')
def sel_line_end(self):
"""
It selects all text from insert position to the end of the line.
"""
index = self.index('insert')
self.go_line_end()
self.addsel(index, 'insert')
def go_line_start(self):
"""
It goes to the beginning of the cursor position line.
"""
self.mark_set('insert', 'insert linestart')
def go_line_end(self):
"""
It goes to the end of the cursor position line.
"""
self.mark_set('insert', 'insert lineend')
def go_next_word(self):
"""
It puts the cursor on the beginning of the next word.
"""
count = IntVar()
# It seems that regexp doesnt match ^ as starting of the line when nolinestop=True
index = self.search(' [^ ]|^[^ ]', 'insert +1c', regexp=True, nolinestop=False, count=count)
if not index: return
self.mark_set('insert', '%s +%sc' % (index, count.get() - 1))
self.see('insert')
def go_prev_word(self):
"""
It puts the cursor in the beginning of the previous word.
"""
count = IntVar()
# It seems that regexp doesnt match ^ as starting of the line when nolinestop=True
index = self.search(' [^ ]|^[^ ]', 'insert -1c', '1.0', regexp=True, nolinestop=False, count=count, backwards=True)
if not index: return
self.mark_set('insert', '%s +%sc' % (index, count.get() - 1))
self.see('insert')
def go_next_sym(self, chars=('(', ')', '.', '[', ']', '{', '}', ',', ':', ';', "'", '"')):
"""
It puts the cursor on the next occurency of:
('(', ')', '.', '[', ']', '{', '}', ',', ':', ';', '"', "'")
"""
chars = map(lambda ind: '\%s' % ind, chars)
REG = '|'.join(chars)
index = self.search(REG, 'insert +1c', 'end', regexp=True, nolinestop=True)
if not index: return
self.mark_set('insert', index)
self.see('insert')
def go_prev_sym(self, chars=('(', ')', '.', '[', ']', '{', '}', ',', ':', ';', "'", '"')):
"""
It puts the cursor on the previous occurency of:
('(', ')', '.', '[', ']', '{', '}', ',', ':', ';', '"', "'")
"""
chars = map(lambda ind: '\%s' % ind, chars)
REG = '|'.join(chars)
index = self.search(REG, 'insert', '1.0', regexp=True, nolinestop=True, backwards=True)
if not index: return
self.mark_set('insert', index)
self.see('insert')
def cllin(self):
"""
It deletes the cursor position line, makes the cursor visible
and adds a separator to the undo stack.
"""
self.edit_separator()
self.delete('insert linestart', 'insert +1l linestart')
self.see('insert')
def cpsel(self):
"""
It copies to the clip board ranges of text
that are selected and removes the selection.
"""
data = self.tag_get_ranges('sel')
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def cpblock(self):
"""
It copies blocks of text that are selected
with a separator '\n'.
"""
data = self.tag_get_ranges('sel', '\n')
self.clipboard_clear()
self.clipboard_append(data)
self.tag_remove('sel', 'sel.first', 'sel.last')
def ctblock(self):
"""
It cuts blocks of text with a separator '\n'.
"""
data = self.tag_get_ranges('sel', '\n')
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.tag_delete_ranges('sel')
def ctsel(self):
"""
It cuts the selected text.
"""
data = self.tag_get_ranges('sel')
self.clipboard_clear()
self.clipboard_append(data)
self.edit_separator()
self.tag_delete_ranges('sel')
def clsel(self):
"""
It deletes all selected text.
"""
self.edit_separator()
self.tag_delete_ranges('sel')
def ptsel(self):
"""
It pastes over the cursor position data from the clipboard
and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert', data)
def ptsel_after(self):
"""
It pastes one line after the cursor position data from clipboard
and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert +1l linestart', data)
def ptsel_before(self):
"""
It pastes data from the cursor position one line before the cursor
position and adds a separator.
"""
data = self.clipboard_get()
self.edit_separator()
self.insert('insert linestart', data)
def select_line(self):
"""
It adds selection to the cursor position line.
"""
self.tag_add('sel', 'insert linestart', 'insert +1l linestart')
def unselect_line(self):
"""
It removes selection from the cursor position line.
"""
self.tag_remove('sel', 'insert linestart', 'insert +1l linestart')
def toggle_line_selection(self):
map = self.tag_contains('sel', 'insert linestart', 'insert +1l linestart')
if map:
self.unselect_line()
else:
self.select_line()
    def select_word(self):
        """
        It selects the space-delimited word under the cursor: a blank
        is searched backwards and forwards on the cursor line and the
        span between them is tagged 'sel', falling back to the line
        start/end on whichever side has no blank.
        """
        index1 = self.search(' ', 'insert', stopindex='insert linestart', backwards=True)
        index2 = self.search(' ', 'insert', stopindex='insert lineend')
        self.tag_add('sel', 'insert linestart' if not index1 else '%s +1c' % index1,
                           'insert lineend' if not index2 else index2)
def scroll_line_up(self):
"""
It scrolls one line up
"""
self.yview(SCROLL, -1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert -1l')
def scroll_line_down(self):
"""
It scrolls one line down.
"""
self.yview(SCROLL, 1, 'units')
is_visible = self.dlineinfo('insert')
if not is_visible:
self.mark_set('insert', 'insert +1l')
def scroll_page_down(self):
"""
It goes one page down.
"""
self.yview(SCROLL, 1, 'page')
self.mark_set('insert', '@0,0')
def scroll_page_up(self):
"""
It goes one page up.
"""
self.yview(SCROLL, -1, 'page')
self.mark_set('insert', '@0,0')
def insert_line_down(self):
"""
It inserts one line down from the cursor position.
"""
self.edit_separator()
self.insert('insert +1l linestart', '\n')
self.mark_set('insert', 'insert +1l linestart')
self.see('insert')
def select_all(self):
"""
It selects all text.
"""
self.tag_add('sel', '1.0', 'end')
def insert_line_up(self):
"""
It inserts one line up.
"""
self.edit_separator()
self.insert('insert linestart', '\n')
self.mark_set('insert', 'insert -1l linestart')
self.see('insert')
    def shift_sel_right(self, width, char):
        """
        Indent the selected lines: every line touched by the current
        selection gets width copies of char prepended, via shift_right.
        Requires an active selection ('sel.first'/'sel.last').
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_right(srow, erow, width, char)
    def shift_sel_left(self, width):
        """
        Dedent the selected lines: the first width characters of every
        line touched by the current selection are deleted, via
        shift_left. Requires an active selection.
        """
        srow, scol = self.indref('sel.first')
        erow, ecol = self.indref('sel.last')
        self.shift_left(srow, erow, width)
def shift_right(self, srow, erow, width, char):
"""
Given a start row and a end row it shifts
a block of text to the right.
This is specially useful when working with
source code files.
"""
self.edit_separator()
for ind in xrange(srow, erow + 1):
self.insert('%s.0' % ind, width * char)
def shift_left(self, srow, erow, width):
    """
    Given a start row and a end row it shifts
    a block of text to the left.
    This is specially useful when working with
    source code files.
    """
    # One undo step covers the whole block shift.
    self.edit_separator()
    for ind in xrange(srow, erow + 1):
        self.delete('%s.0' % ind, '%s.%s' % (ind, width))
def tag_find_ranges(self, name, regex, *args, **kwargs):
    """
    It returns an interator corresponding to calling AreaVi.find
    between the ranges of the tag specified by name.
    You shouldn't delete or insert data while performing this operation.

    name - Name of the tag whose ranges are searched.
    regex - Pattern handed to AreaVi.find.
    """
    # It should be built on top of nextrange.
    # tag_ranges yields a flat list of (start, end) index pairs.
    map = self.tag_ranges(name)
    for indi in range(0, len(map) - 1, 2):
        seq = self.find(regex, map[indi], map[indi + 1], *args, **kwargs)
        for indj in seq:
            yield indj
def tag_replace_ranges(self, name, regex, data, index='1.0', stopindex='end',
    *args, **kwargs):
    """
    It replaces all occurrences of regex inside a tag ranges
    for data.
    name - Name of the tag.
    regex - The pattern.
    data - The data to replace.
    args - Arguments given to AreaVi.find.
    **kwargs - A dictionary of arguments given to AreaVi.find.
    """
    while True:
        map = self.tag_nextrange(name, index, stopindex)
        if not map: break
        index3, index4 = map
        # Continue scanning after the range just processed.
        index = index4
        self.replace_all(regex, data, index3, index4, *args, **kwargs)
def tag_setup(self, theme):
    """
    Configure a set of tags from a theme mapping. Just a short hand for

    theme = {'tag_name': {'background': 'blue'}
    for name, kwargs in theme.iteritems():
        self.tag_config(name, **kwargs)
        self.tag_lower(name)
    """
    for name, kwargs in theme.iteritems():
        self.tag_config(name, **kwargs)
        # Lower the tag so selection/other tags display above it.
        self.tag_lower(name)
def tag_add_found(self, name, map):
    """
    It adds a tag to the match ranges from either AreaVi.find or
    AreaVi.tag_find_ranges.
    name - The tag to be added.
    map - An iterator from AreaVi.find or AreaVi.tag_find_ranges.
    """
    # Each item is (chunk, start_index, end_index); the chunk is unused.
    for _, index0, index1 in map:
        self.tag_add(name, index0, index1)
def split_with_cond(self, regex, cond, *args, **kwargs):
    """
    It determines which chunks should be yielded based on cond.

    cond - A callable cond(chunk, index0, index1); a falsy return
           drops the chunk, otherwise its return value is yielded.
    """
    for chk, index0, index1 in self.split(regex, *args, **kwargs):
        data = cond(chk, index0, index1)
        if data: yield data
def split(self, *args, **kwargs):
    """
    It splits the contents of the text widget into chunks based on a regex.
    Both the text between matches and the matches themselves are yielded,
    each as (chunk, start_index, end_index).
    """
    index0 = '1.0'
    for chk, index1, index2 in self.find(*args, **kwargs):
        # Yield the text that lies between the previous match and this one.
        if self.compare(index1, '>', index0):
            yield(self.get(index0, index1), index0, index1)
        yield(chk, index1, index2)
        index0 = index2
def find_with_cond(self, regex, cond, *args, **kwargs):
    """
    It determines which matches should be yielded.

    cond - A callable cond(chunk, index0, index1); a falsy return
           skips the match, otherwise its return value is yielded.
    """
    for chk, index0, index1 in self.find(regex, *args, **kwargs):
        data = cond(chk, index0, index1)
        if not data: continue
        yield(data)
def find_one_by_line(self, regex, index, stopindex='end', exact=None, regexp=True, nocase=None,
    elide=None, nolinestop=None):
    """
    Like AreaVi.find, but yields at most one match per line: after each
    match the scan resumes at the start of the following line.

    It yields tuples of (chunk, start_index, end_index).
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
        nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # The search filled 'count' with the match length; name it 'size'
        # instead of shadowing the builtin len().
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index(tmp)
        # Jump to the next line so only the first match of each line is yielded.
        index = '%s +1l' % pos0
        yield(chunk, pos0, pos1)
def find(self, regex, index, stopindex='end', exact=None, regexp=True, nocase=None,
    elide=None, nolinestop=None):
    """
    It returns an iterator of matches. It is based on the Text.search method.
    Each item is a tuple of (chunk, start_index, end_index).
    """
    count = IntVar()
    while True:
        index = self.search(regex, index, stopindex, exact=exact, nocase=nocase,
        nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
        if not index:
            break
        # The search filled 'count' with the match length; name it 'size'
        # instead of shadowing the builtin len().
        size = count.get()
        tmp = '%s +%sc' % (index, size)
        chunk = self.get(index, tmp)
        pos0 = self.index(index)
        pos1 = self.index(tmp)
        # Advance one char past the match end so zero-width matches
        # can't loop forever on the same index.
        index = '%s +1c' % tmp
        yield(chunk, pos0, pos1)
def search(self, pattern, index, stopindex=None, forwards=None,
    backwards=None, exact=None, regexp=None, nocase=None,
    count=None, elide=None, nolinestop=None):
    '''Standard search method, but with support for the nolinestop
    option which is new in tk 8.5 but not supported by tkinter out
    of the box.

    It builds the raw Tcl "search" command by hand so every option,
    including -nolinestop, can be forwarded to the widget.
    '''
    args = [self._w, 'search']
    if forwards: args.append('-forwards')
    if backwards: args.append('-backwards')
    if exact: args.append('-exact')
    if regexp: args.append('-regexp')
    if nocase: args.append('-nocase')
    if elide: args.append('-elide')
    if nolinestop: args.append("-nolinestop")
    if count: args.append('-count'); args.append(count)
    # '--' stops option parsing so a pattern starting with '-'
    # isn't taken for a switch.
    if pattern and pattern[0] == '-': args.append('--')
    args.append(pattern)
    args.append(index)
    if stopindex: args.append(stopindex)
    return str(self.tk.call(tuple(args)))
def pick_next_up(self, name, regex, index0='insert', stopindex='1.0', exact=None, regexp=True,
    nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex up the cursor.
    It sets the cursor at the index of the occurrence, tags the match
    with 'name' and returns its (start, end) indices, or None if there
    is no match.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact,
    nocase=nocase, elide=elide, nolinestop=nolinestop, backwards=True, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    self.tag_add(name, index, index1)
    # Moving backwards: put the cursor at the match start.
    self.mark_set('insert', index)
    self.see('insert')
    return index, index1
def pick_next_down(self, name, regex, index0='insert', stopindex='end', exact=None, regexp=True,
    nocase=None, elide=None, nolinestop=None):
    """
    Find the next match with regex down.
    It sets the cursor at the index of the occurrence, tags the match
    with 'name' and returns its (start, end) indices, or None if there
    is no match.
    """
    count = IntVar()
    index = self.search(regex, index0, stopindex=stopindex, regexp=regexp, exact=exact, nocase=nocase,
    elide=elide, nolinestop=nolinestop, count=count)
    if not index: return
    index1 = self.index('%s +%sc' % (index, count.get()))
    self.tag_add(name, index, index1)
    # Moving forwards: put the cursor at the match end.
    self.mark_set('insert', index1)
    self.see('insert')
    return index, index1
def replace(self, regex, data, index=None, stopindex=None, forwards=None,
    backwards=None, exact=None, regexp=True, nocase=None, elide=None, nolinestop=None):
    """
    It is used to replace occurrences of a given match.
    It is possible to use a callback function to return what is replaced
    as well: if data is callable it is called with the match's
    (start, end) indices and its return value is inserted.

    Returns (start_index, replacement_length) or None when no match.
    """
    count = IntVar()
    index = self.search(regex, index, stopindex, forwards=forwards, backwards=backwards, exact=exact, nocase=nocase,
    nolinestop=nolinestop, regexp=regexp, elide=elide, count=count)
    if not index: return
    if callable(data): data = data(index, self.index('%s +%sc' % (index, count.get())))
    index0 = self.index('%s +%sc' % (index, count.get()))
    self.delete(index, index0)
    self.insert(index, data)
    return index, len(data)
def replace_all(self, regex, data, index='1.0', stopindex='end', exact=None, regexp=True, nocase=None,
    elide=None, nolinestop=None):
    """
    It is used to replace all occurrences of a given match in a range.
    It accepts a callback function that determines what is replaced.
    """
    # It is needed because the range will grow
    # when data is inserted, the intent is searching
    # over a pre defined range. A mark keeps tracking the original
    # stop position as text shifts around it.
    self.mark_set(self.STOP_REPLACE_INDEX, stopindex)
    while True:
        map = self.replace(regex, data, index, self.STOP_REPLACE_INDEX, exact=exact, nocase=nocase,
        nolinestop=nolinestop, regexp=regexp, elide=elide)
        if not map: return
        index, size = map
        # Resume searching right after the inserted replacement.
        index = self.index('%s +%sc' % (index, size))
def get_paren_search_dir(self, start, end):
    """
    Decide the search direction for bracket matching based on the
    character under the cursor: False (forwards) when it is the
    opening char, True (backwards) when it is the closing char,
    None when it is neither.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return False
    elif char == end:
        return True
    else:
        return None
def get_paren_search_sign(self, start, end):
    """
    Return the index-arithmetic sign for bracket matching: '+' when
    the char under the cursor is the opening char (search ahead),
    '-' when it is the closing char (search behind), None otherwise.
    """
    char = self.get('insert', 'insert +1c')
    if char == start:
        return '+'
    elif char == end:
        return '-'
    else:
        return None
def select_case_pair(self, pair, MAX=1500):
    """
    Select the region between the cursor and the matching bracket
    of the pair under the cursor.

    pair - A (start, end) character pair, e.g ('(', ')').
    MAX  - Maximum number of chars scanned for the match.
    """
    index = self.case_pair(MAX, *pair)
    if not index: return
    min = self.min(index, 'insert')
    if self.compare(min, '==', 'insert'): min = '%s +1c' % min
    max = self.max(index, 'insert')
    # Bug fix: this branch previously extended `min` a second time
    # instead of `max`, producing a wrong selection when the matching
    # bracket lies before the cursor.
    if self.compare(max, '==', 'insert'): max = '%s +1c' % max
    self.tag_add('sel', min, max)
def case_pair(self, max, start='(', end=')'):
    """
    Once this method is called, it returns an index for the next
    matching parenthesis or None if the char over the cursor
    isn't either '(' or ')'.

    max - Maximum number of characters to scan in either direction.
    """
    dir = self.get_paren_search_dir(start, end)
    # If dir is None then there is no match.
    if dir == None: return ''
    # Regex matching either bracket char, both escaped.
    REG = '\%s|\%s' % (start, end)
    sign = self.get_paren_search_sign(start, end)
    # Nesting counter: +1 for an opening char, -1 for a closing one;
    # the match is found when it reaches zero.
    count = 0
    # If we are searching fowards we don't need
    # to add 1c.
    index = 'insert %s' % ('+1c' if dir else '')
    size = IntVar(0)
    while True:
        index = self.search(REG, index = index,
        stopindex = 'insert %s%sc' % (sign, max),
        count = size,
        backwards = dir,
        regexp = True)
        if not index: return ''
        char = self.get(index, '%s +1c' % index)
        count = count + (1 if char == start else -1)
        if not count:
            return index
        # When we are searching backwards we don't need
        # to set a character back because index will point
        # to the start of the match.
        index = '%s %s' % (index, '+1c' if not dir else '')
def clear_data(self):
    """
    It clears all text inside an AreaVi instance and resets the
    filename to the default, then fires the <<ClearData>> event.
    """
    import os
    self.delete('1.0', 'end')
    self.filename = os.path.abspath(self.default_filename)
    self.event_generate('<<ClearData>>')
def load_data(self, filename):
    """
    It dumps all text from a file into an AreaVi instance.
    filename - Name of the file.

    Fires <<LoadData>> and a mimetype-specific <<Load-TYPE>> event.
    """
    import os
    filename = os.path.abspath(filename)
    self.delete('1.0', 'end')
    fd = open(filename, 'r')
    data = fd.read()
    fd.close()
    self.insert('1.0', data)
    self.filename = filename
    self.event_generate('<<LoadData>>')
    # e.g. <<Load-text/x-python>> so plugins can hook per file type.
    type, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Load-%s>>' % type)
def save_data(self):
    """
    It saves the actual text content in the current file.

    Fires <<SaveData>> and a mimetype-specific <<Save-TYPE>> event.
    """
    data = self.get('1.0', 'end')
    # Tk hands back unicode; persist as utf-8 bytes.
    data = data.encode('utf-8')
    fd = open(self.filename, 'w')
    fd.write(data)
    fd.close()
    self.event_generate('<<SaveData>>')
    # e.g. <<Save-text/x-python>> so plugins can hook per file type.
    type, _ = mimetypes.guess_type(self.filename)
    self.event_generate('<<Save-%s>>' % type)
def save_data_as(self, filename):
    """
    It saves the content of the given AreaVi instance into
    a file whose name is specified in filename.
    filename - Name of the file to save the data.

    Note: it rebinds self.filename, so later saves go to this file.
    """
    self.filename = filename
    self.save_data()
def tag_contains(self, name, index0, index1):
    """
    It returns the enclosing (start, end) range if there is a tag range
    for a tag name which contains index0 and index1, otherwise ().

    The algorithm consists of:
    It calls text.tag_ranges(name)
    which returns a list of indexs
    that the tag is attached to.
    Then it goes through the list of indexs
    checking which of the indexes contains index0 and index1.
    """
    ls = self.tag_ranges(name)
    # tag_ranges is a flat list: [start0, end0, start1, end1, ...].
    for ind in xrange(0, len(ls) - 1, 2):
        index2 = ls[ind].string
        index3 = ls[ind + 1].string
        r1 = self.compare(index2, '<=', index0)
        r2 = self.compare(index3, '>=', index1)
        if r1 and r2: return index2, index3
    return ()
def tag_sub_ranges(self, name, data, index0='1.0', index1='end'):
    """
    It replaces ranges of text delimited by tag between index0
    and index1 for data.
    """
    while 1:
        map = self.tag_nextrange(name, index0, index1)
        if not map: break
        index3, index4 = map
        # Delete then insert at the same spot; the tag goes away with
        # the deleted text, so the loop advances to the next range.
        self.delete(index3, index4)
        self.insert(index3, data)
def tag_delete_ranges(self, name, *args):
    """
    It deletes ranges of text that are mapped to tag name.
    Implemented as substitution by the empty string.
    """
    self.tag_sub_ranges(name, '', *args)
def tag_get_ranges(self, name, sep=''):
    """
    It returns the text of every range tagged with name, joined
    with sep appended after each chunk.
    """
    data = ''
    for ind in self.tag_get_data(name):
        data = data + ind + sep
    return data
def tag_get_data(self, name):
    """
    It returns an iterator with the text inside tag name.
    """
    try:
        map = self.tag_ranges(name)
    except Exception:
        # NOTE(review): silently yields nothing when tag_ranges fails;
        # presumably deliberate best-effort behavior.
        pass
    else:
        # Flat list of (start, end) index pairs.
        for ind in xrange(0, len(map) - 1, 2):
            data = self.get(map[ind], map[ind + 1])
            yield(data)
def mark_set_next(self, tag, mark):
    """
    Move mark to the start of the next range of tag after the
    mark's current position, if such a range exists.
    """
    next_tag = self.tag_nextrange(tag, '%s +1c' % mark)
    if next_tag:
        self.mark_set(mark, next_tag[0])
def mark_set_prev(self, tag, mark):
    """
    Move mark to the start of the previous range of tag before the
    mark's current position, if such a range exists.
    """
    prev_tag = self.tag_prevrange(tag, mark)
    if prev_tag:
        self.mark_set(mark, prev_tag[0])
def tag_prev_occur(self, tag_names, index0, index1, default):
    """
    Return the end index of the nearest previous range of any of the
    given tags between index0 and index1, or default when none exists.
    """
    for ind in tag_names:
        pos = self.tag_prevrange(ind, index0, index1)
        if pos: return pos[1]
    return default
def tag_next_occur(self, tag_names, index0, index1, default):
    """
    Return the start index of the nearest next range of any of the
    given tags between index0 and index1, or default when none exists.
    """
    for ind in tag_names:
        pos = self.tag_nextrange(ind, index0, index1)
        if pos: return pos[0]
    return default
| kk9599/vy | vyapp/areavi.py | Python | mit | 42,534 |
from __future__ import print_function, absolute_import
from collections import OrderedDict
import json
from functools import partial
import operator
import os
import traceback
import logging
from ...vendor import Qt
from pymel.core import Callback, cmds, hide, scriptJob, select, selected, setParent, PyNode, \
showHidden, warning, xform, \
button, columnLayout, deleteUI, textFieldGrp
import pdil
from ... import nodeApi
from . import card as fossil_card # Hack to not deal with the fact that "card" is a var used all over, thusly shadowing this import
from . import cardparams
from ._lib2 import controllerShape
from ._core import config
from ._core import find
from ._core import skinning
from ._lib import proxyskel
from ._lib import tpose
from . import updater
from . import util
from .ui import controllerEdit
from .ui import _visGroup
from .ui import spacesTab
from .ui import startingTab
log = logging.getLogger(__name__)
RigToolUI = pdil.ui.getQtUIClass( os.path.dirname(__file__) + '/ui/rigToolUI.ui', 'pdil.tool.fossil.ui.rigToolUI')
def matchOrient():
    """
    Copy the world-space rotation of the first selected object onto
    every other selected object. No-op unless at least two objects
    are selected.
    """
    if len(selected()) < 2:
        return
    src = selected()[0]
    rot = xform(src, q=True, ws=True, ro=True)
    for dest in selected()[1:]:
        xform( dest, ws=True, ro=rot )
def customUp():
    """
    Apply a custom up-vector to every selected blueprint joint, using
    the first selected object as the up arrow when its name contains
    'arrow'; otherwise no arrow is passed.
    """
    if not selected():
        return
    arrow = selected()[0]
    # Only treat the first selection as the up-arrow when named like one.
    if not arrow.name().count('arrow'):
        arrow = None
    if not util.selectedJoints():
        warning('No BPJoints were selected')
        return
    for jnt in util.selectedJoints():
        fossil_card.customUp(jnt, arrow)
def complexJson(s):
    """
    Pretty-print a two-level mapping {side: {component: [entries]}} as
    hand-formatted JSON text, one entry per line, for display in the
    rig-state debug fields. Falsy input renders as an empty object.
    """
    if not s:
        return '{\n}'
    lines = ['{']
    for side in s:
        lines.append('"%s": {' % side)
        for component in s[side]:
            lines.append('    "%s": [' % component)
            for entry in s[side][component]:
                lines.append('        %s,' % json.dumps(entry))
            lines[-1] = lines[-1][:-1]  # drop trailing comma
            lines.append('    ],')
        lines[-1] = lines[-1][:-1]  # drop trailing comma
        lines.append('},')
    lines[-1] = lines[-1][:-1]  # drop trailing comma
    lines.append('}')
    return '\n'.join(lines)
def simpleJson(s):
    """
    Pretty-print a mapping {side: {component: value}} as hand-formatted
    JSON text, one component per line, for display in the rig-state
    debug fields. Falsy input renders as an empty object.
    """
    if not s:
        return '{\n}'
    lines = ['{']
    for side in s:
        lines.append('"%s": {' % side)
        for component in s[side]:
            lines.append('    "%s": %s,' % (component, json.dumps(s[side][component])))
        lines[-1] = lines[-1][:-1]  # drop trailing comma
        lines.append('},')
    lines[-1] = lines[-1][:-1]  # drop trailing comma
    lines.append('}')
    return '\n'.join(lines)
class RigTool(Qt.QtWidgets.QMainWindow):
_inst = None
FOSSIL_START_TAB = 'Fossil_RigTool_StartTab'
FOSSIL_SPACE_TAB = 'Fossil_RigTool_SpacedTab'
@staticmethod
@pdil.alt.name( 'Rig Tool' )
def run():
    # Entry point registered with the alt-command system; building the
    # window shows it as a side effect of __init__.
    return RigTool()
def connectorDisplayToggle(self):
    # Show or hide card connector nodes to match the menu checkbox.
    if self.ui.actionConnectors.isChecked():
        showHidden( fossil_card.getConnectors() )
    else:
        hide( fossil_card.getConnectors() )
def handleDisplayToggle(self):
    # Toggle display handles on every joint of every blueprint card.
    val = self.ui.actionHandles.isChecked()
    #cards = ls( '*.skeletonInfo', o=1 )
    for card in find.blueprintCards():
        for joint in card.joints:
            joint.displayHandle.set(val)
def orientsToggle(self):
    # Show or hide card orient arrows to match the menu checkbox.
    if self.ui.actionCard_Orients_2.isChecked():
        showHidden( fossil_card.getArrows() )
    else:
        hide( fossil_card.getArrows() )
def __init__(self, *args, **kwargs):
    # Build the main Fossil window: load persisted settings, construct
    # the .ui, wire every button/menu callback, create the sub-tabs and
    # listers, and register scriptJobs/pubsub hooks.
    self.settings = pdil.ui.Settings( 'Fossil GUI Settings',
        {
            'spineCount': 5,
            'fingerCount': 4,
            'thumb': True,
            'spineOrient': 'Vertical',
            'legType': 'Human',
            'currentTabIndex': 1,  # 1-base AFAIK THE ONLY ONE ACTUALLY NEEDED
            'panels': [75, 75, 25, 100, 75, 25],
            'rebuildMode': 'Use Current Shapes',
            'closedControlFrame': False,
            'closeDebugFrame': True,
            'showIndividualRestore': False,
            'showRigStateDebug': False,
            'runUpdaters': True,
        })
    objectName = 'Rig_Tool'
    # Remove any existing windows first
    pdil.ui.deleteByName(objectName)
    super(RigTool, self).__init__(pdil.ui.mayaMainWindow())
    # Not sure how else to get window's scale factor for high dpi displays
    self.scaleFactor = self.font().pixelSize() / 11.0
    self.ui = RigToolUI()
    self.ui.setupUi(self)
    self.setObjectName(objectName)
    self.setWindowTitle('Fossil')
    # Menu callbacks
    self.ui.actionReconnect_Real_Joints.triggered.connect( Callback(fossil_card.reconnectRealBones) )
    self.ui.actionMatch_Selected_Orients.triggered.connect( Callback(matchOrient) )
    self.ui.actionCard_Orients_2.triggered.connect( Callback(self.orientsToggle) )
    # &&& I think this isn't useful but I'm going to wait a while to be sure.
    #self.ui.actionConnectors.triggered.connect( Callback(self.connectorDisplayToggle) )
    self.ui.menuVisibility.removeAction(self.ui.actionConnectors)
    self.ui.actionHandles.triggered.connect( Callback(self.handleDisplayToggle) )
    self.ui.actionNaming_Rules.triggered.connect( Callback(nameRulesWindow) )
    self.ui.actionShow_Individual_Restores.setChecked( self.settings['showIndividualRestore'] )
    self.ui.actionShow_Card_Rig_State.setChecked( self.settings['showRigStateDebug'] )
    self.ui.actionShow_Individual_Restores.triggered.connect( Callback(self.restoreToggle) )
    self.ui.actionShow_Card_Rig_State.triggered.connect( Callback(self.rigStateToggle) )
    # Callback setup
    self.ui.makeCardBtn.clicked.connect(self.makeCard)
    self.ui.selectAllBtn.clicked.connect(self.selectAll)
    self.ui.buildBonesBtn.clicked.connect(Callback(fossil_card.buildBones))
    self.ui.deleteBonesBtn.clicked.connect( Callback(fossil_card.deleteBones) )
    self.ui.buildRigBtn.clicked.connect( fossil_card.buildRig )
    self.ui.deleteRigBtn.clicked.connect( partial(util.runOnEach, operator.methodcaller('removeRig'), 'Delting Rig') )
    self.ui.saveModsBtn.clicked.connect( partial(util.runOnEach, operator.methodcaller('saveState'), 'Saving State') )
    self.ui.restoreModsBtn.clicked.connect( partial(util.runOnEach, operator.methodcaller('restoreState'), 'Restoring State') )
    self.ui.duplicateCardBtn.clicked.connect(self.duplicateCard)
    self.ui.mergeCardBtn.clicked.connect(self.mergeCard)
    self.ui.splitCardBtn.clicked.connect(self.splitCard)
    self.ui.addCardIkButton.clicked.connect( Callback(self.addCardIk) )
    self.ui.remCardIkButton.clicked.connect( Callback(self.removeCardIk) )
    self.ui.insertJointBtn.clicked.connect(self.insertJoint)
    self.ui.addTipBtn.clicked.connect(partial(self.insertJoint, True))
    self.ui.deleteJointBtn.clicked.connect(self.deleteJoint)
    self.ui.rebuildProxyBtn.clicked.connect( proxyskel.rebuildConnectorProxy )
    self.ui.customUpBtn.clicked.connect(Callback(customUp))
    self.ui.updateRigState.clicked.connect(self.updateField)
    # Targeted save/load buttons, one pair per rig-state category.
    self.ui.space_save.clicked.connect( partial(self.targeted_save, 'spaces') )
    self.ui.space_load.clicked.connect( partial(self.targeted_load, 'spaces') )
    self.ui.vis_save.clicked.connect( partial(self.targeted_save, 'visGroup') )
    self.ui.vis_load.clicked.connect( partial(self.targeted_load, 'visGroup') )
    self.ui.shape_save.clicked.connect( partial(self.targeted_save, 'shape') )
    self.ui.shape_local_load.clicked.connect( partial(self.targeted_load, 'shape_local') )
    self.ui.shape_world_load.clicked.connect( partial(self.targeted_load, 'shape_world') )
    self.ui.constraints_save.clicked.connect( partial(self.targeted_save, 'constraints') )
    self.ui.constraints_load.clicked.connect( partial(self.targeted_load, 'constraints') )
    self.ui.connections_save.clicked.connect( partial(self.targeted_save, 'connections') )
    self.ui.connections_load.clicked.connect( partial(self.targeted_load, 'connections') )
    self.ui.driven_save.clicked.connect( partial(self.targeted_save, 'setDriven') )
    self.ui.driven_load.clicked.connect( partial(self.targeted_load, 'setDriven') )
    self.ui.custom_save.clicked.connect( partial(self.targeted_save, 'customAttrs') )
    self.ui.custom_load.clicked.connect( partial(self.targeted_load, 'customAttrs') )
    self.ui.locked_save.clicked.connect( partial(self.targeted_save, 'lockedAttrs') )
    self.ui.locked_load.clicked.connect( partial(self.targeted_load, 'lockedAttrs') )
    def restore(key, restoreFunc):
        # Restore a single rig-state category on every selected card.
        print('Restoring', key)
        [ c._restoreData(restoreFunc, c.rigState[key]) for c in util.selectedCards() ]
    # Individual restore commands
    for niceName, (harvestFunc, restoreFunc) in nodeApi.Card.toSave.items():
        button = getattr(self.ui, niceName + 'Restore')
        button.clicked.connect( partial(restore, niceName, restoreFunc))
    '''
    self.restoreShapes(objectSpace=shapesInObjectSpace)
    '''
    # Start Group Tab
    self.startTabLayout = Qt.QtWidgets.QVBoxLayout(self.ui.tab)
    self.startTabLayout.setObjectName( self.FOSSIL_START_TAB )
    setParent( self.FOSSIL_START_TAB )
    self.startTab = startingTab.StartLayout( self )
    # Vis Group Tab
    self.visGroupProxy = _visGroup.VisGroupLayout(self.ui)
    # Space Tab
    self.spaceTab = spacesTab.SpaceTab(self.ui)
    # Card Lister setup
    self.updateId = scriptJob( e=('SelectionChanged', pdil.alt.Callback(self.selectionChanged)) )
    self.ui.cardLister.setup(self.scaleFactor)
    self.ui.cardLister.itemSelectionChanged.connect(self.cardListerSelection)
    self.ui.cardLister.cardListerRefresh(force=True)
    self.ui.cardLister.updateHighlight()
    self.ui.jointLister.setup(self.scaleFactor)
    self.ui.cardLister.namesChanged.connect( self.ui.jointLister.jointListerRefresh )
    self.ui.restoreContainer.setVisible( self.settings['showIndividualRestore'] )
    self.ui.rigStateContainer.setVisible( self.settings['showRigStateDebug'] )
    pdil.pubsub.subscribe('fossil rig type changed', self.forceCardParams)
    # Controller Edit
    self.shapeEditor = controllerEdit.ShapeEditor(self)
    #-
    self.show()
    pdil.pubsub.subscribe(pdil.pubsub.Event.MAYA_DAG_OBJECT_CREATED, self.ui.cardLister.newObjMade)
    self.uiActive = True
    self._uiActiveStack = []
    self.ui.tabWidget.setCurrentIndex(self.settings['currentTabIndex'])
    if 'geometry' in self.settings:
        pdil.ui.setGeometry( self, self.settings['geometry'] )
    pdil.pubsub.publish('fossil rig type changed')
    selectedCard = util.selectedCardsSoft(single=True)
    self.ui.jointLister.jointListerRefresh(selectedCard)
    self.ui.jointLister.refreshHighlight()
    # Optionally check for outdated rigs now and on every scene open.
    if self.settings['runUpdaters']:
        self.runUpdatersId = scriptJob( e=('SceneOpened', updater.checkAll) )
        updater.checkAll()
def forceCardParams(self):
    # Called rig type changes to update the params
    selectedCard = util.selectedCardsSoft(single=True)
    cardparams.update(self, selectedCard, force=True)
@staticmethod
def targeted_save(key):
    # Save one rig-state category for every selected card.  Shape data
    # has its own save path; everything else goes through the harvest
    # function registered in nodeApi.Card.toSave.
    print( 'Saving', key )
    if key.startswith('shape'):
        for card in util.selectedCards():
            card.saveShapes()
    else:
        harvestFunc, restoreFunc = nodeApi.Card.toSave[key]
        for card in util.selectedCards():
            card._saveData(harvestFunc)
@staticmethod
def targeted_load(key):
    # Load one rig-state category onto every selected card.  Shapes can
    # be restored in world or object space depending on the key; other
    # categories use the restore function from nodeApi.Card.toSave.
    print( 'Loading', key )
    if key.startswith('shape'):
        if 'world' in key:
            for card in util.selectedCards():
                card.restoreShapes(objectSpace=False)
        else:
            for card in util.selectedCards():
                card.restoreShapes()
    else:
        harvestFunc, restoreFunc = nodeApi.Card.toSave[key]
        for card in util.selectedCards():
            card._restoreData(restoreFunc, card.rigState[key])
"""
@staticmethod
def deleteBones(cards=None):
global _meshStorage
if not cards:
cards = util.selectedCards()
# &&& NEED TO COLLECT CHILDREN JOINTS THAT WILL GET DELETED
'''
joints = []
for card in cards:
joints += card.getOutputJoints()
joints = cmds.ls(joints) # Quickly determine if any joints actually exists
if joints:
meshes = pdil.weights.findBoundMeshes(joints)
storeMeshes(meshes)
'''
#meshes = getBoundMeshes(cards)
#if meshes:
# storeMeshes(meshes)
skinning.cacheWeights(cards, _meshStorage)
with pdil.ui.progressWin(title='Deleting Bones', max=len(cards)) as prog:
for card in cards:
card.removeBones()
prog.update()
"""
def noUiUpdate(self):
    # Temporarily suppress UI refreshes, restoring the previous state
    # (and re-creating the SelectionChanged scriptJob) afterwards.
    # NOTE(review): this is a generator (it yields) but carries no
    # @contextmanager decorator here — presumably wrapped by a caller;
    # confirm before using it in a `with` statement directly.
    self._uiActiveStack.append( self.uiActive )
    self.uiActive = False
    yield
    self.uiActive = self._uiActiveStack.pop()
    self.updateId = scriptJob( e=('SelectionChanged', pdil.alt.Callback(self.selectionChanged)) )
def restoreToggle(self):
    # Flip and persist visibility of the per-category restore buttons.
    self.settings['showIndividualRestore'] = not self.settings['showIndividualRestore']
    self.ui.restoreContainer.setVisible( self.settings['showIndividualRestore'] )
def rigStateToggle(self):
    # Flip and persist visibility of the rig-state debug panel.
    self.settings['showRigStateDebug'] = not self.settings['showRigStateDebug']
    self.ui.rigStateContainer.setVisible( self.settings['showRigStateDebug'] )
def selectAll(self):
    # Select every blueprint card in the scene.
    select( find.blueprintCards() )
def closeEvent(self, event):
    # Tear down on window close: unhook pubsub, kill the scriptJobs,
    # close sub-tabs and persist window geometry/tab settings.
    #print('------ - - - i am closing')
    pdil.pubsub.unsubscribe(pdil.pubsub.Event.MAYA_DAG_OBJECT_CREATED, self.ui.cardLister.newObjMade)
    try:
        if self.updateId is not None:
            jid = self.updateId
            # Clear before killing so a re-entrant close can't kill twice.
            self.updateId = None
            scriptJob(kill=jid)
        self.spaceTab.close()
        self.settings['geometry'] = pdil.ui.getGeometry(self)
        self.settings['currentTabIndex'] = self.ui.tabWidget.currentIndex()
        if self.runUpdatersId is not None:
            jid = self.runUpdatersId
            # Bug fix: previously cleared self.updateId here (copy-paste),
            # leaving runUpdatersId pointing at a dead scriptJob.
            self.runUpdatersId = None
            scriptJob(kill=jid)
    except Exception:
        pass
    # Might be overkill but I'm trying to prevent new gui parenting to the old widgets
    #self.spaceTabLayout.setObjectName( 'delete_me2' )
    # self.shapeEditor.curveColorLayout.setObjectName( 'delete_me3' )
    # self.shapeEditor.surfaceColorLayout.setObjectName( 'delete_me4' )
    self.startTabLayout.setObjectName('delete_me5')
    event.accept()
# Maps each rig-state category to the pretty-printer used for its
# debug field (simpleJson for flat mappings, complexJson for nested).
formatter = {
    'visGroup': simpleJson,
    'connections': complexJson,
    'setDriven': complexJson,
    'customAttrs': complexJson,
    'spaces': complexJson,
    'constraints': complexJson,
    'lockedAttrs': simpleJson,
}
def selectionChanged(self):
    # scriptJob callback: refresh every panel that tracks the current
    # Maya selection, and repopulate the rig-state debug fields.
    self.ui.cardLister.updateHighlight()
    selectedCard = util.selectedCardsSoft(single=True)
    cardparams.update(self, selectedCard)
    self.ui.jointLister.jointListerRefresh(selectedCard)
    self.ui.jointLister.refreshHighlight()
    self.shapeEditor.refresh()
    if self.ui.rigStateContainer.isVisible():
        if selectedCard:
            # Render each rig-state category with its registered formatter.
            for key, data in selectedCard.rigState.items():
                getattr(self.ui, key + 'Field').setText( self.formatter[key](data) )
            allInfo = ''
            for _node, side, type in selectedCard._outputs():
                shapeInfo = pdil.factory.getStringAttr( selectedCard, 'outputShape' + side + type)
                if shapeInfo:
                    allInfo += pdil.text.asciiDecompress(shapeInfo).decode('utf-8') + '\n\n'
            self.ui.shapesField.setText( allInfo )
        else:
            # Nothing selected: blank out all debug fields.
            for key in nodeApi.Card.toSave:
                getattr(self.ui, key + 'Field').setText( '' )
            self.ui.shapesField.setText('')
def updateField(self):
    # Push hand-edited json from the current rig-state debug tab back
    # onto the selected card's rigState.
    # Get the tab title
    label = self.ui.rigStateTab.tabText( self.ui.rigStateTab.currentIndex() )
    # Convert the tab title to the camelCase rigState key, e.g. 'Vis Group' -> 'visGroup'.
    label = (label[0].lower() + label[1:]).replace(' ', '')
    print(label)
    text = self.ui.rigStateTab.currentWidget().children()[-1].toPlainText()
    try:
        # OrderedDict keeps the user's key order when written back.
        data = json.loads(text, object_pairs_hook=OrderedDict)
    except Exception:
        pdil.ui.notify(m='Invalid json, see script editor for details')
        print( traceback.format_exc() )
        return
    selectedCard = util.selectedCardsSoft(single=True)
    rigState = selectedCard.rigState
    rigState[label] = data
    selectedCard.rigState = rigState
def cardListerSelection(self):
    # Mirror the card-lister's UI selection into the Maya scene selection,
    # but only when the lister isn't mid-refresh (uiActive guard).
    if self.ui.cardLister.uiActive:
        cards = [item.card for item in self.ui.cardLister.selectedItems()]
        select(cards)
def makeCard(self):
    '''
    Make a new card and child it if a BPJoint is selected.
    .. todo::
        I think, when adding a chain, if the parent doesn't have an orient target
        already, give it its existing child. Of course this won't quite work
        for the pelvis but whatever.
    '''
    try:
        radius = 1
        targetParent = util.selectedJoints()[0] if util.selectedJoints() else None
        if not targetParent and selected():
            # Quick hack for if the annotation is selected instead of the
            # handle. This is really just a pain and I should link the
            # Annotation to the real joint.
            try:
                intendedTarget = selected()[0].t.listConnections()[0].output3D.listConnections()[0]
                if intendedTarget.__class__.__name__ == 'BPJoint':
                    targetParent = intendedTarget
            except Exception:
                pass
        count = self.ui.jointCount.value()
        name = str(self.ui.cardJointNames.text())
        # Auto repeat the name if only one was given
        if len(name.split()) == 1 and count > 1 and name[-1] != '*':
            name += '*'
        try:
            # parse splits the name spec into (head names, repeating name, tail names).
            head, repeat, tail = util.parse(name)
        except Exception:
            raise Exception('Invalid characters given')
        if count <= 0:
            raise Exception( 'You must specify at least one joint!' )
        # Validate the name spec against the requested joint count.
        namedCount = len(head) + len(tail) + (1 if repeat else 0)
        print( namedCount )
        if count < namedCount:
            raise Exception( 'Not enough joints exist to take all of the given names' )
        if count > namedCount and not repeat:
            raise Exception( 'No name was specified as repeating and too many joints were given.' )
        #card = skeletonTool.pdil.Card( jointCount=count, name=name, rigInfo=None, size=(4, 6) )
        newCard = fossil_card.makeCard(jointCount=count, jointNames=name, rigInfo=None, size=(4, 6) )
        if targetParent:
            fossil_card.moveTo( newCard, targetParent )
            #skeletonTool.proxyskel.pointer( targetParent, newCard.start() )
            newCard.start().setBPParent(targetParent)
            # Inherit the parent joint's radius for visual consistency.
            radius = targetParent.radius.get()
        else:
            proxyskel.makeProxy(newCard.start())
            newCard.start().proxy.setParent( proxyskel.getProxyGroup() )
        for j in newCard.joints:
            j.radius.set(radius)
            j.proxy.radius.set(radius)
        select( newCard )
    except Exception as ex:
        print( traceback.format_exc() )
        m = str(ex) + '''\n
        All names must be valid as Maya names.
        Optionally one may end with a '*' signifying it repeats.
        Ex:  Chest Neck* Head HeadTip
        Would only be valid if the card had at least 4 joints, any above
        that would increase the Neck: Chest Neck01 Neck02 .. Neck<N> Head HeadTip
        Repeating can be at the start or end or no repeats at all, as long as the numbers make sense.
        '''
        pdil.ui.notify(t='Error', m=m)
        raise
#-- Joints and Cards ------------------------------------------------------
def insertJoint(self, tip=False):
    """
    Insert a new blueprint joint after the selected one.  With tip=True
    the new joint becomes a helper 'Tip' used only for orienting.
    """
    sel = util.selectedJoints()
    if not sel:
        warning('You must select the blueprint joint you want to insert after.')
        return
    children = sel[0].proxyChildren[:]
    card = sel[0].card
    newJoint = card.insertChild(sel[0])
    if tip:
        rigData = card.rigData
        names = rigData.get('nameInfo', {})
        # Extend the naming spec with a '<last>Tip' entry.
        if names.get('tail'):
            names['tail'].append( names['tail'][-1] + 'Tip' )
        else:
            names['tail'] = ['Tip']
        card.rigData = rigData
        newJoint.isHelper = True
        # Repoint the children back to the selected joint since the tip is for orienting
        for child in children:
            proxyskel.pointer(sel[0], child)
    self.ui.cardLister.updateNames(card)
    select( newJoint )
    pdil.pubsub.publish('fossil joint added')
def deleteJoint(self):
    # Delete the selected blueprint joint via its owning card,
    # then clear the selection.
    sel = util.selectedJoints()
    if not sel:
        return
    sel[0].card.deleteJoint(sel[0])
    select(cl=True)
def duplicateCard(self):
    '''
    Prompts, if possible, for a new name.
    .. todo:: See if it's possible to use difflib for more elaborate name
        matching.
    '''
    unableToRename = []
    dups = []
    sources = {}
    for card in util.selectedCards():
        d = fossil_card.duplicateCard(card)
        sources[card] = d
        dups.append(d)
        names = d.rigData.get('nameInfo', {})
        if not names:
            names['repeat'] = 'DUP'
        else:
            # NOTE(review): these membership tests look inverted — they check
            # whether the literal string 'head'/'repeat'/'tail' occurs inside
            # the name values, not whether the key exists; presumably
            # `if names.get('head')` etc. was intended.  Confirm before changing.
            if 'head' in names['head']:
                for i, name in enumerate(names['head']):
                    names['head'][i] = name + '_DUP'
            if 'repeat' in names['repeat']:
                # NOTE(review): `name` here is the leftover loop variable from
                # the head loop above — looks like it should be names['repeat'].
                names['repeat'] = name + '_DUP'
            if 'tail' in names['tail']:
                for i, name in enumerate(names['tail']):
                    names['tail'][i] = name + '_DUP'
        rigData = d.rigData
        rigData['nameInfo'] = names
        d.rigData = rigData
    # Re-link duplicated cards to each other when their sources were related.
    for src, newCard in zip(sources, dups):
        if src.parentCard:
            if src.parentCard in sources:
                index = src.parentCard.joints.index( src.parentCardJoint )
                newParent = sources[src.parentCard].joints[index]
                proxyskel.pointer( newParent, newCard.start())
    if unableToRename:
        pdil.ui.notify( t='Unable to rename',
            m="{0} were unable to find a common element to rename, you must do this manually".format( '\n'.join(unableToRename)) )
        select(unableToRename)
    else:
        select(dups)
def mergeCard(self):
    # Merge two selected cards, always merging the child into its parent.
    sel = util.selectedCards()
    if len(sel) != 2:
        pdil.ui.notify(m='You can only merge two cards at a time, please select 2 cards')
        return
    if sel[0].parentCard == sel[1]:
        sel[1].merge(sel[0])
    elif sel[1].parentCard == sel[0]:
        sel[0].merge(sel[1])
    else:
        pdil.ui.notify(m='You can only merge cards that are related to eachother')
        return
def splitCard(self):
    # Split the card at the selected blueprint joint.
    j = util.selectedJoints()
    if j:
        fossil_card.splitCard(j[0])
def addCardIk(self):
    # Add an ik setup to the first selected card.
    fossil_card.cardIk( selected()[0] )
def removeCardIk(self):
    # Remove the ik setup from the first selected card.
    fossil_card.removeCardIk( selected()[0] )
"""
def accessoryFixup(newJoints, card):
''' Place the topmost joints in a separate group so they aren't exported.
'''
if not newJoints: # CtrlGroup doesn't make joints so just leave
return
newJoints = set(newJoints)
if card.rigData.get('accessory'):
# Freeform and mirrored joints have several the need parent fixup
for jnt in newJoints:
parent = jnt.getParent()
if parent not in newJoints:
jnt.setParent( node.accessoryGroup() )
jnt.addAttr('fossilAccessoryInfo', dt='string')
jnt.fossilAccessoryInfo.set( json.dumps( {'parent': parent.longName()} ) )
"""
def nameRulesWindow():
    """Open a small window for editing the side/root naming conventions.

    Returns (window, setNames) so callers can apply the values
    programmatically as well as through the Apply button.
    """
    with pdil.ui.singleWindow('nameRulesWindow', t='Choose what is displayed to indicate the side of the joints and controllers.') as win:
        with columnLayout(adj=True):
            jl = textFieldGrp('jointLeft', l='Joint Left Side', tx=config._settings['joint_left'] )
            jr = textFieldGrp('jointRight', l='Joint Right Side', tx=config._settings['joint_right'] )
            cl = textFieldGrp('controlLeft', l='Control Left Side', tx=config._settings['control_left'] )
            cr = textFieldGrp('controlRight', l='Control Right Side', tx=config._settings['control_right'] )
            root = textFieldGrp('root', l='Root Joint Name', tx=config._settings['root_name'] )
            prefixField = textFieldGrp('jointPrefix', l='Joint Prefix', tx=config._settings['joint_prefix'] )
            def setNames():
                # Validate the fields, then persist the settings and close.
                jlText = jl.getText().strip()
                jrText = jr.getText().strip()
                clText = cl.getText().strip()
                crText = cr.getText().strip()
                rootName = root.getText().strip()
                # Left/right must differ within joints and within controls
                # (joint and control text for the same side may match).
                if jlText == jrText or clText == crText:
                    pdil.ui.notify(m='The left and right sides must be different\n(but the control and joint text for the same side can be the same)')
                    return
                if not clText or not crText or not jlText or not jrText or not rootName:
                    pdil.ui.notify(m='You cannot leave any side empty and root must have a name')
                    return
                config._settings['joint_left'] = jlText
                config._settings['joint_right'] = jrText
                config._settings['control_left'] = clText
                config._settings['control_right'] = crText
                # Keep the in-memory lookup maps in sync with the stored settings.
                config.JOINT_SIDE_CODE_MAP['left'] = jlText
                config.JOINT_SIDE_CODE_MAP['right'] = jrText
                config.CONTROL_SIDE_CODE_MAP['left'] = clText
                config.CONTROL_SIDE_CODE_MAP['right'] = crText
                config._settings['root_name'] = rootName
                config._settings['joint_prefix'] = prefixField.getText().strip()
                deleteUI(win)
            button(l='Apply', c=Callback(setNames))
    return win, setNames
def getBoundMeshes(cards=None):
    ''' Returns the meshes bound to the joints made by the rig.

    Defaults to using all cards but specific cards can be specified.
    '''
    if not cards:
        cards = find.blueprintCards()

    # Gather every built joint, plus its mirrored twin when one exists.
    builtJoints = []
    for joint in (j for card in cards for j in card.joints):
        if joint.real:
            builtJoints.append(joint.real)
        if joint.realMirror:
            builtJoints.append(joint.realMirror)

    return pdil.weights.findBoundMeshes(builtJoints)
def fullRebuild(weights=None):
    ''' Detach any bound meshes and rebuild everything, including the tpose if it exists.

    If `weights` is not given, weights are cached from the currently bound
    meshes before teardown; otherwise the provided {obj: {'weight': ...}}
    mapping is applied after the rebuild.
    '''
    cards = find.blueprintCards()
    meshStorage = {}
    with pdil.ui.progressWin(title='Full Rebuild', max=len(cards) * 4 + 9 ) as pr:
        pr.update(status='Searching for bound meshes')
        if not weights:
            meshes = getBoundMeshes(cards)
            if meshes:
                pr.update(status='Storing weights')
                #storeMeshes(meshes)
                skinning.cacheWeights(cards, meshStorage)
        # Save each card's state so it can be restored after the rebuild.
        pr.update(status='Saving states')
        for card in cards:
            pr.update()
            card.saveState()
        # Teardown: rig first, then bones.
        pr.update(status='Removing Rig')
        for card in cards:
            pr.update()
            card.removeRig()
        pr.update(status='Removing Bones')
        for card in cards:
            pr.update()
            card.removeBones()
        #
        reposers = tpose.getReposeRoots()
        if reposers:
            #delete(reposers)
            pr.update(status='New reposer')
            tpose.updateReposers(cards)
            pr.update(status='Run adjusters')
            tpose.runAdjusters()
        pr.update(status='Build Bones')
        fossil_card.buildBones(cards)
        pr.update(status='Build Rig')
        fossil_card.buildRig(cards)
        pr.update(status='Restore State')
        for card in cards:
            pr.update()
            card.restoreState()
        if reposers:
            tpose.goToBindPose()
        if not weights:
            # NOTE(review): this calls cacheWeights again rather than applying
            # the weights cached into meshStorage above — looks like it should
            # restore them; confirm skinning.cacheWeights' semantics.
            skinning.cacheWeights(cards, meshStorage)
        else:
            for obj, data in weights.items():
                obj = PyNode(obj)
                pdil.weights.apply(obj, data['weight'])
def fitControlsToMesh(cards, meshes):
    ''' Scale all the controls to be slightly larger than the nearest mesh portion.
    '''
    xform = cmds.xform  # Bind once; the cmds string query is the fastest path.

    # Collect world-space positions of every vertex of every mesh.
    vertPositions = []
    for mesh in meshes:
        # String queries are super fast, about 7x faster than PyMel
        vertTemplate = str(mesh) + '.vtx[{}]'
        vertPositions.extend(
            xform(vertTemplate.format(index), q=1, t=1, ws=1)
            for index in range(len(mesh.vtx))
        )

    for card in cards:
        for mainCtrl, _side, _type in card.getMainControls():
            # Resize the main control, then each of its sub controls.
            allCtrls = [mainCtrl] + [sub for _key, sub in mainCtrl.subControl.items()]
            for ctrl in allCtrls:
                targetRadius = controllerShape.determineRadius(vertPositions, pdil.dagObj.getPos(ctrl))
                existingRadius = controllerShape.getControllerRadius(ctrl)
                controllerShape.scaleAllCVs(ctrl, targetRadius / existingRadius)
| patcorwin/fossil | pdil/tool/fossil/main.py | Python | bsd-3-clause | 32,622 |
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
# Force utf-8 as the default encoding under Python 2 so non-ascii reddit
# usernames/comments don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '7zrrj1', '7zxkpq', '8055hn', '80ddrf', '80nbm1', '80waq3' ]
# Local port the moderation web UI listens on.
flaskport = 8993
app = Flask(__name__)
app.debug = True
# sha1 hex digest -> praw comment object; rebuilt on each page render and
# consumed by the takeaction() endpoint.
commentHashesAndComments = {}
def loginAndReturnRedditSession():
    """Log in to reddit with username/password credentials from a config file."""
    # Credentials live outside the repo so they are never committed.
    credentials = ConfigParser()
    credentials.read("../reddit-password-credentials.cfg")
    username = credentials.get("Reddit", "user")
    password = credentials.get("Reddit", "password")
    # TODO: password auth is going away, and we will soon need to do oauth.
    redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    redditSession.login(username, password, disable_warning=True)
    return redditSession
def loginOAuthAndReturnRedditSession():
    """Create a praw session; reddit now requires oauth, which praw handles internally."""
    # New praw no longer needs an explicit OAuth2Util object, and we
    # authenticate fresh every time, so no manual token refresh either.
    redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    return redditSession
def getSubmissionsForRedditSession(redditSession):
    """Fetch each configured signup submission with its comment tree fully expanded."""
    submissions = [redditSession.submission(id=subId) for subId in signupPageSubmissionIds]
    for submission in submissions:
        # Resolve every MoreComments stub so the whole forest is loaded.
        submission.comments.replace_more(limit=None)
    return submissions
def getCommentsForSubmissions(submissions):
    """Return the flattened list of actual comments across all submissions.

    Anything in the forest that is not a praw Comment (e.g. an unexpanded
    MoreComments stub) is filtered out.
    """
    comments = []
    for submission in submissions:
        commentForest = submission.comments
        # isinstance is the idiomatic type check (was `__class__ ==`);
        # MoreComments is not a Comment subclass, so behavior is unchanged.
        comments += [comment for comment in commentForest.list()
                     if isinstance(comment, praw.models.Comment)]
    return comments
def retireCommentHash(commentHash):
    """Record a handled comment hash so it is never shown again."""
    # Append-only log consumed by retiredCommentHashes().
    with open("retiredcommenthashes.txt", "a") as logFile:
        logFile.write(commentHash + '\n')
def retiredCommentHashes():
    """Return the list of comment hashes that have already been handled."""
    with open("retiredcommenthashes.txt", "r") as logFile:
        contents = logFile.read()
    # splitlines(), unlike readlines(), drops the trailing newlines.
    return contents.splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
    """Render the moderation page listing every not-yet-handled signup comment.

    Rebuilds the global commentHashesAndComments map so the takeaction()
    endpoint can look up the live praw comment for each form submission.
    """
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')
    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    i = 1
    # Forms target this invisible iframe so button presses don't navigate away.
    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        # Hash fullname + body so an edited comment shows up again as new.
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)  # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #    stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #    stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            # base64 so arbitrary usernames survive the HTML form round trip.
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            # stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')
            # Render markdown, then strip everything except <p> tags.
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")
    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
    """Handle one moderation button press for a single signup comment."""
    # Username was base64-encoded to survive the HTML form round trip.
    username = b64decode(request.form["username"])
    commentHash = str(request.form["commenthash"])
    # commentPermalink = request.form["commentpermalink"]
    actionToTake = request.form["actiontotake"]
    # print commentHashesAndComments
    # Look up the live praw comment cached globally during the page render.
    comment = commentHashesAndComments[commentHash]
    # print "comment: " + str(comment)
    if actionToTake == 'Signup':
        print "signup - " + username
        subprocess.call(['./signup.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    # if actionToTake == 'Signup and checkin':
    #     print "signup and checkin - " + username
    #     subprocess.call(['./signup-and-checkin.sh', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Relapse':
    #     print "relapse - " + username
    #     subprocess.call(['./relapse.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Reinstate':
    #     print "reinstate - " + username
    #     subprocess.call(['./reinstate.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    elif actionToTake == 'Skip comment':
        print "Skip comment - " + username
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == "Skip comment and don't upvote":
        print "Skip comment and don't upvote - " + username
        retireCommentHash(commentHash)
    return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
    """Run the display script; the actual clipboard copy is still TODO."""
    print "TODO: Copy display to clipboard"
    subprocess.call(['./display-during-signup.py'])
    return Response("hello", mimetype='text/html')
# Bind to localhost only; this moderation UI is meant for local use.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2018-march/serve-signups-with-flask.py | Python | mit | 8,581 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_dgc.i18n import _
class ReceivingWidget(QTreeWidget):
    """Tree widget listing the wallet's receiving addresses.

    Columns: 0 = address, 1 = editable label, 2 = 'Used' flag (hidden by
    default, toggled via toggle_used).
    """
    def toggle_used(self):
        # Flip visibility of the 'Used' column and rebuild the list.
        if self.hide_used:
            self.hide_used = False
            self.setColumnHidden(2, False)
        else:
            self.hide_used = True
            self.setColumnHidden(2, True)
        self.update_list()
    def edit_label(self, item, column):
        # Only the label column is editable, and only on a selected row.
        # The editing flag suppresses update_label while the editor is open.
        if column == 1 and item.isSelected():
            self.editing = True
            item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
            self.editItem(item, column)
            item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
            self.editing = False
    def update_label(self, item, column):
        # Persist the edited label to the wallet, unless the inline editor
        # is still open (edit_label sets self.editing around editItem).
        if self.editing:
            return
        else:
            address = str(item.text(0))
            label = unicode( item.text(1) )
            self.owner.actuator.g.wallet.set_label(address, label)
    def copy_address(self):
        # Copy the currently selected address to the system clipboard.
        address = self.currentItem().text(0)
        qApp.clipboard().setText(address)
    def update_list(self):
        # NOTE(review): this early return disables the rebuild below —
        # presumably intentional (feature turned off); confirm before removing.
        return
        self.clear()
        addresses = self.owner.actuator.g.wallet.addresses(False)
        for address in addresses:
            history = self.owner.actuator.g.wallet.history.get(address,[])
            used = "No"
            # It appears that at this moment history can either be an array with tx and block height
            # Or just a tx that's why this ugly code duplication is in, will fix
            if len(history) == 1:
                # This means pruned data. If that's the case the address has to been used at one point
                if history[0] == "*":
                    used = "Yes"
                else:
                    for tx_hash in history:
                        tx = self.owner.actuator.g.wallet.transactions.get(tx_hash)
                        if tx:
                            used = "Yes"
            else:
                for tx_hash, height in history:
                    tx = self.owner.actuator.g.wallet.transactions.get(tx_hash)
                    if tx:
                        used = "Yes"
            if(self.hide_used == True and used == "No") or self.hide_used == False:
                label = self.owner.actuator.g.wallet.labels.get(address,'')
                item = QTreeWidgetItem([address, label, used])
                self.insertTopLevelItem(0, item)
    def __init__(self, owner=None):
        # owner provides actuator.g.wallet access for labels/history.
        self.owner = owner
        self.editing = False
        QTreeWidget.__init__(self, owner)
        self.setColumnCount(3)
        self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
        self.setIndentation(0)
        # Start with the 'Used' column hidden.
        self.hide_used = True
        self.setColumnHidden(2, True)
| testalt/electrum-dgc | gui/qt/receiving_widget.py | Python | gpl-3.0 | 2,848 |
"""Contains the BulletinBoard class."""
__all__ = ['BulletinBoard']
from direct.directnotify import DirectNotifyGlobal
class BulletinBoard:
    """This class implements a global location for key/value pairs to be
    stored. Intended to prevent coders from putting global variables directly
    on showbase, so that potential name collisions can be more easily
    detected."""
    notify = DirectNotifyGlobal.directNotify.newCategory('BulletinBoard')
    def __init__(self):
        # postName -> posted value.
        self._dict = {}
    def get(self, postName, default=None):
        """Return the value posted under postName, or default if absent."""
        return self._dict.get(postName, default)
    def has(self, postName):
        """Return True if something has been posted under postName."""
        return postName in self._dict
    def getEvent(self, postName):
        """Name of the messenger event sent when postName is posted/updated."""
        return 'bboard-%s' % postName
    def getRemoveEvent(self, postName):
        """Name of the messenger event sent when postName is removed."""
        return 'bboard-remove-%s' % postName
    def post(self, postName, value=None):
        """Post a value, warning if it overwrites an existing post."""
        if postName in self._dict:
            BulletinBoard.notify.warning('changing %s from %s to %s' % (
                postName, self._dict[postName], value))
        self.update(postName, value)
    def update(self, postName, value):
        """can use this to set value the first time"""
        if postName in self._dict:
            BulletinBoard.notify.info('update: posting %s' % (postName))
        self._dict[postName] = value
        messenger.send(self.getEvent(postName))
    def remove(self, postName):
        """Remove a post (if present) and send its remove event."""
        if postName in self._dict:
            del self._dict[postName]
            messenger.send(self.getRemoveEvent(postName))
    def removeIfEqual(self, postName, value):
        # only remove the post if its value is a particular value
        if self.has(postName):
            if self.get(postName) == value:
                self.remove(postName)
    def __repr__(self):
        # Build with a list + join; the original accumulated into a local
        # named 'str', shadowing the builtin.
        lines = ['Bulletin Board Contents\n=======================']
        for postName in sorted(self._dict):
            lines.append('\n%s: %s' % (postName, self._dict[postName]))
        return ''.join(lines)
| chandler14362/panda3d | direct/src/showbase/BulletinBoard.py | Python | bsd-3-clause | 2,027 |
import os
import sys
import time
import smtplib
import traceback
import shutil
from xcsoar.mapgen.server.job import Job
from xcsoar.mapgen.generator import Generator
from xcsoar.mapgen.util import check_commands
class Worker:
    """Polls a job directory and generates XCSoar map files for each job."""
    def __init__(self, dir_jobs, dir_data, mail_server):
        # Fail fast if the external generator commands are not installed.
        check_commands()
        self.__dir_jobs = os.path.abspath(dir_jobs)
        self.__dir_data = os.path.abspath(dir_data)
        self.__mail_server = mail_server
        self.__run = False
    def __send_download_mail(self, job):
        """Best-effort notification mail; failures are printed, not raised."""
        try:
            print('Sending download mail to {} ...'.format(job.description.mail))
            # NOTE(review): the stray '"' after the From address ends up in
            # the generated message — confirm whether it is intended.
            msg = '''From: no-reply@xcsoar.org"
To: {to}
Subject: XCSoar Map Generator - Download ready ({name}.xcm)
The XCSoar Map Generator has finished your map.
It can be downloaded at {url}
This link is valid for 7 days.
'''.format(to=job.description.mail, name=job.description.name, url=job.description.download_url)
            s = smtplib.SMTP(self.__mail_server)
            try:
                s.sendmail('no-reply@xcsoar.org', job.description.mail, msg)
            finally:
                # Always close the SMTP connection, even if sendmail fails.
                s.quit()
        except Exception as e:
            print('Failed to send mail: {}'.format(e))
    def __do_job(self, job):
        """Generate one map from its job description, then notify by mail."""
        try:
            print('Generating map file for job uuid={}, name={}, mail={}'.format(job.uuid, job.description.name, job.description.mail))
            description = job.description
            # Need either an explicit bounding box or a waypoint file to
            # derive one from.
            if not description.waypoint_file and not description.bounds:
                print('No waypoint file or bounds set. Aborting.')
                job.delete()
                return
            generator = Generator(self.__dir_data, job.file_path('tmp'))
            generator.set_bounds(description.bounds)
            generator.add_information_file(job.description.name, job.description.mail)
            if description.use_topology:
                job.update_status('Creating topology files...')
                generator.add_topology(compressed = description.compressed, level_of_detail = description.level_of_detail)
            if description.use_terrain:
                job.update_status('Creating terrain files...')
                generator.add_terrain(description.resolution)
            # welt2000 waypoints take precedence over an uploaded file.
            if description.welt2000:
                job.update_status('Adding welt2000 waypoints...')
                generator.add_welt2000()
            elif description.waypoint_file:
                job.update_status('Adding waypoint file...')
                generator.add_waypoint_file(job.file_path(description.waypoint_file))
            if description.waypoint_details_file:
                job.update_status('Adding waypoint details file...')
                generator.add_waypoint_details_file(job.file_path(description.waypoint_details_file))
            if description.airspace_file:
                job.update_status('Adding airspace file...')
                generator.add_airspace_file(job.file_path(description.airspace_file))
            job.update_status('Creating map file...')
            try:
                generator.create(job.map_file())
            finally:
                # Clean up generator scratch space whether or not create() succeeded.
                generator.cleanup()
                shutil.rmtree(job.file_path('tmp'))
            job.done()
        except Exception as e:
            print('Error: {}'.format(e))
            traceback.print_exc(file=sys.stdout)
            job.error()
            return
        print('Map {} is ready for use.'.format(job.map_file()))
        if job.description.mail != '':
            self.__send_download_mail(job)
    def run(self):
        """Poll the job directory forever, processing jobs as they appear."""
        self.__run = True
        print('Monitoring {} for new jobs...'.format(self.__dir_jobs))
        while self.__run:
            try:
                job = Job.get_next(self.__dir_jobs)
                if not job:
                    # No pending job; back off briefly before polling again.
                    time.sleep(0.5)
                    continue
                self.__do_job(job)
            except Exception as e:
                # Keep the worker alive across unexpected per-job failures.
                print('Error: {}'.format(e))
                traceback.print_exc(file=sys.stdout)
| fberst/mapgen | lib/xcsoar/mapgen/server/worker.py | Python | gpl-2.0 | 4,027 |
from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
from CTFd.utils import admins_only, is_admin, unix_time, get_config, \
set_config, sendmail, rmdir, create_image, delete_image, run_image, container_status, container_ports, \
container_stop, container_start, get_themes, cache, upload_file
from CTFd.models import db, Teams, Solves, Awards, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError
from CTFd.scoreboard import get_standings
# Blueprint grouping the admin scoreboard views.
admin_scoreboard = Blueprint('admin_scoreboard', __name__)
@admin_scoreboard.route('/admin/scoreboard')
@admins_only
def admin_scoreboard_view():
    """Render the admin scoreboard page (admin=True includes hidden teams)."""
    standings = get_standings(admin=True)
    return render_template('admin/scoreboard.html', teams=standings)
@admin_scoreboard.route('/admin/scores')
@admins_only
def admin_scores():
    """Return current team scores as JSON for the admin UI.

    Banned teams are excluded; ties on score are broken by who reached the
    score first (smallest max solve date).
    """
    score = db.func.sum(Challenges.value).label('score')
    quickest = db.func.max(Solves.date).label('quickest')
    # `== False` is the SQLAlchemy column-expression idiom, not a Python comparison.
    teams = db.session.query(Solves.teamid, Teams.name, score).join(Teams).join(Challenges).filter(Teams.banned == False).group_by(Solves.teamid).order_by(score.desc(), quickest)
    db.session.close()
    json_data = {'teams': []}
    for i, x in enumerate(teams):
        json_data['teams'].append({'place': i + 1, 'id': x.teamid, 'name': x.name, 'score': int(x.score)})
    return jsonify(json_data)
"""
Tests for Discussion API serializers
"""
import itertools
from urlparse import urlparse
import ddt
import httpretty
import mock
from django.test.client import RequestFactory
from discussion_api.serializers import CommentSerializer, ThreadSerializer, get_context
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_thread,
make_minimal_cs_comment,
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
@ddt.ddt
class SerializerTestMixin(CommentsServiceMockMixin, UrlResetMixin):
    """Shared setup and tests for serializer test cases.

    Subclasses provide make_cs_content() and serialize() so the shared tests
    here can run against either threads or comments.
    """
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(SerializerTestMixin, self).setUp()
        # Intercept all HTTP calls to the comments service for the test.
        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)
        self.maxDiff = None  # pylint: disable=invalid-name
        self.user = UserFactory.create()
        self.register_get_user_response(self.user)
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
        self.course = CourseFactory.create()
        self.author = UserFactory.create()
    def create_role(self, role_name, users, course=None):
        """Create a Role in self.course with the given name and users"""
        course = course or self.course
        role = Role.objects.create(name=role_name, course_id=course.id)
        role.users = users
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, True, False, True),
        (FORUM_ROLE_ADMINISTRATOR, False, True, False),
        (FORUM_ROLE_MODERATOR, True, False, True),
        (FORUM_ROLE_MODERATOR, False, True, False),
        (FORUM_ROLE_COMMUNITY_TA, True, False, True),
        (FORUM_ROLE_COMMUNITY_TA, False, True, False),
        (FORUM_ROLE_STUDENT, True, False, True),
        (FORUM_ROLE_STUDENT, False, True, True),
    )
    @ddt.unpack
    def test_anonymity(self, role_name, anonymous, anonymous_to_peers, expected_serialized_anonymous):
        """
        Test that content is properly made anonymous.
        Content should be anonymous iff the anonymous field is true or the
        anonymous_to_peers field is true and the requester does not have a
        privileged role.
        role_name is the name of the requester's role.
        anonymous is the value of the anonymous field in the content.
        anonymous_to_peers is the value of the anonymous_to_peers field in the
        content.
        expected_serialized_anonymous is whether the content should actually be
        anonymous in the API output when requested by a user with the given
        role.
        """
        self.create_role(role_name, [self.user])
        serialized = self.serialize(
            self.make_cs_content({"anonymous": anonymous, "anonymous_to_peers": anonymous_to_peers})
        )
        actual_serialized_anonymous = serialized["author"] is None
        self.assertEqual(actual_serialized_anonymous, expected_serialized_anonymous)
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, False, "staff"),
        (FORUM_ROLE_ADMINISTRATOR, True, None),
        (FORUM_ROLE_MODERATOR, False, "staff"),
        (FORUM_ROLE_MODERATOR, True, None),
        (FORUM_ROLE_COMMUNITY_TA, False, "community_ta"),
        (FORUM_ROLE_COMMUNITY_TA, True, None),
        (FORUM_ROLE_STUDENT, False, None),
        (FORUM_ROLE_STUDENT, True, None),
    )
    @ddt.unpack
    def test_author_labels(self, role_name, anonymous, expected_label):
        """
        Test correctness of the author_label field.
        The label should be "staff", "staff", or "community_ta" for the
        Administrator, Moderator, and Community TA roles, respectively, but
        the label should not be present if the content is anonymous.
        role_name is the name of the author's role.
        anonymous is the value of the anonymous field in the content.
        expected_label is the expected value of the author_label field in the
        API output.
        """
        self.create_role(role_name, [self.author])
        serialized = self.serialize(self.make_cs_content({"anonymous": anonymous}))
        self.assertEqual(serialized["author_label"], expected_label)
    def test_abuse_flagged(self):
        """Content is abuse_flagged iff the requester is among the flaggers."""
        serialized = self.serialize(self.make_cs_content({"abuse_flaggers": [str(self.user.id)]}))
        self.assertEqual(serialized["abuse_flagged"], True)
    def test_voted(self):
        """Content is voted iff its id is in the requester's upvoted list."""
        thread_id = "test_thread"
        self.register_get_user_response(self.user, upvoted_ids=[thread_id])
        serialized = self.serialize(self.make_cs_content({"id": thread_id}))
        self.assertEqual(serialized["voted"], True)
@ddt.ddt
class ThreadSerializerSerializationTest(SerializerTestMixin, ModuleStoreTestCase):
    """Tests for ThreadSerializer serialization."""
    def make_cs_content(self, overrides):
        """
        Create a thread with the given overrides, plus some useful test data.
        """
        merged_overrides = {
            "course_id": unicode(self.course.id),
            "user_id": str(self.author.id),
            "username": self.author.username,
        }
        merged_overrides.update(overrides)
        return make_minimal_cs_thread(merged_overrides)
    def serialize(self, thread):
        """
        Create a serializer with an appropriate context and use it to serialize
        the given thread, returning the result.
        """
        return ThreadSerializer(thread, context=get_context(self.course, self.request)).data
    def test_basic(self):
        """Full field-by-field check of serialization for both thread types."""
        thread = {
            "id": "test_thread",
            "course_id": unicode(self.course.id),
            "commentable_id": "test_topic",
            "group_id": None,
            "user_id": str(self.author.id),
            "username": self.author.username,
            "anonymous": False,
            "anonymous_to_peers": False,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "thread_type": "discussion",
            "title": "Test Title",
            "body": "Test body",
            "pinned": True,
            "closed": False,
            "abuse_flaggers": [],
            "votes": {"up_count": 4},
            "comments_count": 5,
            "unread_comments_count": 3,
        }
        expected = {
            "id": "test_thread",
            "course_id": unicode(self.course.id),
            "topic_id": "test_topic",
            "group_id": None,
            "group_name": None,
            "author": self.author.username,
            "author_label": None,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "type": "discussion",
            "title": "Test Title",
            "raw_body": "Test body",
            "pinned": True,
            "closed": False,
            "following": False,
            "abuse_flagged": False,
            "voted": False,
            "vote_count": 4,
            "comment_count": 5,
            "unread_comment_count": 3,
            "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
            "endorsed_comment_list_url": None,
            "non_endorsed_comment_list_url": None,
        }
        self.assertEqual(self.serialize(thread), expected)
        # Question threads expose endorsed/non-endorsed comment list URLs
        # instead of a single comment_list_url.
        thread["thread_type"] = "question"
        expected.update({
            "type": "question",
            "comment_list_url": None,
            "endorsed_comment_list_url": (
                "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=True"
            ),
            "non_endorsed_comment_list_url": (
                "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=False"
            ),
        })
        self.assertEqual(self.serialize(thread), expected)
    def test_group(self):
        """group_id/group_name are resolved from the thread's cohort."""
        cohort = CohortFactory.create(course_id=self.course.id)
        serialized = self.serialize(self.make_cs_content({"group_id": cohort.id}))
        self.assertEqual(serialized["group_id"], cohort.id)
        self.assertEqual(serialized["group_name"], cohort.name)
    def test_following(self):
        """A thread is following iff it is in the requester's subscriptions."""
        thread_id = "test_thread"
        self.register_get_user_response(self.user, subscribed_thread_ids=[thread_id])
        serialized = self.serialize(self.make_cs_content({"id": thread_id}))
        self.assertEqual(serialized["following"], True)
@ddt.ddt
class CommentSerializerTest(SerializerTestMixin, ModuleStoreTestCase):
    """Tests for CommentSerializer."""
    def setUp(self):
        super(CommentSerializerTest, self).setUp()
        # A second user who endorses comments in the endorsement tests.
        self.endorser = UserFactory.create()
        self.endorsed_at = "2015-05-18T12:34:56Z"
    def make_cs_content(self, overrides=None, with_endorsement=False):
        """
        Create a comment with the given overrides, plus some useful test data.
        """
        merged_overrides = {
            "user_id": str(self.author.id),
            "username": self.author.username
        }
        if with_endorsement:
            merged_overrides["endorsement"] = {
                "user_id": str(self.endorser.id),
                "time": self.endorsed_at
            }
        merged_overrides.update(overrides or {})
        return make_minimal_cs_comment(merged_overrides)
    def serialize(self, comment, thread_data=None):
        """
        Create a serializer with an appropriate context and use it to serialize
        the given comment, returning the result.
        """
        context = get_context(self.course, self.request, make_minimal_cs_thread(thread_data))
        return CommentSerializer(comment, context=context).data
    def test_basic(self):
        """Full field-by-field check of comment serialization."""
        comment = {
            "id": "test_comment",
            "thread_id": "test_thread",
            "user_id": str(self.author.id),
            "username": self.author.username,
            "anonymous": False,
            "anonymous_to_peers": False,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "body": "Test body",
            "endorsed": False,
            "abuse_flaggers": [],
            "votes": {"up_count": 4},
            "children": [],
        }
        expected = {
            "id": "test_comment",
            "thread_id": "test_thread",
            "parent_id": None,
            "author": self.author.username,
            "author_label": None,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "raw_body": "Test body",
            "endorsed": False,
            "endorsed_by": None,
            "endorsed_by_label": None,
            "endorsed_at": None,
            "abuse_flagged": False,
            "voted": False,
            "vote_count": 4,
            "children": [],
        }
        self.assertEqual(self.serialize(comment), expected)
    @ddt.data(
        *itertools.product(
            [
                FORUM_ROLE_ADMINISTRATOR,
                FORUM_ROLE_MODERATOR,
                FORUM_ROLE_COMMUNITY_TA,
                FORUM_ROLE_STUDENT,
            ],
            [True, False]
        )
    )
    @ddt.unpack
    def test_endorsed_by(self, endorser_role_name, thread_anonymous):
        """
        Test correctness of the endorsed_by field.
        The endorser should be anonymous iff the thread is anonymous to the
        requester, and the endorser is not a privileged user.
        endorser_role_name is the name of the endorser's role.
        thread_anonymous is the value of the anonymous field in the thread.
        """
        self.create_role(endorser_role_name, [self.endorser])
        serialized = self.serialize(
            self.make_cs_content(with_endorsement=True),
            thread_data={"anonymous": thread_anonymous}
        )
        actual_endorser_anonymous = serialized["endorsed_by"] is None
        expected_endorser_anonymous = endorser_role_name == FORUM_ROLE_STUDENT and thread_anonymous
        self.assertEqual(actual_endorser_anonymous, expected_endorser_anonymous)
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, "staff"),
        (FORUM_ROLE_MODERATOR, "staff"),
        (FORUM_ROLE_COMMUNITY_TA, "community_ta"),
        (FORUM_ROLE_STUDENT, None),
    )
    @ddt.unpack
    def test_endorsed_by_labels(self, role_name, expected_label):
        """
        Test correctness of the endorsed_by_label field.
        The label should be "staff", "staff", or "community_ta" for the
        Administrator, Moderator, and Community TA roles, respectively.
        role_name is the name of the author's role.
        expected_label is the expected value of the author_label field in the
        API output.
        """
        self.create_role(role_name, [self.endorser])
        serialized = self.serialize(self.make_cs_content(with_endorsement=True))
        self.assertEqual(serialized["endorsed_by_label"], expected_label)
    def test_endorsed_at(self):
        """endorsed_at reflects the endorsement timestamp verbatim."""
        serialized = self.serialize(self.make_cs_content(with_endorsement=True))
        self.assertEqual(serialized["endorsed_at"], self.endorsed_at)
    def test_children(self):
        """Nested children serialize recursively with correct parent_ids."""
        comment = self.make_cs_content({
            "id": "test_root",
            "children": [
                self.make_cs_content({
                    "id": "test_child_1",
                    "parent_id": "test_root",
                }),
                self.make_cs_content({
                    "id": "test_child_2",
                    "parent_id": "test_root",
                    "children": [
                        self.make_cs_content({
                            "id": "test_grandchild",
                            "parent_id": "test_child_2"
                        })
                    ],
                }),
            ],
        })
        serialized = self.serialize(comment)
        self.assertEqual(serialized["children"][0]["id"], "test_child_1")
        self.assertEqual(serialized["children"][0]["parent_id"], "test_root")
        self.assertEqual(serialized["children"][1]["id"], "test_child_2")
        self.assertEqual(serialized["children"][1]["parent_id"], "test_root")
        self.assertEqual(serialized["children"][1]["children"][0]["id"], "test_grandchild")
        self.assertEqual(serialized["children"][1]["children"][0]["parent_id"], "test_child_2")
@ddt.ddt
class ThreadSerializerDeserializationTest(CommentsServiceMockMixin, UrlResetMixin, ModuleStoreTestCase):
    """Tests for ThreadSerializer deserialization."""
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Intercept all HTTP traffic to the comments service with httpretty
        # so tests can register canned responses and inspect the requests.
        super(ThreadSerializerDeserializationTest, self).setUp()
        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)
        self.course = CourseFactory.create()
        self.user = UserFactory.create()
        self.register_get_user_response(self.user)
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
        # Smallest payload that should validate for creating a thread.
        self.minimal_data = {
            "course_id": unicode(self.course.id),
            "topic_id": "test_topic",
            "type": "discussion",
            "title": "Test Title",
            "raw_body": "Test body",
        }
        # Pre-existing thread used as the instance for update tests.
        self.existing_thread = Thread(**make_minimal_cs_thread({
            "id": "existing_thread",
            "course_id": unicode(self.course.id),
            "commentable_id": "original_topic",
            "thread_type": "discussion",
            "title": "Original Title",
            "body": "Original body",
            "user_id": str(self.user.id),
        }))
    def save_and_reserialize(self, data, instance=None):
        """
        Create a serializer with the given data and (if updating) instance,
        ensure that it is valid, save the result, and return the full thread
        data from the serializer.
        """
        serializer = ThreadSerializer(
            instance,
            data=data,
            partial=(instance is not None),
            context=get_context(self.course, self.request)
        )
        self.assertTrue(serializer.is_valid())
        serializer.save()
        return serializer.data
    def test_create_minimal(self):
        # Creating with only the required fields POSTs to the topic's thread
        # collection and fills in the requesting user automatically.
        self.register_post_thread_response({"id": "test_id"})
        saved = self.save_and_reserialize(self.minimal_data)
        self.assertEqual(
            urlparse(httpretty.last_request().path).path,
            "/api/v1/test_topic/threads"
        )
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["test_topic"],
                "thread_type": ["discussion"],
                "title": ["Test Title"],
                "body": ["Test body"],
                "user_id": [str(self.user.id)],
            }
        )
        self.assertEqual(saved["id"], "test_id")
    def test_create_missing_field(self):
        # Omitting any single required field must make validation fail with
        # an error keyed by that field.
        for field in self.minimal_data:
            data = self.minimal_data.copy()
            data.pop(field)
            serializer = ThreadSerializer(data=data)
            self.assertFalse(serializer.is_valid())
            self.assertEqual(
                serializer.errors,
                {field: ["This field is required."]}
            )
    def test_create_type(self):
        # "question" is an accepted thread type; unrecognized values are not.
        self.register_post_thread_response({"id": "test_id"})
        data = self.minimal_data.copy()
        data["type"] = "question"
        self.save_and_reserialize(data)
        data["type"] = "invalid_type"
        serializer = ThreadSerializer(data=data)
        self.assertFalse(serializer.is_valid())
    def test_update_empty(self):
        # A partial update with no fields PUTs the thread's existing values.
        self.register_put_thread_response(self.existing_thread.attributes)
        self.save_and_reserialize({}, self.existing_thread)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["original_topic"],
                "thread_type": ["discussion"],
                "title": ["Original Title"],
                "body": ["Original body"],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "closed": ["False"],
                "pinned": ["False"],
                "user_id": [str(self.user.id)],
            }
        )
    def test_update_all(self):
        # Updating every editable field PUTs the new values and round-trips
        # them through the serializer output.
        self.register_put_thread_response(self.existing_thread.attributes)
        data = {
            "topic_id": "edited_topic",
            "type": "question",
            "title": "Edited Title",
            "raw_body": "Edited body",
        }
        saved = self.save_and_reserialize(data, self.existing_thread)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["edited_topic"],
                "thread_type": ["question"],
                "title": ["Edited Title"],
                "body": ["Edited body"],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "closed": ["False"],
                "pinned": ["False"],
                "user_id": [str(self.user.id)],
            }
        )
        for key in data:
            self.assertEqual(saved[key], data[key])
    def test_update_empty_string(self):
        # Editable fields may not be blanked out in an update.
        serializer = ThreadSerializer(
            self.existing_thread,
            data={field: "" for field in ["topic_id", "title", "raw_body"]},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertEqual(
            serializer.errors,
            {field: ["This field is required."] for field in ["topic_id", "title", "raw_body"]}
        )
    def test_update_course_id(self):
        # A thread cannot be moved to a different course via update.
        serializer = ThreadSerializer(
            self.existing_thread,
            data={"course_id": "some/other/course"},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertEqual(
            serializer.errors,
            {"course_id": ["This field is not allowed in an update."]}
        )
@ddt.ddt
class CommentSerializerDeserializationTest(CommentsServiceMockMixin, ModuleStoreTestCase):
    """Tests for CommentSerializer deserialization."""
    def setUp(self):
        # Intercept all HTTP traffic to the comments service with httpretty
        # so tests can register canned responses and inspect the requests.
        super(CommentSerializerDeserializationTest, self).setUp()
        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)
        self.course = CourseFactory.create()
        self.user = UserFactory.create()
        self.register_get_user_response(self.user)
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
        # Smallest payload that should validate for creating a comment.
        self.minimal_data = {
            "thread_id": "test_thread",
            "raw_body": "Test body",
        }
        # Pre-existing comment used as the instance for update tests.
        self.existing_comment = Comment(**make_minimal_cs_comment({
            "id": "existing_comment",
            "thread_id": "existing_thread",
            "body": "Original body",
            "user_id": str(self.user.id),
            "course_id": unicode(self.course.id),
        }))
    def save_and_reserialize(self, data, instance=None):
        """
        Create a serializer with the given data, ensure that it is valid, save
        the result, and return the full comment data from the serializer.
        """
        context = get_context(
            self.course,
            self.request,
            make_minimal_cs_thread({"course_id": unicode(self.course.id)})
        )
        serializer = CommentSerializer(
            instance,
            data=data,
            partial=(instance is not None),
            context=context
        )
        self.assertTrue(serializer.is_valid())
        serializer.save()
        return serializer.data
    @ddt.data(None, "test_parent")
    def test_create_success(self, parent_id):
        # Top-level comments POST to the thread's comment collection; replies
        # (parent_id set) POST to the parent comment's endpoint.
        data = self.minimal_data.copy()
        if parent_id:
            data["parent_id"] = parent_id
        self.register_get_comment_response({"thread_id": "test_thread", "id": parent_id})
        self.register_post_comment_response(
            {"id": "test_comment"},
            thread_id="test_thread",
            parent_id=parent_id
        )
        saved = self.save_and_reserialize(data)
        expected_url = (
            "/api/v1/comments/{}".format(parent_id) if parent_id else
            "/api/v1/threads/test_thread/comments"
        )
        self.assertEqual(urlparse(httpretty.last_request().path).path, expected_url)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "body": ["Test body"],
                "user_id": [str(self.user.id)],
            }
        )
        self.assertEqual(saved["id"], "test_comment")
        self.assertEqual(saved["parent_id"], parent_id)
    def test_create_parent_id_nonexistent(self):
        # A parent_id that the comments service does not know is rejected.
        self.register_get_comment_error_response("bad_parent", 404)
        data = self.minimal_data.copy()
        data["parent_id"] = "bad_parent"
        context = get_context(self.course, self.request, make_minimal_cs_thread())
        serializer = CommentSerializer(data=data, context=context)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors,
            {
                "non_field_errors": [
                    "parent_id does not identify a comment in the thread identified by thread_id."
                ]
            }
        )
    def test_create_parent_id_wrong_thread(self):
        # A parent comment belonging to a different thread is rejected.
        self.register_get_comment_response({"thread_id": "different_thread", "id": "test_parent"})
        data = self.minimal_data.copy()
        data["parent_id"] = "test_parent"
        context = get_context(self.course, self.request, make_minimal_cs_thread())
        serializer = CommentSerializer(data=data, context=context)
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors,
            {
                "non_field_errors": [
                    "parent_id does not identify a comment in the thread identified by thread_id."
                ]
            }
        )
    def test_create_missing_field(self):
        # Omitting any single required field must make validation fail with
        # an error keyed by that field.
        for field in self.minimal_data:
            data = self.minimal_data.copy()
            data.pop(field)
            serializer = CommentSerializer(
                data=data,
                context=get_context(self.course, self.request, make_minimal_cs_thread())
            )
            self.assertFalse(serializer.is_valid())
            self.assertEqual(
                serializer.errors,
                {field: ["This field is required."]}
            )
    def test_update_empty(self):
        # A partial update with no fields PUTs the comment's existing values.
        self.register_put_comment_response(self.existing_comment.attributes)
        self.save_and_reserialize({}, instance=self.existing_comment)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "body": ["Original body"],
                "course_id": [unicode(self.course.id)],
                "user_id": [str(self.user.id)],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "endorsed": ["False"],
            }
        )
    def test_update_all(self):
        # raw_body is the only editable field; updating it PUTs the new body
        # and round-trips it through the serializer output.
        self.register_put_comment_response(self.existing_comment.attributes)
        data = {"raw_body": "Edited body"}
        saved = self.save_and_reserialize(data, instance=self.existing_comment)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "body": ["Edited body"],
                "course_id": [unicode(self.course.id)],
                "user_id": [str(self.user.id)],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "endorsed": ["False"],
            }
        )
        self.assertEqual(saved["raw_body"], data["raw_body"])
    def test_update_empty_raw_body(self):
        # The body may not be blanked out in an update.
        serializer = CommentSerializer(
            self.existing_comment,
            data={"raw_body": ""},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertEqual(
            serializer.errors,
            {"raw_body": ["This field is required."]}
        )
    @ddt.data("thread_id", "parent_id")
    def test_update_non_updatable(self, field):
        # A comment may not be re-parented or moved to another thread.
        serializer = CommentSerializer(
            self.existing_comment,
            data={field: "different_value"},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertEqual(
            serializer.errors,
            {field: ["This field is not allowed in an update."]}
        )
| rhndg/openedx | lms/djangoapps/discussion_api/tests/test_serializers.py | Python | agpl-3.0 | 27,357 |
from django.contrib.sites.models import Site
from django.db import models
class Article(models.Model):
    # Fixture model for Django's model_package tests: exercises an M2M field
    # declared through a string "app_label.ModelName" reference.
    sites = models.ManyToManyField(Site)
    headline = models.CharField(max_length=100)
    # NOTE(review): null=True has no effect on ManyToManyField (Django ignores
    # it); presumably kept deliberately by the test suite - confirm before
    # removing.
    publications = models.ManyToManyField("model_package.Publication", null=True, blank=True,)
| denisenkom/django | tests/model_package/models/article.py | Python | bsd-3-clause | 289 |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Finds all .py files recursively in current directory (.)
# and updates 2007 year with 2008 in the file header.
import re
import os
new_copyright = ["## Copyright (C) 2011-2014, NYU-Poly.\n"]
re_copyright = re.compile(r"\s+## Copyright \(C\) 2011-2014, NYU-Poly\.\s+")
line_copyright = re.compile(r"## Copyright \(C\) 2011-2014, NYU-Poly\.")
IGNORE_LIST = ["update_copyright_year.py"]
files = []
for (path, dnames, fnames) in os.walk('.'):
for fn in fnames:
if fn not in IGNORE_LIST and fn.endswith(".py"):
files.append(os.path.join(path, fn))
print len(files), " files found"
count = 0
for fname in files:
fin = open(fname)
lines = fin.readlines()
fin.seek(0)
all_lines = fin.read()
fin.close()
if re_copyright.search(all_lines) > 0:
#Search through the first lines because sometimes it's not exactly in the second line:
for i in [2,3,4,5]:
if line_copyright.search(lines[i]) > 0:
print "Updating: %s"%fname
newlines = lines[:i]
newlines.extend(new_copyright)
cropped = lines[i+1:] #Replace by i+1 when it is to update just the year.
newlines.extend(cropped)
fout = file(fname, 'w')
fout.writelines(newlines)
fout.close()
count += 1
break
print count, " files updated"
| Nikea/VisTrails | scripts/update_copyright_year.py | Python | bsd-3-clause | 3,291 |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
import numpy as np
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters, \
MiddlewareScheme
from rl_coach.core_types import EnvironmentSteps
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters
from rl_coach.schedules import LinearSchedule
class DQNAlgorithmParameters(AlgorithmParameters):
    """
    Algorithm hyper-parameters for the vanilla DQN agent, overriding the
    generic AlgorithmParameters defaults.
    """

    def __init__(self):
        super().__init__()
        # reward discount factor
        self.discount = 0.99
        # DQN supports parameter-space noise exploration
        self.supports_parameter_noise = True
        # number of consecutive environment steps played between training phases
        self.num_consecutive_playing_steps = EnvironmentSteps(4)
        # copy the online network weights to the target network every 10000 steps
        self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(10000)
class DQNNetworkParameters(NetworkParameters):
    """
    Network definition for DQN's 'main' network: an observation input
    embedder feeding a medium fully-connected middleware and a Q-value head.
    """

    def __init__(self):
        super().__init__()
        # topology: observation embedder -> FC middleware -> Q head
        self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
        self.middleware_parameters = FCMiddlewareParameters(scheme=MiddlewareScheme.Medium)
        self.heads_parameters = [QHeadParameters()]
        # optimization settings
        self.batch_size = 32
        self.optimizer_type = 'Adam'
        self.replace_mse_with_huber_loss = True
        # DQN maintains a separate, periodically synced target network
        self.create_target_network = True
        self.should_get_softmax_probabilities = False
class DQNAgentParameters(AgentParameters):
    """
    Bundles the algorithm, exploration, memory, and network parameters that
    together define a DQN agent.
    """
    def __init__(self):
        super().__init__(algorithm=DQNAlgorithmParameters(),
                         exploration=EGreedyParameters(),
                         memory=ExperienceReplayParameters(),
                         networks={"main": DQNNetworkParameters()})
        # Anneal exploration epsilon from 1 to 0.1 over the first 1M steps;
        # use a fixed small epsilon during evaluation.
        self.exploration.epsilon_schedule = LinearSchedule(1, 0.1, 1000000)
        self.exploration.evaluation_epsilon = 0.05
    @property
    def path(self):
        # Import path the framework uses to instantiate the agent class.
        return 'rl_coach.agents.dqn_agent:DQNAgent'
# Deep Q Network - https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
class DQNAgent(ValueOptimizationAgent):
    """Deep Q Network agent (see the paper link above)."""
    def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
        super().__init__(agent_parameters, parent)
    @property
    def is_on_policy(self) -> bool:
        # Q-learning trains from replayed transitions, so it is off-policy.
        return False
    def select_actions(self, next_states, q_st_plus_1):
        # Greedy action per sample w.r.t. the provided next-state Q values.
        # next_states is unused here - presumably kept so subclasses (e.g.
        # double DQN) can select actions using the online network instead.
        return np.argmax(q_st_plus_1, 1)
    def learn_from_batch(self, batch):
        """
        Run one DQN training step on a batch of transitions and return
        (total_loss, losses, unclipped_grads) from the network update.
        """
        network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
        # for the action we actually took, the error is:
        # TD error = r + discount*max(q_st_plus_1) - q_st
        # for all other actions, the error is 0
        q_st_plus_1, TD_targets = self.networks['main'].parallel_prediction([
            (self.networks['main'].target_network, batch.next_states(network_keys)),
            (self.networks['main'].online_network, batch.states(network_keys))
        ])
        selected_actions = self.select_actions(batch.next_states(network_keys), q_st_plus_1)
        # add Q value samples for logging
        self.q_values.add_sample(TD_targets)
        # only update the action that we have actually done in this transition;
        # terminal transitions (game_overs == 1) drop the bootstrap term.
        TD_errors = []
        for i in range(batch.size):
            new_target = batch.rewards()[i] +\
                         (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * q_st_plus_1[i][selected_actions[i]]
            TD_errors.append(np.abs(new_target - TD_targets[i, batch.actions()[i]]))
            TD_targets[i, batch.actions()[i]] = new_target
        # update errors in prioritized replay buffer (no-op weights for
        # plain experience replay)
        importance_weights = self.update_transition_priorities_and_get_weights(TD_errors, batch)
        result = self.networks['main'].train_and_sync_networks(batch.states(network_keys), TD_targets,
                                                               importance_weights=importance_weights)
        total_loss, losses, unclipped_grads = result[:3]
        return total_loss, losses, unclipped_grads
| NervanaSystems/coach | rl_coach/agents/dqn_agent.py | Python | apache-2.0 | 4,788 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProject.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '04/12/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsProject, QgsVectorLayer, QgsMapLayer
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QT_VERSION_STR
import sip
# QSignalSpy is only available in sufficiently recent PyQt builds; the
# signal-emission tests are skipped when it cannot be imported. Catch only
# ImportError - a bare except would also swallow unrelated failures
# (KeyboardInterrupt, typos inside QtTest, ...).
try:
    from qgis.PyQt.QtTest import QSignalSpy
    use_signal_spy = True
except ImportError:
    use_signal_spy = False

start_app()
def createLayer(name):
    """Return an in-memory point layer (single string field) named *name*."""
    uri = "Point?field=x:string"
    return QgsVectorLayer(uri, name, "memory")
class TestQgsProjectMapLayers(unittest.TestCase):
    def setUp(self):
        # Nothing to prepare; each test clears QgsProject.instance() itself.
        pass
    def testInstance(self):
        """ test retrieving global instance """
        self.assertTrue(QgsProject.instance())
        # register a layer to the singleton
        QgsProject.instance().addMapLayer(createLayer('test'))
        # check that the same instance is returned
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        # clean up so later tests start from an empty registry
        QgsProject.instance().removeAllMapLayers()
    def test_addMapLayer(self):
        """ test adding individual map layers to registry """
        QgsProject.instance().removeAllMapLayers()
        l1 = createLayer('test')
        # addMapLayer returns the layer itself on success
        self.assertEqual(QgsProject.instance().addMapLayer(l1), l1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(QgsProject.instance().count(), 1)
        # adding a second layer should leave existing layers intact
        l2 = createLayer('test2')
        self.assertEqual(QgsProject.instance().addMapLayer(l2), l2)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
        self.assertEqual(QgsProject.instance().count(), 2)
        QgsProject.instance().removeAllMapLayers()
    def test_addMapLayerAlreadyAdded(self):
        """ test that already added layers can't be readded to registry """
        QgsProject.instance().removeAllMapLayers()
        l1 = createLayer('test')
        QgsProject.instance().addMapLayer(l1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(QgsProject.instance().count(), 1)
        # re-adding the same layer is a no-op that returns None
        self.assertEqual(QgsProject.instance().addMapLayer(l1), None)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(QgsProject.instance().count(), 1)
        QgsProject.instance().removeAllMapLayers()
    def test_addMapLayerInvalid(self):
        """ test that invalid map layers can't be added to registry """
        QgsProject.instance().removeAllMapLayers()
        # a layer built with an unknown provider key ("xxx") is invalid and rejected
        self.assertEqual(QgsProject.instance().addMapLayer(QgsVectorLayer("Point?field=x:string", 'test', "xxx")), None)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 0)
        self.assertEqual(QgsProject.instance().count(), 0)
        QgsProject.instance().removeAllMapLayers()
    @unittest.skipIf(not use_signal_spy, "No QSignalSpy available")
    def test_addMapLayerSignals(self):
        """ test that signals are correctly emitted when adding map layer"""
        QgsProject.instance().removeAllMapLayers()
        layer_was_added_spy = QSignalSpy(QgsProject.instance().layerWasAdded)
        layers_added_spy = QSignalSpy(QgsProject.instance().layersAdded)
        legend_layers_added_spy = QSignalSpy(QgsProject.instance().legendLayersAdded)
        l1 = createLayer('test')
        QgsProject.instance().addMapLayer(l1)
        # can't seem to actually test the data which was emitted, so best we can do is test
        # the signal count
        self.assertEqual(len(layer_was_added_spy), 1)
        self.assertEqual(len(layers_added_spy), 1)
        self.assertEqual(len(legend_layers_added_spy), 1)
        # layer not added to legend (second argument False): no legend signal
        QgsProject.instance().addMapLayer(createLayer('test2'), False)
        self.assertEqual(len(layer_was_added_spy), 2)
        self.assertEqual(len(layers_added_spy), 2)
        self.assertEqual(len(legend_layers_added_spy), 1)
        # try readding a layer already in the registry
        QgsProject.instance().addMapLayer(l1)
        # should be no extra signals emitted
        self.assertEqual(len(layer_was_added_spy), 2)
        self.assertEqual(len(layers_added_spy), 2)
        self.assertEqual(len(legend_layers_added_spy), 1)
    def test_addMapLayers(self):
        """ test adding multiple map layers to registry """
        QgsProject.instance().removeAllMapLayers()
        l1 = createLayer('test')
        l2 = createLayer('test2')
        # addMapLayers returns the list of layers actually added
        self.assertEqual(set(QgsProject.instance().addMapLayers([l1, l2])), set([l1, l2]))
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
        self.assertEqual(QgsProject.instance().count(), 2)
        # adding more layers should leave existing layers intact
        l3 = createLayer('test3')
        l4 = createLayer('test4')
        self.assertEqual(set(QgsProject.instance().addMapLayers([l3, l4])), set([l3, l4]))
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test2')), 1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test3')), 1)
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test4')), 1)
        self.assertEqual(QgsProject.instance().count(), 4)
        QgsProject.instance().removeAllMapLayers()
    def test_addMapLayersInvalid(self):
        """ test that invalid map layers can't be added to registry """
        QgsProject.instance().removeAllMapLayers()
        # a layer built with an unknown provider key ("xxx") is invalid and rejected
        self.assertEqual(QgsProject.instance().addMapLayers([QgsVectorLayer("Point?field=x:string", 'test', "xxx")]), [])
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 0)
        self.assertEqual(QgsProject.instance().count(), 0)
        QgsProject.instance().removeAllMapLayers()
    def test_addMapLayersAlreadyAdded(self):
        """ test that already added layers can't be readded to registry """
        QgsProject.instance().removeAllMapLayers()
        l1 = createLayer('test')
        self.assertEqual(QgsProject.instance().addMapLayers([l1]), [l1])
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(QgsProject.instance().count(), 1)
        # re-adding the same layer returns an empty list and changes nothing
        self.assertEqual(QgsProject.instance().addMapLayers([l1]), [])
        self.assertEqual(len(QgsProject.instance().mapLayersByName('test')), 1)
        self.assertEqual(QgsProject.instance().count(), 1)
        QgsProject.instance().removeAllMapLayers()
    @unittest.skipIf(not use_signal_spy, "No QSignalSpy available")
    def test_addMapLayersSignals(self):
        """ test that signals are correctly emitted when adding map layers"""
        QgsProject.instance().removeAllMapLayers()
        layer_was_added_spy = QSignalSpy(QgsProject.instance().layerWasAdded)
        layers_added_spy = QSignalSpy(QgsProject.instance().layersAdded)
        legend_layers_added_spy = QSignalSpy(QgsProject.instance().legendLayersAdded)
        l1 = createLayer('test')
        l2 = createLayer('test2')
        QgsProject.instance().addMapLayers([l1, l2])
        # can't seem to actually test the data which was emitted, so best we can do is test
        # the signal count
        self.assertEqual(len(layer_was_added_spy), 2)
        self.assertEqual(len(layers_added_spy), 1)
        self.assertEqual(len(legend_layers_added_spy), 1)
        # layers not added to legend (second argument False): no legend signal
        QgsProject.instance().addMapLayers([createLayer('test3'), createLayer('test4')], False)
        self.assertEqual(len(layer_was_added_spy), 4)
        self.assertEqual(len(layers_added_spy), 2)
        self.assertEqual(len(legend_layers_added_spy), 1)
        # try readding a layer already in the registry
        QgsProject.instance().addMapLayers([l1, l2])
        # should be no extra signals emitted
        self.assertEqual(len(layer_was_added_spy), 4)
        self.assertEqual(len(layers_added_spy), 2)
        self.assertEqual(len(legend_layers_added_spy), 1)
def test_mapLayerById(self):
""" test retrieving map layer by ID """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
self.assertEqual(QgsProject.instance().mapLayer('bad'), None)
self.assertEqual(QgsProject.instance().mapLayer(None), None)
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().mapLayer('bad'), None)
self.assertEqual(QgsProject.instance().mapLayer(None), None)
self.assertEqual(QgsProject.instance().mapLayer(l1.id()), l1)
self.assertEqual(QgsProject.instance().mapLayer(l2.id()), l2)
    def test_mapLayersByName(self):
        """ test retrieving map layer by name """
        QgsProject.instance().removeAllMapLayers()
        # test no crash with empty registry
        self.assertEqual(QgsProject.instance().mapLayersByName('bad'), [])
        self.assertEqual(QgsProject.instance().mapLayersByName(None), [])
        l1 = createLayer('test')
        l2 = createLayer('test2')
        QgsProject.instance().addMapLayers([l1, l2])
        self.assertEqual(QgsProject.instance().mapLayersByName('bad'), [])
        self.assertEqual(QgsProject.instance().mapLayersByName(None), [])
        self.assertEqual(QgsProject.instance().mapLayersByName('test'), [l1])
        self.assertEqual(QgsProject.instance().mapLayersByName('test2'), [l2])
        # duplicate name: all layers sharing the name are returned
        l3 = createLayer('test')
        QgsProject.instance().addMapLayer(l3)
        self.assertEqual(set(QgsProject.instance().mapLayersByName('test')), set([l1, l3]))
def test_mapLayers(self):
""" test retrieving map layers list """
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
self.assertEqual(QgsProject.instance().mapLayers(), {})
l1 = createLayer('test')
l2 = createLayer('test2')
QgsProject.instance().addMapLayers([l1, l2])
self.assertEqual(QgsProject.instance().mapLayers(), {l1.id(): l1, l2.id(): l2})
    def test_removeMapLayersById(self):
        """ test removing map layers by ID """
        QgsProject.instance().removeAllMapLayers()
        # test no crash with empty registry
        QgsProject.instance().removeMapLayers(['bad'])
        QgsProject.instance().removeMapLayers([None])
        l1 = createLayer('test')
        l2 = createLayer('test2')
        l3 = createLayer('test3')
        QgsProject.instance().addMapLayers([l1, l2, l3])
        self.assertEqual(QgsProject.instance().count(), 3)
        # removing unknown IDs leaves the registry untouched
        QgsProject.instance().removeMapLayers(['bad'])
        self.assertEqual(QgsProject.instance().count(), 3)
        QgsProject.instance().removeMapLayers([None])
        self.assertEqual(QgsProject.instance().count(), 3)
        # remove valid layers
        l1_id = l1.id()
        QgsProject.instance().removeMapLayers([l1_id])
        self.assertEqual(QgsProject.instance().count(), 2)
        # double remove is a harmless no-op
        QgsProject.instance().removeMapLayers([l1_id])
        self.assertEqual(QgsProject.instance().count(), 2)
        # the registry owns its layers, so removal deletes the object
        self.assertTrue(sip.isdeleted(l1))
        # remove multiple
        QgsProject.instance().removeMapLayers([l2.id(), l3.id()])
        self.assertEqual(QgsProject.instance().count(), 0)
        self.assertTrue(sip.isdeleted(l2))
        # a layer that was never registered must not be deleted
        l4 = createLayer('test4')
        QgsProject.instance().removeMapLayers([l4.id()])
        self.assertFalse(sip.isdeleted(l4))
# fails on qt5 due to removeMapLayers list type conversion - needs a PyName alias
# added to removeMapLayers for QGIS 3.0
@unittest.expectedFailure(QT_VERSION_STR[0] == '5')
def test_removeMapLayersByLayer(self):
""" test removing map layers by layer"""
QgsProject.instance().removeAllMapLayers()
# test no crash with empty registry
QgsProject.instance().removeMapLayers([None])
l1 = createLayer('test')
l2 = createLayer('test2')
l3 = createLayer('test3')
QgsProject.instance().addMapLayers([l1, l2, l3])
self.assertEqual(QgsProject.instance().count(), 3)
#remove bad layers
QgsProject.instance().removeMapLayers([None])
self.assertEqual(QgsProject.instance().count(), 3)
# remove valid layers
QgsProject.instance().removeMapLayers([l1])
self.assertEqual(QgsProject.instance().count(), 2)
# test that layer has been deleted
self.assertTrue(sip.isdeleted(l1))
# remove multiple
QgsProject.instance().removeMapLayers([l2, l3])
self.assertEqual(QgsProject.instance().count(), 0)
self.assertTrue(sip.isdeleted(l2))
self.assertTrue(sip.isdeleted(l3))
    def test_removeMapLayerById(self):
        """ test removing a map layer by ID """
        QgsProject.instance().removeAllMapLayers()
        # test no crash with empty registry
        QgsProject.instance().removeMapLayer('bad')
        QgsProject.instance().removeMapLayer(None)
        l1 = createLayer('test')
        l2 = createLayer('test2')
        QgsProject.instance().addMapLayers([l1, l2])
        self.assertEqual(QgsProject.instance().count(), 2)
        # removing unknown IDs leaves the registry untouched
        QgsProject.instance().removeMapLayer('bad')
        self.assertEqual(QgsProject.instance().count(), 2)
        QgsProject.instance().removeMapLayer(None)
        self.assertEqual(QgsProject.instance().count(), 2)
        # remove valid layers
        l1_id = l1.id()
        QgsProject.instance().removeMapLayer(l1_id)
        self.assertEqual(QgsProject.instance().count(), 1)
        # double remove is a harmless no-op
        QgsProject.instance().removeMapLayer(l1_id)
        self.assertEqual(QgsProject.instance().count(), 1)
        # the registry owns its layers, so removal deletes the object
        self.assertTrue(sip.isdeleted(l1))
        # remove second layer
        QgsProject.instance().removeMapLayer(l2.id())
        self.assertEqual(QgsProject.instance().count(), 0)
        self.assertTrue(sip.isdeleted(l2))
        # a layer that was never registered must not be deleted
        l3 = createLayer('test3')
        QgsProject.instance().removeMapLayer(l3.id())
        self.assertFalse(sip.isdeleted(l3))
    def test_removeMapLayerByLayer(self):
        """ test removing a map layer by layer """
        QgsProject.instance().removeAllMapLayers()
        # test no crash with empty registry
        QgsProject.instance().removeMapLayer('bad')
        QgsProject.instance().removeMapLayer(None)
        l1 = createLayer('test')
        l2 = createLayer('test2')
        QgsProject.instance().addMapLayers([l1, l2])
        self.assertEqual(QgsProject.instance().count(), 2)
        # removing None or an unregistered layer leaves the registry untouched
        QgsProject.instance().removeMapLayer(None)
        self.assertEqual(QgsProject.instance().count(), 2)
        l3 = createLayer('test3')
        QgsProject.instance().removeMapLayer(l3)
        self.assertEqual(QgsProject.instance().count(), 2)
        # remove valid layers
        QgsProject.instance().removeMapLayer(l1)
        self.assertEqual(QgsProject.instance().count(), 1)
        # the registry owns its layers, so removal deletes the object
        self.assertTrue(sip.isdeleted(l1))
        # remove second layer
        QgsProject.instance().removeMapLayer(l2)
        self.assertEqual(QgsProject.instance().count(), 0)
        self.assertTrue(sip.isdeleted(l2))
        # a layer that was never registered must not be deleted
        l3 = createLayer('test3')
        QgsProject.instance().removeMapLayer(l3)
        self.assertFalse(sip.isdeleted(l3))
def test_removeAllMapLayers(self):
    """Clearing the registry removes every layer."""
    project = QgsProject.instance()
    project.removeAllMapLayers()
    first = createLayer('test')
    second = createLayer('test2')
    project.addMapLayers([first, second])
    self.assertEqual(project.count(), 2)
    project.removeAllMapLayers()
    # registry is empty and name lookups find nothing
    self.assertEqual(project.count(), 0)
    self.assertEqual(project.mapLayersByName('test'), [])
    self.assertEqual(project.mapLayersByName('test2'), [])
@unittest.skipIf(not use_signal_spy, "No QSignalSpy available")
def test_addRemoveLayersSignals(self):
    """ test that signals are correctly emitted when removing map layers"""
    project = QgsProject.instance()
    project.removeAllMapLayers()

    will_remove_batch = QSignalSpy(project.layersWillBeRemoved)
    will_remove_str = QSignalSpy(project.layerWillBeRemoved[str])
    will_remove_layer = QSignalSpy(project.layerWillBeRemoved[QgsMapLayer])
    removed_batch = QSignalSpy(project.layersRemoved)
    removed_single = QSignalSpy(project.layerRemoved)
    removed_all = QSignalSpy(project.removeAll)

    def check_counts(batch, single_str, single_layer, r_batch, r_single, r_all):
        # the emitted payloads cannot be inspected here, so only the
        # emission counts are checked
        self.assertEqual(len(will_remove_batch), batch)
        self.assertEqual(len(will_remove_str), single_str)
        self.assertEqual(len(will_remove_layer), single_layer)
        self.assertEqual(len(removed_batch), r_batch)
        self.assertEqual(len(removed_single), r_single)
        self.assertEqual(len(removed_all), r_all)

    l1 = createLayer('l1')
    l2 = createLayer('l2')
    l3 = createLayer('l3')
    l4 = createLayer('l4')
    project.addMapLayers([l1, l2, l3, l4])

    # remove a single layer
    project.removeMapLayer(l1)
    check_counts(1, 1, 1, 1, 1, 0)
    self.assertEqual(project.count(), 3)

    # remove two layers in one call
    project.removeMapLayers([l2.id(), l3.id()])
    check_counts(2, 3, 3, 2, 3, 0)
    self.assertEqual(project.count(), 1)

    # remove everything that is left
    project.removeAllMapLayers()
    check_counts(3, 4, 4, 3, 4, 1)

    # layers unknown to the registry must not emit anything
    project.removeMapLayers(['asdasd'])
    check_counts(3, 4, 4, 3, 4, 1)
    l5 = createLayer('test5')
    project.removeMapLayer(l5)
    check_counts(3, 4, 4, 3, 4, 1)
def test_RemoveLayerShouldNotSegFault(self):
    """Removing unknown layers must neither crash nor insert null layers."""
    reg = QgsProject.instance()
    reg.removeAllMapLayers()
    # should not segfault
    reg.removeMapLayers(['not_exists'])
    reg.removeMapLayer('not_exists2')
    # the failed removals must not have inserted a null layer
    for _, layer in list(reg.mapLayers().items()):
        assert(layer is not None)
if __name__ == '__main__':
    unittest.main()
| myarjunar/QGIS | tests/src/python/test_qgsmaplayerregistry.py | Python | gpl-2.0 | 20,785 |
# $Id: buddy.py 4704 2014-01-16 05:30:46Z ming $
#
# pjsua Python GUI Demo
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
if sys.version_info[0] >= 3: # Python 3
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msgbox
else:
import Tkinter as tk
import tkMessageBox as msgbox
import ttk
import random
import pjsua2 as pj
import endpoint
import application
# Buddy class
class Buddy(pj.Buddy):
    """
    High level Python Buddy object, derived from pjsua2's Buddy object.
    """

    def __init__(self, app):
        pj.Buddy.__init__(self)
        self.app = app
        self.randId = random.randint(1, 9999)
        self.cfg = None
        self.account = None

    def statusText(self):
        """Return a short, human readable presence status string."""
        info = self.getInfo()
        # no active subscription -> no status to show
        if info.subState != pj.PJSIP_EVSUB_STATE_ACTIVE:
            return ''
        if info.presStatus.status == pj.PJSUA_BUDDY_STATUS_ONLINE:
            # prefer the buddy's own status text when it provides one
            return info.presStatus.statusText or 'Online'
        if info.presStatus.status == pj.PJSUA_BUDDY_STATUS_OFFLINE:
            return 'Offline'
        return 'Unknown'

    def onBuddyState(self):
        # forward presence changes to the application for UI refresh
        self.app.updateBuddy(self)
class SettingDialog(tk.Toplevel):
    """
    Modal dialog for editing the settings of a single buddy.
    """

    def __init__(self, parent, cfg):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        self.parent = parent
        self.geometry("+100+100")
        self.title('Buddy settings')
        self.frm = ttk.Frame(self)
        self.frm.pack(expand='yes', fill='both')
        self.isOk = False
        self.cfg = cfg
        self.createWidgets()

    def doModal(self):
        """Block until the dialog closes; return True when accepted."""
        if self.parent:
            self.parent.wait_window(self)
        else:
            self.wait_window(self)
        return self.isOk

    def createWidgets(self):
        # notebook holding the settings tabs
        self.frm.rowconfigure(0, weight=1)
        self.frm.rowconfigure(1, weight=0)
        self.frm.columnconfigure(0, weight=1)
        self.frm.columnconfigure(1, weight=1)
        self.wTab = ttk.Notebook(self.frm)
        self.wTab.grid(column=0, row=0, columnspan=2, padx=5, pady=5,
                       sticky=tk.N+tk.S+tk.W+tk.E)

        # Ok / Cancel buttons
        btnOk = ttk.Button(self.frm, text='Ok', command=self.onOk)
        btnOk.grid(column=0, row=1, sticky=tk.E, padx=20, pady=10)
        btnCancel = ttk.Button(self.frm, text='Cancel', command=self.onCancel)
        btnCancel.grid(column=1, row=1, sticky=tk.W, padx=20, pady=10)

        self.createBasicTab()

    def createBasicTab(self):
        # Tk variables bridging the config values and the widgets
        self.cfgUri = tk.StringVar()
        self.cfgUri.set(self.cfg.uri)
        self.cfgSubscribe = tk.IntVar()
        self.cfgSubscribe.set(self.cfg.subscribe)

        frm = ttk.Frame(self.frm)
        frm.columnconfigure(0, weight=1)
        frm.columnconfigure(1, weight=2)
        row = 0
        ttk.Label(frm, text='URI:').grid(row=row, column=0,
                                         sticky=tk.E, pady=2)
        ttk.Entry(frm, textvariable=self.cfgUri, width=40).grid(
            row=row, column=1, sticky=tk.W+tk.E, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='Subscribe presence',
                        variable=self.cfgSubscribe).grid(
            row=row, column=1, sticky=tk.W, padx=6, pady=2)

        self.wTab.add(frm, text='Basic Settings')

    def onOk(self):
        # validate before committing the values back into cfg
        errors = ""
        if self.cfgUri.get():
            if not endpoint.validateSipUri(self.cfgUri.get()):
                errors += "Invalid Buddy URI: '%s'\n" % (self.cfgUri.get())
        if errors:
            msgbox.showerror("Error detected:", errors)
            return

        self.cfg.uri = self.cfgUri.get()
        self.cfg.subscribe = self.cfgSubscribe.get()
        self.isOk = True
        self.destroy()

    def onCancel(self):
        self.destroy()
if __name__ == '__main__':
    application.main()
| yuezhou/telephony2 | telephony/Classes/pjproject-2.2.1/pjsip-apps/src/pygui/buddy.py | Python | mit | 4,267 |
from uw_hfs.dao import Hfs_DAO
| uw-it-aca/uw-restclients | restclients/dao_implementation/hfs.py | Python | apache-2.0 | 31 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# URL route for the campaign detail view (pk + slug).
# NOTE(review): the view prefix is 'aldryn_mailchimp.views' while this file
# lives in ctdata_mailchimp — presumably the fork kept the upstream module
# path; confirm the import target actually resolves in this project.
urlpatterns = patterns('aldryn_mailchimp.views',
url(r'^(?P<pk>[0-9]+)/(?P<slug>[\w.@+-]+)/$', 'campaign_detail', name='mailchimp_campaign_detail'),
)
| CT-Data-Collaborative/ctdata-mailchimp | ctdata_mailchimp/urls.py | Python | bsd-3-clause | 224 |
import pinky
class TheBrain:
    """Decides when Pinky needs an intervention, from his energy and health."""

    def __init__(self, email, password):
        self.pinky = pinky.Pinky(email, password)

    def take_over_the_world(self):
        helper = self.pinky
        levels = helper.energy_levels()
        deficit = levels['max'] - levels['value']
        # unconditional help: dead, or nearly fully rested
        if (not helper.is_alive()) or deficit < 8:
            helper.intervention()
        elif deficit < 30:
            if helper.action() == 3 and helper.health_percentage() < 40:
                helper.intervention()
        elif deficit < 50:
            if helper.action() == 3 and helper.health_percentage() < 15:
                helper.intervention()
import pyglet
from pyglet.gl import*
from unit import Unit
from gun import Gun
from math import pi, sin, cos
from utils import load_image
from random import randint
from utils import*
from resources import*
from shared import*
glEnable(GL_BLEND)
class PlayerUnitHandlers(object):
    """Keyboard and mouse event handlers driving a Player unit."""

    def __init__(self, player):
        self.player = player

    def on_key_press(self, symbol, modifiers):
        player = self.player
        if symbol == 119:    # w
            player.up = True
        elif symbol == 115:  # s
            player.down = True
        elif symbol == 97:   # a
            player.left = True
        elif symbol == 100:  # d
            player.right = True
        elif symbol == 114:  # r
            player.gun.reload()
        # weapon selection keys 1, 2, 3, ...
        for slot in range(len(player.guns)):
            if symbol == 49 + slot:
                player.switch_gun(player.guns[slot])
        if modifiers == 17:    # shift
            if not player.sprinting:
                player.sprint_on()
        elif modifiers == 18:  # ctrl
            if not player.crouching:
                player.crouch_on()

    def on_key_release(self, symbol, modifiers):
        player = self.player
        if symbol == 119:    # w
            player.up = False
        elif symbol == 115:  # s
            player.down = False
        elif symbol == 97:   # a
            player.left = False
        elif symbol == 100:  # d
            player.right = False
        if modifiers != 17:  # shift no longer held
            if player.sprinting:
                player.sprint_off()
        if modifiers != 18:  # ctrl no longer held
            if player.crouching:
                player.crouch_off()

    def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
        player = self.player
        selected = player.guns.index(player.gun)
        # cycle through the gun list, wrapping at both ends
        if scroll_y > 0:
            selected += 1
            if selected >= len(player.guns):
                selected = 0
        else:
            selected -= 1
            if selected < 0:
                selected = len(player.guns) - 1
        player.switch_gun(player.guns[selected])

    def on_mouse_motion(self, x, y, dx, dy):
        self.player.aim_x = x
        self.player.aim_y = y

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        self.player.aim_x = x
        self.player.aim_y = y

    def on_mouse_press(self, x, y, button, modifiers):
        self.player.aim_x = x
        self.player.aim_y = y
        if button == 1:    # left button
            self.player.pull_trigger()
        elif button == 4:  # right button
            self.player.gun.reload()

    def on_mouse_release(self, x, y, button, modifiers):
        if button == 1:
            self.player.release_trigger()
class Player(Unit):
    """The player-controlled unit: input handlers, crosshair, HUD and score.

    Fixes over the previous revision: restored the lost indentation, removed
    ~70 lines of dead commented-out crosshair code and a stray debug print in
    clean(), and switched vertex counts to integer division (//) so they stay
    ints under both Python 2 and 3.
    """

    def __init__(self,
                 image=Resources.Image.player,
                 x=0,
                 y=0,
                 radius=24,
                 health=1000,
                 max_speed=150):
        super(Player, self).__init__(image, x, y, radius, health, max_speed)
        # keyboard & mouse handlers
        self.handlers = PlayerUnitHandlers(self)
        window.push_handlers(self.handlers)
        # player specific HUD state
        self.crosshair = None
        self.guide = None
        self.ammo_text = pyglet.text.Label(
            text=str(self.gun.bullets_left) + "/" + str(self.gun.ammo_pool),
            x=self.x, y=self.y, batch=batch, group=hud, anchor_x='center')
        self.score = 0
        self.score_text = pyglet.text.Label(
            text="Kills: " + str(self.score), x=5, y=5, batch=batch, group=hud)
        self.reload_bar = None
        self.reloading_time = 0

    def loop(self, dt):
        super(Player, self).loop(dt)
        self.update_crosshair()
        self.update_ammo_text()

    def on_kill(self, victim):
        self.increase_score(1)

    def increase_score(self, amount):
        self.score += amount
        self.score_text.text = "Kills: " + str(self.score)

    def update_ammo_text(self):
        # keep the ammo counter just below the player sprite
        self.ammo_text.x = int(self.x)
        self.ammo_text.y = int(self.y - 70)
        self.ammo_text.text = str(self.gun.bullets_left) + "/" + str(self.gun.ammo_pool)

    def update_crosshair(self):
        """Rebuild the ring-shaped crosshair around the aim point.

        The ring radius grows with distance to the player and with the
        current inaccuracy; its colour fades from green to red as health
        drops.
        """
        x = self.aim_x
        y = self.aim_y
        dx1 = abs(x - self.x)
        dy1 = abs(y - self.y)
        radius = 4 + ((dx1 + dy1) * 3) * ((self.accuracy()) * 0.2)  # dunno but looks right
        iterations = int(0.15 * radius * pi) + 8
        s = sin(2 * pi / iterations)
        c = cos(2 * pi / iterations)
        # two concentric point rings (inner/outer edge of the band)
        dx, dy = radius, 0
        dx1, dy1 = radius + 3, 0
        vertices = []
        for i in range(iterations + 1):
            vertices.extend([x + dx, y + dy])
            dx, dy = (dx * c - dy * s), (dy * c + dx * s)
            vertices.extend([x + dx1, y + dy1])
            dx1, dy1 = (dx1 * c - dy1 * s), (dy1 * c + dx1 * s)
        count = len(vertices) // 2
        if self.crosshair is not None:
            self.crosshair.delete()
        p = self.health.current_percentage()
        red = 255 - p
        green = p
        self.crosshair = batch.add(count, GL_QUAD_STRIP, hud,
                                   ('v2f', vertices),
                                   ('c4B', (red, green, 0, 255) * count))

    def update_guide(self):
        # debug helper: straight line from the player to the aim point
        vertices = [int(self.x), int(self.y), int(self.aim_x), int(self.aim_y)]
        if self.guide is not None:
            self.guide.delete()
        self.guide = batch.add(2, GL_LINES, hud, ('v2i', vertices))

    def update_reload_bar(self, dt, reload_time, iterations):
        """Grow a translucent pie around the aim point while reloading."""
        self.reloading_time += dt
        x = self.aim_x
        y = self.aim_y
        dx1 = abs(x - self.x)
        dy1 = abs(y - self.y)
        radius = 5 + ((dx1 + dy1) * 3) * ((self.accuracy()) * 0.2)
        s = sin(2 * pi / iterations)
        c = cos(2 * pi / iterations)
        dx, dy = 0, radius
        vertices = [x, y]
        for i in range(int((self.reloading_time / reload_time) * iterations) + 1):
            vertices.extend([x + dx, y + dy])
            dx, dy = (dx * c + dy * s), (dy * c - dx * s)
        count = len(vertices) // 2
        if self.reload_bar is not None:
            self.reload_bar.delete()
        self.reload_bar = batch.add(count, GL_TRIANGLE_FAN, hud,
                                    ('v2f', vertices),
                                    ('c4B', (255, 255, 255, 80) * count))
        if self.reloading_time > reload_time:
            # reload finished: clear the bar and stop the scheduled updates
            self.reloading_time = 0
            self.reload_bar.delete()
            self.reload_bar = None
            pyglet.clock.unschedule(self.update_reload_bar)

    def on_reload(self, reload_time):
        reload_time = float(reload_time)
        iterations = 128
        self.reload_bar = None
        pyglet.clock.schedule_interval_soft(
            self.update_reload_bar, reload_time / iterations,
            reload_time, iterations)

    def on_gun_switch(self):
        # abort any reload animation in progress
        if self.reload_bar is not None:
            self.reloading_time = 0
            pyglet.clock.unschedule(self.update_reload_bar)
            self.reload_bar.delete()
            self.reload_bar = None

    def release(self):
        window.remove_handlers(self.handlers)
        super(Player, self).release()

    def clean(self, dt):
        # HUD elements may already have been deleted elsewhere; ignore that
        try:
            self.ammo_text.delete()
            self.crosshair.delete()
            self.guide.delete()
        except AttributeError:
            pass
        except AssertionError:
            pass
        super(Player, self).clean(dt)
| Eaglemania/ASS | player.py | Python | gpl-2.0 | 9,494 |
# Copyright 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Instrumentation-based profiling for Python.
trace_event allows you to hand-instrument your code with areas of interest.
When enabled, trace_event logs the start and stop times of these events to a
logfile. These resulting logfiles can be viewed with either Chrome's about:tracing
UI or with the standalone trace_event_viewer available at
http://www.github.com/natduca/trace_event_viewer/
To use trace event, simply call trace_event_enable and start instrumenting your code:
from trace_event import *
if "--trace" in sys.argv:
trace_enable("myfile.trace")
@traced
def foo():
...
class MyFoo(object):
@traced
def bar(self):
...
trace_event records trace events to an in-memory buffer. If your application is
long running and you want to see the results of a trace before it exits, you can call
trace_flush to write any in-memory events to disk.
To help intregrating trace_event into existing codebases that dont want to add
trace_event as a dependancy, trace_event is split into an import shim
(trace_event.py) and an implementaiton (trace_event_impl/*). You can copy the
shim, trace_event.py, directly into your including codebase. If the
trace_event_impl is not found, the shim will simply noop.
trace_event is safe with regard to Python threads. Simply trace as you normally would and each
thread's timing will show up in the trace file.
Multiple processes can safely output into a single trace_event logfile. If you
fork after enabling tracing, the child process will continue outputting to the
logfile. Use of the multiprocessing module will work as well. In both cases,
however, note that disabling tracing in the parent process will not stop tracing
in the child processes.
"""
# The implementation module is optional: when it is missing every tracing
# entry point below degrades to a no-op with the SAME signature, so callers
# never need to care whether tracing is available.
try:
  import trace_event_impl
except ImportError:
  trace_event_impl = None


class TraceException(Exception):
  """Raised when tracing cannot be enabled (no trace_event_impl found).

  BUG FIX: this type was previously referenced but never defined, so the
  no-op trace_enable raised NameError instead.
  """


def trace_can_enable():
  """
  Returns True if a trace_event_impl was found. If false,
  trace_enable will fail. Regular tracing methods, including
  trace_begin and trace_end, will simply be no-ops.
  """
  return trace_event_impl != None

if trace_event_impl:
  import time

  def trace_is_enabled():
    return trace_event_impl.trace_is_enabled()

  def trace_enable(logfile):
    return trace_event_impl.trace_enable(logfile)

  def trace_disable():
    return trace_event_impl.trace_disable()

  def trace_flush():
    trace_event_impl.trace_flush()

  def trace_begin(name, **kwargs):
    args_to_log = {key: repr(value) for key, value in kwargs.iteritems()}
    trace_event_impl.add_trace_event("B", time.time(), "python", name,
                                     args_to_log)

  def trace_end(name):
    trace_event_impl.add_trace_event("E", time.time(), "python", name)

  def trace(name, **kwargs):
    return trace_event_impl.trace(name, **kwargs)

  def traced(fn):
    return trace_event_impl.traced(fn)
else:
  import contextlib

  # BUG FIX: the no-op trace_begin/trace_end previously took a spurious
  # `self` first argument and trace_enable took no logfile, which made the
  # no-op signatures incompatible with the real implementations above.
  def trace_enable(logfile=None):
    raise TraceException(
        "Cannot enable trace_event. No trace_event_impl module found.")

  def trace_disable():
    pass

  def trace_is_enabled():
    return False

  def trace_flush():
    pass

  def trace_begin(name, **kwargs):
    pass

  def trace_end(name):
    pass

  @contextlib.contextmanager
  def trace(name, **kwargs):
    yield

  def traced(fn):
    return fn
trace_enable.__doc__ = """Enables tracing.
Once enabled, the enabled bit propagates to forked processes and
multiprocessing subprocesses. Regular child processes, e.g. those created via
os.system/popen, or subprocess.Popen instances, will not get traced. You can,
however, enable tracing on those subprocess manually.
Trace files are multiprocess safe, so you can have multiple processes
outputting to the same tracelog at once.
log_file can be one of three things:
None: a logfile is opened based on sys[argv], namely
"./" + sys.argv[0] + ".json"
string: a logfile of the given name is opened.
file-like object: the fileno() is is used. The underlying file descriptor
must support fcntl.lockf() operations.
"""
trace_disable.__doc__ = """Disables tracing, if enabled.
Will not disable tracing on any existing child proceses that were forked
from this process. You must disable them yourself.
"""
trace_flush.__doc__ = """Flushes any currently-recorded trace data to disk.

  trace_event records traces into an in-memory buffer for efficiency. Flushing
  is only done at process exit or when this method is called.
  """

# BUG FIX: this text describes trace_is_enabled, but it was previously
# assigned to trace_flush.__doc__ a second time, clobbering the docstring
# just above and leaving trace_is_enabled undocumented.
trace_is_enabled.__doc__ = """Returns whether tracing is enabled.
  """
trace_begin.__doc__ = """Records the beginning of an event of the given name.
The building block for performance tracing. A typical example is:
from trace_event import *
def something_heavy():
trace_begin("something_heavy")
trace_begin("read")
try:
lines = open().readlines()
finally:
trace_end("read")
trace_begin("parse")
try:
parse(lines)
finally:
trace_end("parse")
trace_end("something_heavy")
Note that a trace_end call must be issued for every trace_begin call. When
tracing around blocks that might throw exceptions, you should use the trace function,
or a try-finally pattern to ensure that the trace_end method is called.
See the documentation for the @traced decorator for a simpler way to instrument
functions and methods.
"""
trace_end.__doc__ = """Records the end of an event of the given name.
See the documentation for trace_begin for more information.
Make sure to issue a trace_end for every trace_begin issued. Failure to pair
these calls will lead to bizarrely tall looking traces in the
trace_event_viewer UI.
"""
trace.__doc__ = """Traces a block of code using a with statement.
Example usage:
from trace_event import *
def something_heavy(lines):
with trace("parse_lines", lines=lines):
parse(lines)
If tracing an entire function call, prefer the @traced decorator.
"""
traced.__doc__ = """
Traces the provided function, using the function name for the actual generated event.
Prefer this decorator over the explicit trace_begin and trace_end functions
whenever you are tracing the start and stop of a function. It automatically
issues trace_begin/end events, even when the wrapped function throws.
You can also pass the function's argument names to traced, and the argument
values will be added to the trace. Example usage:
from trace_event import *
@traced("url")
def send_request(url):
urllib2.urlopen(url).read()
"""
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/py_trace_event/src/trace_event.py | Python | mit | 6,729 |
"""Tests for forbes implementation."""
import pytest
from forbes import *
@pytest.fixture
def data():
    """Fixture returning the Forbes sample data loaded from JSON."""
    return get_json()
def test_billionaire_age_valid(data):
    """find_billionaire picks the youngest billionaire with a valid age."""
    youngest = find_billionaire(data)['youngest_valid']
    assert youngest['name'] == 'Mark Zuckerberg'
    assert youngest['age'] == 32
def test_billionaire_oldest(data):
    """find_billionaire picks the oldest billionaire under 80."""
    oldest = find_billionaire(data)['oldest_under_80']
    assert oldest['name'] == 'Phil Knight'
    assert oldest['age'] == 78
def test_billionaire_age_not_valid(data):
    """A negative age is invalid and never wins the 'youngest' slot."""
    youngest = find_billionaire(data)['youngest_valid']
    assert youngest['name'] != 'Beate Heister & Karl Albrecht Jr.'
    assert youngest['age'] != -1
def test_billionaire_age_0_is_valid(data):
    """Age 0 counts as a valid age."""
    newcomer = {
        'name': 'Adam Smith',
        'age': 0,
        'rank': 35,
        'net_worth (USD)': 100000000000,
        'source': 'baby food',
        'country': 'United States',
    }
    data.append(newcomer)
    youngest = find_billionaire(data)['youngest_valid']
    assert youngest['name'] == 'Adam Smith'
    assert youngest['age'] == 0
| serashioda/code-katas | src/tests/test_forbes.py | Python | mit | 1,438 |
import psycopg2
from .db_controller import DbController
class Hashtag:
    """A hashtag persisted in the 'Event' table through DbController.

    BUG FIXES over the previous revision:
    * every error handler caught ``sqlite3.Error`` although only psycopg2 is
      imported here, so any database error raised NameError instead of being
      handled — they now catch ``psycopg2.Error``;
    * ``all_hashtag`` queried table 'User' while every other method uses
      'Event'.
    """

    def __init__(self, name):
        self.name = name
        self.__db = DbController(table='Event')

    @staticmethod
    def create_hashtag(name):
        """Insert a new hashtag row; return a Hashtag, or None on DB error."""
        try:
            db = DbController(table='Event')
            db.create(name=name)
            return Hashtag(name)
        except psycopg2.Error as e:
            print(e)
            return None

    @staticmethod
    def get_hashtag(name):
        """Look up a hashtag by name; return a Hashtag, or None on DB error."""
        try:
            db = DbController(table='Event')
            # existence check only; the fetched row data itself is unused
            db.get('name', name)
            return Hashtag(name)
        except psycopg2.Error as e:
            print(e)
            return None

    @staticmethod
    def all_hashtag():
        """Return every hashtag as a list, or None on DB error."""
        try:
            db = DbController(table='Event')
            hashtags = db.all()
            return [Hashtag(row['name']) for row in hashtags]
        except psycopg2.Error as e:
            print(e)
            return None

    def delete_event(self):
        """Delete this hashtag's row; return True on success."""
        try:
            self.__db.delete(self.name)
            return True
        except psycopg2.Error as e:
            print(e)
            return False
| lubchenko05/belka.system | belkaBot/logic/hashtag.py | Python | mit | 1,274 |
try:
from unittest import mock
except ImportError:
import mock
from docker_custodian.docker_autostop import (
build_container_matcher,
get_opts,
has_been_running_since,
main,
stop_container,
stop_containers,
)
def test_stop_containers(mock_client, container, now):
    """Matching containers are stopped and the matcher sees their names."""
    matcher = mock.Mock()
    mock_client.containers.return_value = [container]
    mock_client.inspect_container.return_value = container

    stop_containers(mock_client, now, matcher, False)

    matcher.assert_called_once_with('container_name')
    mock_client.stop.assert_called_once_with(container['Id'])
def test_stop_container(mock_client):
    """stop_container delegates to the docker client."""
    container_id = 'asdb'
    stop_container(mock_client, container_id)
    mock_client.stop.assert_called_once_with(container_id)
def test_build_container_matcher():
    """The matcher accepts exactly the configured name prefixes."""
    matcher = build_container_matcher(['one_', 'two_'])

    assert matcher('one_container')
    assert matcher('two_container')
    assert not matcher('three_container')
    assert not matcher('one')
def test_has_been_running_since_true(container, later_time):
    # container started before later_time -> running since then
    assert has_been_running_since(container, later_time)
def test_has_been_running_since_false(container, earlier_time):
    # container started after earlier_time -> not running since then
    assert not has_been_running_since(container, earlier_time)
@mock.patch('docker_custodian.docker_autostop.build_container_matcher',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.stop_containers',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.get_opts',
            autospec=True)
@mock.patch('docker_custodian.docker_autostop.docker', autospec=True)
def test_main(
    mock_docker,
    mock_get_opts,
    mock_stop_containers,
    mock_build_matcher
):
    """main() wires the parsed options into the matcher and stop loop."""
    mock_get_opts.return_value.timeout = 30

    main()

    mock_get_opts.assert_called_once_with()
    mock_build_matcher.assert_called_once_with(
        mock_get_opts.return_value.prefix)
    mock_stop_containers.assert_called_once_with(
        mock.ANY,
        mock_get_opts.return_value.max_run_time,
        mock_build_matcher.return_value,
        mock_get_opts.return_value.dry_run)
def test_get_opts_with_defaults():
    """Unspecified options fall back to their documented defaults."""
    opts = get_opts(args=['--prefix', 'one', '--prefix', 'two'])
    assert opts.timeout == 60
    assert opts.dry_run is False
    assert opts.prefix == ['one', 'two']
    assert opts.max_run_time is None
def test_get_opts_with_args(now):
    """--max-run-time is parsed through timedelta_type."""
    patcher = mock.patch(
        'docker_custodian.docker_autostop.timedelta_type',
        autospec=True)
    with patcher as mock_timedelta_type:
        opts = get_opts(args=['--prefix', 'one', '--max-run-time', '24h'])
    assert opts.max_run_time == mock_timedelta_type.return_value
    mock_timedelta_type.assert_called_once_with('24h')
| Yelp/docker-custodian | tests/docker_autostop_test.py | Python | apache-2.0 | 2,726 |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with nodes
"""
from datetime import datetime
import web
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import CollectionHandler
from nailgun.api.v1.handlers.base import content_json
from nailgun.api.v1.handlers.base import SingleHandler
from nailgun.api.v1.validators.network import NetAssignmentValidator
from nailgun.api.v1.validators.node import NodeValidator
from nailgun import consts
from nailgun import objects
from nailgun.objects.serializers.node import NodeInterfacesSerializer
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.logger import logger
from nailgun import notifier
class NodeHandler(SingleHandler):
    """REST handler for a single node."""

    single = objects.Node
    validator = NodeValidator
class NodeCollectionHandler(CollectionHandler):
    """Node collection handler
    """

    fields = ('id', 'name', 'meta', 'progress', 'roles', 'pending_roles',
              'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name',
              'pending_addition', 'pending_deletion', 'os_platform',
              'error_type', 'online', 'cluster', 'uuid', 'network_data')

    validator = NodeValidator
    collection = objects.NodeCollection

    @content_json
    def GET(self):
        """May receive cluster_id parameter to filter list
        of nodes

        :returns: Collection of JSONized Node objects.
        :http: * 200 (OK)
        """
        cluster_id = web.input(cluster_id=None).cluster_id
        nodes = self.collection.eager_nodes_handlers(None)
        # '' selects nodes without a cluster; absence selects all nodes
        if cluster_id == '':
            nodes = nodes.filter_by(cluster_id=None)
        elif cluster_id:
            nodes = nodes.filter_by(cluster_id=cluster_id)
        return self.collection.to_json(nodes)

    @content_json
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        updated_ids = []
        for nd in data:
            node = self.collection.single.get_by_meta(nd)
            if not node:
                raise self.http(404, "Can't find node: {0}".format(nd))
            self.collection.single.update(node, nd)
            updated_ids.append(node.id)

        # eager-load everything that rendering needs
        nodes = self.collection.filter_by_id_list(
            self.collection.eager_nodes_handlers(None),
            updated_ids
        )
        return self.collection.to_json(nodes)
class NodeAgentHandler(BaseHandler):
    """Handles status updates pushed by the nailgun agent."""

    collection = objects.NodeCollection
    validator = NodeValidator

    @content_json
    def PUT(self):
        """:returns: node id.
        :http: * 200 (node are successfully updated)
               * 304 (node data not changed since last request)
               * 400 (invalid nodes data specified)
               * 404 (node not found)
        """
        nd = self.checked_data(
            self.validator.validate_collection_update,
            data=u'[{0}]'.format(web.data()))[0]

        node = self.collection.single.get_by_meta(nd)
        if not node:
            raise self.http(404, "Can't find node: {0}".format(nd))

        node.timestamp = datetime.now()
        if not node.online:
            # the agent contacting us proves the node is reachable again
            node.online = True
            msg = u"Node '{0}' is back online".format(node.human_readable_name)
            logger.info(msg)
            notifier.notify("discover", msg, node_id=node.id)
        db().flush()

        # skip the full update when the agent reports an unchanged checksum
        if 'agent_checksum' in nd and (
            node.agent_checksum == nd['agent_checksum']
        ):
            return {'id': node.id, 'cached': True}

        self.collection.single.update_by_agent(node, nd)
        return {"id": node.id}
class NodeNICsHandler(BaseHandler):
    """Node network interfaces handler
    """

    model = NodeNICInterface
    validator = NetAssignmentValidator
    serializer = NodeInterfacesSerializer

    @content_json
    def GET(self, node_id):
        """:returns: Collection of JSONized Node interfaces.
        :http: * 200 (OK)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(objects.Node, node_id)
        return map(self.render, node.interfaces)

    @content_json
    def PUT(self, node_id):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        interfaces_data = self.checked_data(
            self.validator.validate_structure_and_data, node_id=node_id)
        node_data = {'id': node_id, 'interfaces': interfaces_data}

        objects.Cluster.get_network_manager()._update_attrs(node_data)
        node = self.get_object_or_404(objects.Node, node_id)
        # record the pending change so deployment knows to re-apply networks
        objects.Node.add_pending_change(
            node,
            consts.CLUSTER_CHANGES.interfaces
        )
        return map(self.render, node.interfaces)
class NodeCollectionNICsHandler(BaseHandler):
    """Node collection network interfaces handler
    """
    model = NetworkGroup
    validator = NetAssignmentValidator
    serializer = NodeInterfacesSerializer
    @content_json
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(
            self.validator.validate_collection_structure_and_data)
        updated_nodes_ids = []
        for node_data in data:
            # _update_attrs() applies the interface assignment and returns
            # the id of the node it touched.
            node_id = objects.Cluster.get_network_manager(
            )._update_attrs(node_data)
            updated_nodes_ids.append(node_id)
        # Re-read all updated nodes in a single query for the response body.
        updated_nodes = db().query(Node).filter(
            Node.id.in_(updated_nodes_ids)
        ).all()
        return [
            {
                "id": n.id,
                "interfaces": map(self.render, n.interfaces)
            } for n in updated_nodes
        ]
class NodeNICsDefaultHandler(BaseHandler):
    """Node default network interfaces handler
    """
    @content_json
    def GET(self, node_id):
        """:returns: Collection of default JSONized interfaces for node.
        :http: * 200 (OK)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(objects.Node, node_id)
        default_nets = self.get_default(node)
        return default_nets
    def get_default(self, node):
        # Default network-to-NIC assignment for a clustered node.
        # NOTE: implicitly returns None when the node has no cluster.
        if node.cluster:
            return objects.Node.get_network_manager(
                node
            ).get_default_networks_assignment(node)
class NodeCollectionNICsDefaultHandler(NodeNICsDefaultHandler):
    """Node collection default network interfaces handler
    """
    validator = NetAssignmentValidator
    @content_json
    def GET(self):
        """May receive cluster_id parameter to filter list
        of nodes

        :returns: Collection of JSONized Nodes interfaces.
        :http: * 200 (OK)
               * 404 (node not found in db)
        """
        cluster_id = web.input(cluster_id=None).cluster_id
        if cluster_id == '':
            # explicit empty value selects nodes that have no cluster
            nodes = self.get_object_or_404(objects.Node, cluster_id=None)
        elif cluster_id:
            nodes = self.get_object_or_404(
                objects.Node,
                cluster_id=cluster_id
            )
        else:
            nodes = self.get_object_or_404(objects.Node)
        # NOTE(review): a previous version also looped over the nodes calling
        # self.get_default(self.render(node)) and discarded the result; that
        # dead code (which passed a rendered dict where get_default expects a
        # Node object) has been removed -- the response is unchanged.
        return map(self.render, nodes)
class NodesAllocationStatsHandler(BaseHandler):
    """Node allocation stats handler
    """
    @content_json
    def GET(self):
        """:returns: Total and unallocated nodes count.
        :http: * 200 (OK)
        """
        # A node counts as "unallocated" while it has no cluster assigned.
        node_query = db().query(Node)
        return {
            'total': node_query.count(),
            'unallocated': node_query.filter_by(cluster_id=None).count(),
        }
# --- source: koder-ua/nailgun-fcert :: nailgun/nailgun/api/v1/handlers/node.py (Python, apache-2.0, 8916 bytes) ---
import ConfigParser
import os.path as op
from .data_collection import ta_mod_input as ta_input
from .ta_cloud_connect_client import TACloudConnectClient as CollectorCls
from ..common.lib_util import (
get_main_file, get_app_root_dir, get_mod_input_script_name
)
def _load_options_from_inputs_spec(app_root, stanza_name):
    """Collect option names declared for ``stanza_name`` in README/inputs.conf.spec.

    Options from the spec file's DEFAULT section are always included; options
    from sections named exactly ``stanza_name`` or starting with
    ``stanza_name://`` are added on top.

    :raises RuntimeError: when the spec file or the stanza is missing.
    :returns: set of option names (de-duplicated).
    """
    input_spec_file = 'inputs.conf.spec'
    file_path = op.join(app_root, 'README', input_spec_file)
    if not op.isfile(file_path):
        raise RuntimeError("README/%s doesn't exist" % input_spec_file)
    # allow_no_value: spec files list bare option names without '=' values
    parser = ConfigParser.RawConfigParser(allow_no_value=True)
    parser.read(file_path)
    # NOTE(review): relies on Python 2 where dict.keys() returns a list
    # (extend() is called on it below) -- would break under Python 3.
    options = parser.defaults().keys()
    stanza_prefix = '%s://' % stanza_name
    stanza_exist = False
    for section in parser.sections():
        if section == stanza_name or section.startswith(stanza_prefix):
            options.extend(parser.options(section))
            stanza_exist = True
    if not stanza_exist:
        raise RuntimeError("Stanza %s doesn't exist" % stanza_name)
    # de-duplicate: the same option may appear in several sections
    return set(options)
def _find_ucc_global_config_json(app_root, ucc_config_filename):
"""Find UCC config file from all possible directories"""
candidates = ['local', 'default', 'bin',
op.join('appserver', 'static', 'js', 'build')]
for candidate in candidates:
file_path = op.join(app_root, candidate, ucc_config_filename)
if op.isfile(file_path):
return file_path
raise RuntimeError(
'Unable to load %s from [%s]'
% (ucc_config_filename, ','.join(candidates))
)
def _get_cloud_connect_config_json(script_name):
    """Return the path of '<script_name>.cc.json' located next to the main script."""
    base_dir = op.dirname(get_main_file())
    return op.join(base_dir, '.'.join([script_name, 'cc.json']))
def run(single_instance=False):
    """Entry point for a cloud-connect based modular input.

    Resolves the script name, its cc.json task definition, the app's global
    UCC config and the inputs.conf.spec option names, then hands everything
    to the data-collection main loop.

    :param single_instance: run all input stanzas in one process when True.
    """
    script_name = get_mod_input_script_name()
    cce_config_file = _get_cloud_connect_config_json(script_name)
    app_root = get_app_root_dir()
    ucc_config_path = _find_ucc_global_config_json(
        app_root, 'globalConfig.json'
    )
    schema_params = _load_options_from_inputs_spec(app_root, script_name)
    ta_input.main(
        CollectorCls,
        schema_file_path=ucc_config_path,
        log_suffix=script_name,
        cc_json_file=cce_config_file,
        schema_para_list=schema_params,
        single_instance=single_instance
    )
# --- source: georgestarcher/TA-SyncKVStore :: bin/ta_synckvstore/cloudconnectlib/splunktacollectorlib/cloud_connect_mod_input.py (Python, mit, 2338 bytes) ---
from dartcms import get_model
from dartcms.utils.config import DartCMSConfig
from django.utils.translation import ugettext_lazy as _
from .forms import ProductCatalogForm
app_name = 'catalog'
# Model is resolved through get_model() so a project can swap in its own
# ProductCatalog implementation.
ProductCatalog = get_model('shop', 'ProductCatalog')
# Declarative DartCMS admin config: 'grid' drives the list view,
# 'form' drives the create/edit view.
config = DartCMSConfig({
    'model': ProductCatalog,
    'grid': {
        'grid_columns': [
            {'field': 'name', 'width': '50%'},
            {'field': 'slug', 'width': '40%'},
            {'field': 'is_visible', 'width': '10%'},
        ],
        # Extra per-row action linking each catalog to its sections admin.
        'additional_grid_actions': [
            {'url': 'sections', 'label': _('Sections'),
             'kwarg_name': 'catalog', 'include_urls': 'dartcms.apps.shop.section.urls'}
        ]
    },
    'form': {
        'form_class': ProductCatalogForm
    }
})
urlpatterns = config.get_urls()
# --- source: astrikov-d/dartcms :: dartcms/apps/shop/catalog/urls.py (Python, mit, 796 bytes) ---
from rest_framework import generics
from rest_framework.exceptions import PermissionDenied
from django.utils.timezone import datetime
from .models import PrivateMessage, GroupMessage
from .serializers import PrivateMessageListCreateSerializer, PrivateMessageRetrieveUpdateDestroySerializer
from .serializers import GroupMessageListCreateSerializer, GroupMessageRetrieveUpdateDestroySerializer
##########################
##### PrivateMessage #####
##########################
class PrivateMessageListCreateAPIView(generics.ListCreateAPIView):
    """List private messages of the current user's threads; create new ones."""
    model = PrivateMessage
    serializer_class = PrivateMessageListCreateSerializer
    filter_fields = ('thread',)
    def get_queryset(self):
        # Visibility limited to threads the requesting user participates in.
        return PrivateMessage.objects.filter(thread__participants=self.request.user)
    def perform_create(self, serializer):
        instance = serializer.save()
        # update thread last message datetime
        # NOTE(review): datetime.now() is naive; with USE_TZ=True Django
        # expects timezone.now() here -- confirm project settings.
        instance.thread.last_message = datetime.now()
        instance.thread.save()
class PrivateMessageRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one private message; writes allowed to its sender only."""
    model = PrivateMessage
    serializer_class = PrivateMessageRetrieveUpdateDestroySerializer
    def get_queryset(self):
        # Visibility limited to threads the requesting user participates in.
        return PrivateMessage.objects.filter(thread__participants=self.request.user)
    def update(self, request, *args, **kwargs):
        # Only the author may edit the message.
        if request.user != self.get_object().sender:
            raise PermissionDenied()
        return super(PrivateMessageRetrieveUpdateDestroyAPIView, self).update(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        # Only the author may remove the message.
        if request.user != self.get_object().sender:
            raise PermissionDenied()
        return super(PrivateMessageRetrieveUpdateDestroyAPIView, self).delete(request, *args, **kwargs)
########################
##### GroupMessage #####
########################
class GroupMessageListCreateAPIView(generics.ListCreateAPIView):
    """List group messages of the current user's threads; create new ones."""
    model = GroupMessage
    serializer_class = GroupMessageListCreateSerializer
    filter_fields = ('thread', 'sender', 'text')
    def get_queryset(self):
        # Visibility limited to threads the requesting user participates in.
        return GroupMessage.objects.filter(thread__participants=self.request.user)
class GroupMessageRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one group message; writes allowed to its sender only."""
    model = GroupMessage
    serializer_class = GroupMessageRetrieveUpdateDestroySerializer
    def get_queryset(self):
        # Visibility limited to threads the requesting user participates in.
        return GroupMessage.objects.filter(thread__participants=self.request.user)
    def update(self, request, *args, **kwargs):
        # Only the author may edit the message.
        if request.user != self.get_object().sender:
            raise PermissionDenied()
        return super(GroupMessageRetrieveUpdateDestroyAPIView, self).update(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        # Only the author may remove the message.
        if request.user != self.get_object().sender:
            raise PermissionDenied()
        return super(GroupMessageRetrieveUpdateDestroyAPIView, self).delete(request, *args, **kwargs)
# --- source: olegpshenichniy/truechat :: server/api/message/views.py (Python, mit, 2941 bytes) ---
# coding: utf-8
from ..views import employee
from ..tests.base import BaseTest
from ..tests.helpers import *
from .. import factories
class BaseEmployeeTest(BaseTest):
    """Shared fixtures for the employee view tests.

    Provides the POST parameters used to create/update an employee and the
    matching ORM lookup kwargs used to verify the stored row.
    """
    view_path = None
    def get_update_params(self):
        """Return form POST parameters for creating/updating an employee."""
        division = models.Division.objects.all()[0]
        p = {
            'username': 'ivanov',
            'password1': 't1234567',
            'password2': 't1234567',
            'last_name': 'Ivanov',
            'first_name': 'Ivan',
            'middle_name': 'Ivanovich',
            'divisions': division.id,
            'full_access': division.id,
            'read_access': division.id,
            'is_active': True,
            'can_external': False,
        }
        return p
    def get_ident_emp_param(self):
        """Return ORM filter kwargs identifying the employee created from
        get_update_params()."""
        p = self.get_update_params()
        return {
            'user__username': p['username'],
            'last_name': p['last_name'],
            'middle_name': p['middle_name'],
            'first_name': p['first_name'],
            'divisions__id__in': [p['divisions']],
            'full_access__id__in': [p['full_access']],
            # fixed copy-paste: previously checked p['full_access'] here;
            # behavior is unchanged because both hold the same division id.
            'read_access__id__in': [p['read_access']],
            'user__is_active': p['is_active'],
            'can_external': p['can_external'],
        }
class EmployeeListTest(SortTestMixin, ListAccessTestMixin, FuncAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
    """Employee list view: sorting, access filtering and login requirements."""
    view_path = 'perm_employee_list'
    view_class = employee.List
    factory_class = factories.Employee
class EmployeeDetailTest(ReadAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
    """Employee detail view: read access and login requirements."""
    view_path = 'perm_employee_detail'
    view_class = employee.Detail
    factory_class = factories.Employee
class EmployeeCreateTest(CreateTestMixin, LoginRequiredTestMixin, FuncAccessTestMixin, BaseEmployeeTest):
    """Employee create view: posts fixture params and verifies the new row."""
    view_path = 'perm_employee_create'
    view_class = employee.Create
    def get_create_data(self) -> dict:
        # Reuse the shared fixture parameters for creation.
        return self.get_update_params()
    def get_ident_param(self) -> dict:
        # Lookup kwargs used by CreateTestMixin to find the created row.
        return self.get_ident_emp_param()
class EmployeeUpdateTest(UpdateTestMixin, FuncAccessTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
    """Employee edit view: posts new params and verifies the DB row matches."""
    view_path = 'perm_employee_update'
    view_class = employee.Update
    factory_class = factories.Employee
    def get_update_data(self) -> dict:
        return self.get_update_params()
    def check_updated(self):
        # The updated employee must be findable by every posted attribute.
        self.assertTrue(
            models.Employee.objects.get(
                id=self.get_instance().id,
                **self.get_ident_emp_param()
            )
        )
class EmployeeDeleteTest(LoginRequiredTestMixin, DeleteTestMixin, BaseEmployeeTest):
    """Employee delete view: deletion is exercised on a second, non-logged-in employee."""
    view_path = 'perm_employee_delete'
    view_class = employee.Delete
    factory_class = factories.Employee
    def generate_data(self):
        BaseMixin.generate_data(self)
        # Create an extra employee so the delete target is not the
        # authenticated user itself.
        other_user = User.objects.create_user(username='tester_other', email='other_tester@soft-way.biz', password='123')
        other_employee = models.Employee.objects.create(user=other_user, last_name='other_test', first_name='test', middle_name='test')
        other_employee.full_access.add(self.employee.get_default_division())
    def get_instance(self):
        # Pick any employee other than the logged-in one as delete target.
        other_employee = models.Employee.objects.exclude(id=self.user.employee.id)[0]
        return other_employee
class EmployeePasswordChangeTest(LoginRequiredTestMixin, UpdateTestMixin, BaseEmployeeTest):
    """Password-change view: ensures the stored password hash is replaced."""
    view_path = 'perm_employee_password_change'
    view_class = employee.Update
    factory_class = factories.Employee
    def get_update_data(self) -> dict:
        return {
            'password1': 'new_password',
            'password2': 'new_password',
        }
    def test_update(self):
        old_password = self.get_instance().user.password
        response = self.client.post(self.get_url(), self.get_update_data(), follow=True)
        # Successful change redirects one level up ('..').
        self.assertEqual(response.redirect_chain[0], ('..', 302))
        update_empl = models.Employee.objects.get(id=self.get_instance().id)
        # The stored hash must differ from the pre-change value.
        self.assertNotEqual(update_empl.user.password, old_password)
class EmployeeRolesTest(UpdateTestMixin, LoginRequiredTestMixin, BaseEmployeeTest):
    """Roles-assignment view: posts a role and verifies it is attached."""
    view_path = 'perm_employee_roles'
    view_class = employee.Update
    factory_class = factories.Employee
    def get_update_data(self):
        # Build POST data assigning a freshly created role to the employee.
        division = self.employee.divisions.all()[0]
        role, _created = models.Role.objects.get_or_create(
            name='test_new_role', code='test_code',
            level=9, division=division
        )
        target = self.get_instance()
        target.divisions.add(division)
        return {
            'user': target.user,
            'roles': role.id,
        }
    def check_updated(self):
        # The saved employee must carry the role posted in get_update_data().
        data = self.get_update_data()
        saved = models.Employee.objects.get(id=self.get_instance().id)
        role_ids = saved.roles.all().values_list('id', flat=True)
        self.assertIn(data['roles'], role_ids)
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
'''
This is the thread for the http server North API.
Two thread will be launched, with normal and administrative permissions.
'''
__author__="Alfonso Tierno"
__date__ ="$10-jul-2014 12:07:15$"
import bottle
import yaml
import json
import threading
import datetime
from utils import RADclass
from jsonschema import validate as js_v, exceptions as js_e
import host_thread as ht
from vim_schema import host_new_schema, host_edit_schema, tenant_new_schema, \
tenant_edit_schema, \
flavor_new_schema, flavor_update_schema, \
image_new_schema, image_update_schema, \
server_new_schema, server_action_schema, network_new_schema, network_update_schema, \
port_new_schema, port_update_schema
global my
global url_base
global config_dic
url_base="/openvim"
HTTP_Bad_Request = 400
HTTP_Unauthorized = 401
HTTP_Not_Found = 404
HTTP_Forbidden = 403
HTTP_Method_Not_Allowed = 405
HTTP_Not_Acceptable = 406
HTTP_Request_Timeout = 408
HTTP_Conflict = 409
HTTP_Service_Unavailable = 503
HTTP_Internal_Server_Error= 500
def check_extended(extended, allow_net_attach=False):
    '''Makes and extra checking of extended input that cannot be done using jsonschema
    Attributes:
        allow_net_attach: for allowing or not the uuid field at interfaces
            that are allowed for instance, but not for flavors
    Return: (<0, error_text) if error; (0,None) if not error '''
    if "numas" not in extended: return 0, None
    # id_s accumulates every declared core/thread id across all numas;
    # at the end they must form a contiguous range starting at 0.
    id_s=[]
    numaid=0
    for numa in extended["numas"]:
        # cores / threads / paired-threads are mutually exclusive formats;
        # nb_formats counts how many were supplied for this numa.
        nb_formats = 0
        if "cores" in numa:
            nb_formats += 1
            if "cores-id" in numa:
                if len(numa["cores-id"]) != numa["cores"]:
                    return -HTTP_Bad_Request, "different number of cores-id (%d) than cores (%d) at numa %d" % (len(numa["cores-id"]), numa["cores"],numaid)
                id_s.extend(numa["cores-id"])
        if "threads" in numa:
            nb_formats += 1
            if "threads-id" in numa:
                if len(numa["threads-id"]) != numa["threads"]:
                    return -HTTP_Bad_Request, "different number of threads-id (%d) than threads (%d) at numa %d" % (len(numa["threads-id"]), numa["threads"],numaid)
                id_s.extend(numa["threads-id"])
        if "paired-threads" in numa:
            nb_formats += 1
            if "paired-threads-id" in numa:
                if len(numa["paired-threads-id"]) != numa["paired-threads"]:
                    return -HTTP_Bad_Request, "different number of paired-threads-id (%d) than paired-threads (%d) at numa %d" % (len(numa["paired-threads-id"]), numa["paired-threads"],numaid)
                for pair in numa["paired-threads-id"]:
                    # each entry must be a [thread_a, thread_b] pair
                    if len(pair) != 2:
                        return -HTTP_Bad_Request, "paired-threads-id must contain a list of two elements list at numa %d" % (numaid)
                    id_s.extend(pair)
        if nb_formats > 1:
            return -HTTP_Service_Unavailable, "only one of cores, threads, paired-threads are allowed in this version at numa %d" % numaid
        #check interfaces
        if "interfaces" in numa:
            ifaceid=0
            # names / vpcis track duplicates within this numa
            names=[]
            vpcis=[]
            for interface in numa["interfaces"]:
                # 'uuid' (net attach) is valid for instances, not flavors
                if "uuid" in interface and not allow_net_attach:
                    return -HTTP_Bad_Request, "uuid field is not allowed at numa %d interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                if "mac_address" in interface and interface["dedicated"]=="yes":
                    return -HTTP_Bad_Request, "mac_address can not be set for dedicated (passthrough) at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                if "name" in interface:
                    if interface["name"] in names:
                        return -HTTP_Bad_Request, "name repeated at numa %d, interface %s position %d" % (numaid, interface.get("name",""), ifaceid )
                    names.append(interface["name"])
                if "vpci" in interface:
                    if interface["vpci"] in vpcis:
                        return -HTTP_Bad_Request, "vpci %s repeated at numa %d, interface %s position %d" % (interface["vpci"], numaid, interface.get("name",""), ifaceid )
                    vpcis.append(interface["vpci"])
                ifaceid+=1
        numaid+=1
    if numaid > 1:
        return -HTTP_Service_Unavailable, "only one numa can be defined in this version "
    # every id from 0..len-1 must be present exactly once (contiguous, no gaps)
    for a in range(0,len(id_s)):
        if a not in id_s:
            return -HTTP_Bad_Request, "core/thread identifiers must start at 0 and gaps are not alloed. Missing id number %d" % a
    return 0, None
#
# dictionaries that change from HTTP API to database naming
#
http2db_host={'id':'uuid'}
http2db_tenant={'id':'uuid'}
http2db_flavor={'id':'uuid','imageRef':'image_id'}
http2db_image={'id':'uuid', 'created':'created_at', 'updated':'modified_at', 'public': 'public'}
http2db_server={'id':'uuid','hostId':'host_id','flavorRef':'flavor_id','imageRef':'image_id','created':'created_at'}
http2db_network={'id':'uuid','provider:vlan':'vlan', 'provider:physical': 'provider'}
http2db_port={'id':'uuid', 'network_id':'net_id', 'mac_address':'mac', 'device_owner':'type','device_id':'instance_id','binding:switch_port':'switch_port','binding:vlan':'vlan', 'bandwidth':'Mbps'}
def remove_extra_items(data, schema):
    '''Recursively delete from 'data' any dict key not declared in 'schema'
    (a jsonschema-style dict with 'properties' / 'items').
    'data' is modified in place.
    Return: None if nothing was removed; the single removed item if exactly
    one; otherwise a list describing everything removed (nested removals are
    reported as {key: removed}).'''
    deleted=[]
    if type(data) is tuple or type(data) is list:
        for d in data:
            a= remove_extra_items(d, schema['items'])
            if a is not None: deleted.append(a)
    elif type(data) is dict:
        # iterate over a snapshot of the keys: entries are deleted below and
        # mutating a dict while iterating its live key view is invalid
        for k in list(data.keys()):
            if 'properties' not in schema or k not in schema['properties'].keys():
                del data[k]
                deleted.append(k)
            else:
                a = remove_extra_items(data[k], schema['properties'][k])
                if a is not None: deleted.append({k:a})
    if len(deleted) == 0: return None
    elif len(deleted) == 1: return deleted[0]
    else: return deleted
def delete_nulls(var):
    '''Recursively remove None-valued entries (and containers that become
    empty because of it) from dict 'var', in place.
    Return: True when 'var' itself ended up empty (callers delete it), else False.'''
    if type(var) is dict:
        # snapshot the keys: entries are deleted during the loop and mutating
        # a dict while iterating its live key view is invalid
        for k in list(var.keys()):
            if var[k] is None: del var[k]
            elif type(var[k]) is dict or type(var[k]) is list or type(var[k]) is tuple:
                if delete_nulls(var[k]): del var[k]
        if len(var) == 0: return True
    elif type(var) is list or type(var) is tuple:
        for k in var:
            if type(k) is dict: delete_nulls(k)
        if len(var) == 0: return True
    return False
class httpserver(threading.Thread):
    # Daemon thread that serves the north-bound REST API via bottle.
    # NOTE: Python 2 code (print statements); registers itself in the global
    # config_dic['http_threads'] so route handlers can find their instance.
    def __init__(self, db_conn, name="http", host='localhost', port=8080, admin=False, config_=None):
        '''
        Creates a new thread to attend the http connections
        Attributes:
            db_conn: database connection
            name: name of this thread
            host: ip or name where to listen
            port: port where to listen
            admin: if this has privileges of administrator or not
            config_: unless the first thread must be provided. It is a global dictionary where to allocate the self variable
        '''
        global url_base
        global config_dic
        #initialization
        if config_ is not None:
            config_dic = config_
            if 'http_threads' not in config_dic:
                config_dic['http_threads'] = {}
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        self.db = db_conn
        self.admin = admin
        # avoid name clashes with an already registered thread by suffixing
        # an incrementing number
        if name in config_dic:
            print "httpserver Warning!!! Onether thread with the same name", name
            n=0
            while name+str(n) in config_dic:
                n +=1
            name +=str(n)
        self.name = name
        self.url_preffix = 'http://' + self.host + ':' + str(self.port) + url_base
        config_dic['http_threads'][name] = self
        #Ensure that when the main program exits the thread will also exit
        self.daemon = True
        self.setDaemon(True)
    def run(self):
        # blocking call: serves HTTP until the process exits
        bottle.run(host=self.host, port=self.port, debug=True) #quiet=True
    def gethost(self, host_id):
        # Fetch one host row and format it for the API; aborts the bottle
        # request with the mapped HTTP error on failure.
        result, content = self.db.get_host(host_id)
        if result < 0:
            print "httpserver.gethost error %d %s" % (result, content)
            bottle.abort(-result, content)
        elif result==0:
            print "httpserver.gethost host '%s' not found" % host_id
            bottle.abort(HTTP_Not_Found, content)
        else:
            data={'host' : content}
            convert_boolean(content, ('admin_state_up',) )
            change_keys_http2db(content, http2db_host, reverse=True)
            print data['host']
            return format_out(data)
@bottle.route(url_base + '/', method='GET')
def http_get():
    # Liveness endpoint for the API root.
    print
    return 'works' #TODO: put links or redirection to /openvim???
#
# Util funcions
#
def change_keys_http2db(data, http_db, reverse=False):
    '''Change keys of dictionary data according to the key_dict values
    This allow change from http interface names to database names.
    When reverse is True, the change is otherwise
    Attributes:
        data: can be a dictionary or a list
        http_db: is a dictionary with hhtp names as keys and database names as value
        reverse: by default change is done from http API to database. If True change is done otherwise
    Return: None, but data is modified'''
    if type(data) is tuple or type(data) is list:
        # apply the renaming to each element of the sequence
        for element in data:
            change_keys_http2db(element, http_db, reverse)
    elif type(data) is dict or type(data) is bottle.FormsDict:
        for http_name, db_name in http_db.items():
            if reverse:
                # database naming -> http API naming
                if db_name in data:
                    data[http_name] = data.pop(db_name)
            else:
                # http API naming -> database naming
                if http_name in data:
                    data[db_name] = data.pop(http_name)
def format_out(data):
    '''return string of dictionary data according to requested json, yaml, xml. By default json'''
    # Content negotiation is driven by the request's Accept header; the
    # response content_type is set as a side effect.
    if 'application/yaml' in bottle.request.headers.get('Accept'):
        bottle.response.content_type='application/yaml'
        return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, encoding='utf-8', allow_unicode=True) #, canonical=True, default_style='"'
    else: #by default json
        bottle.response.content_type='application/json'
        #return data #json no style
        return json.dumps(data, indent=4) + "\n"
def format_in(schema):
    # Parse the request body according to its Content-Type (json/yaml),
    # validate it against 'schema' and return the decoded data.
    # Aborts the bottle request with 400/406/501 on any failure.
    try:
        error_text = "Invalid header format "
        format_type = bottle.request.headers.get('Content-Type', 'application/json')
        if 'application/json' in format_type:
            error_text = "Invalid json format "
            #Use the json decoder instead of bottle decoder because it informs about the location of error formats with a ValueError exception
            client_data = json.load(bottle.request.body)
            #client_data = bottle.request.json()
        elif 'application/yaml' in format_type:
            error_text = "Invalid yaml format "
            # NOTE(review): yaml.load on a request body is unsafe for
            # untrusted input; yaml.safe_load would be preferable.
            client_data = yaml.load(bottle.request.body)
        elif format_type == 'application/xml':
            bottle.abort(501, "Content-Type: application/xml not supported yet.")
        else:
            print "HTTP HEADERS: " + str(bottle.request.headers.items())
            bottle.abort(HTTP_Not_Acceptable, 'Content-Type ' + str(format_type) + ' not supported.')
            return
        #if client_data == None:
        #    bottle.abort(HTTP_Bad_Request, "Content error, empty")
        #    return
        #check needed_items
        #print "HTTP input data: ", str(client_data)
        error_text = "Invalid content "
        js_v(client_data, schema)
        return client_data
    except (ValueError, yaml.YAMLError) as exc:
        error_text += str(exc)
        print error_text
        bottle.abort(HTTP_Bad_Request, error_text)
    except js_e.ValidationError as exc:
        print "HTTP validate_in error, jsonschema exception ", exc.message, "at", exc.path
        print "  CONTENT: " + str(bottle.request.body.readlines())
        error_pos = ""
        if len(exc.path)>0: error_pos=" at '" + ":".join(map(str, exc.path)) + "'"
        bottle.abort(HTTP_Bad_Request, error_text + error_pos+": "+exc.message)
    #except:
    #    bottle.abort(HTTP_Bad_Request, "Content error: Failed to parse Content-Type",  error_pos)
    #    raise
def filter_query_string(qs, http2db, allowed):
    '''Process query string (qs) checking that contains only valid tokens for avoiding SQL injection
    Attributes:
        'qs': bottle.FormsDict variable to be processed. None or empty is considered valid
        'allowed': list of allowed string tokens (API http naming). All the keys of 'qs' must be one of 'allowed'
        'http2db': dictionary with change from http API naming (dictionary key) to database naming(dictionary value)
    Return: A tuple with the (select,where,limit) to be use in a database query. All of then transformed to the database naming
        select: list of items to retrieve, filtered by query string 'field=token'. If no 'field' is present, allowed list is returned
        where: dictionary with key, value, taken from the query string token=value. Empty if nothing is provided
        limit: limit dictated by user with the query string 'limit'. 100 by default
    abort if not permitted, using bottel.abort
    '''
    where={}
    limit=100
    select=[]
    if type(qs) is not bottle.FormsDict:
        print '!!!!!!!!!!!!!!invalid query string not a dictionary'
        #bottle.abort(HTTP_Internal_Server_Error, "call programmer")
    else:
        for k in qs:
            if k=='field':
                # 'field' may repeat; every requested field must be allowed
                select += qs.getall(k)
                for v in select:
                    if v not in allowed:
                        bottle.abort(HTTP_Bad_Request, "Invalid query string at 'field="+v+"'")
            elif k=='limit':
                try:
                    limit=int(qs[k])
                except:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at 'limit="+qs[k]+"'")
            else:
                if k not in allowed:
                    bottle.abort(HTTP_Bad_Request, "Invalid query string at '"+k+"="+qs[k]+"'")
                # the literal string "null" selects rows where the column IS NULL
                if qs[k]!="null": where[k]=qs[k]
                else: where[k]=None
    if len(select)==0: select += allowed
    #change from http api to database naming
    for i in range(0,len(select)):
        k=select[i]
        if k in http2db:
            select[i] = http2db[k]
    change_keys_http2db(where, http2db)
    #print "filter_query_string", select,where,limit
    return select,where,limit
def convert_bandwidth(data, reverse=False):
    '''Check the field bandwidth recursively and when found, it removes units and convert to number
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is considered valid
        'reverse': by default convert form str to int (Mbps), if True it convert from number to units
    Return:
        None
    '''
    if type(data) is dict:
        for k in data.keys():
            if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
                convert_bandwidth(data[k], reverse)
        if "bandwidth" in data:
            try:
                value=str(data["bandwidth"])
                if not reverse:
                    # strip the "Gbps"/"Mbps"/"kbps" suffix and normalize to Mbps
                    pos = value.find("bps")
                    if pos>0:
                        if value[pos-1]=="G": data["bandwidth"] =  int(data["bandwidth"][:pos-1]) * 1000
                        elif value[pos-1]=="k": data["bandwidth"]= int(data["bandwidth"][:pos-1]) / 1000
                        else: data["bandwidth"]= int(data["bandwidth"][:pos-1])
                else:
                    # render an Mbps number back into "N Gbps" / "N Mbps" text
                    value = int(data["bandwidth"])
                    if value % 1000 == 0: data["bandwidth"]=str(value/1000) + " Gbps"
                    else: data["bandwidth"]=str(value) + " Mbps"
            except:
                print "convert_bandwidth exception for type", type(data["bandwidth"]), " data", data["bandwidth"]
        return
    if type(data) is tuple or type(data) is list:
        for k in data:
            if type(k) is dict or type(k) is tuple or type(k) is list:
                convert_bandwidth(k, reverse)
def convert_boolean(data, items):
    '''Check recursively the content of data, and if there is an key contained in items, convert value from string to boolean
    It assumes that bandwidth is well formed
    Attributes:
        'data': dictionary bottle.FormsDict variable to be checked. None or empty is consideted valid
        'items': tuple of keys to convert
    Return:
        None
    '''
    if type(data) is dict:
        for key in data.keys():
            nested = data[key]
            if type(nested) is dict or type(nested) is tuple or type(nested) is list:
                convert_boolean(nested, items)
            if key in items and type(data[key]) is str:
                # only the exact literals "true"/"false" are converted
                if data[key] == "false":
                    data[key] = False
                elif data[key] == "true":
                    data[key] = True
    if type(data) is tuple or type(data) is list:
        for element in data:
            if type(element) is dict or type(element) is tuple or type(element) is list:
                convert_boolean(element, items)
def convert_datetime2str(var):
    '''Converts a datetime variable to a string with the format '%Y-%m-%dT%H:%i:%s'
    It enters recursively in the dict var finding this kind of variables
    '''
    if type(var) is dict:
        for key, value in var.items():
            if type(value) is datetime.datetime:
                # replace the datetime in place with its ISO-like text form
                var[key] = value.strftime('%Y-%m-%dT%H:%M:%S')
            elif type(value) is dict or type(value) is list or type(value) is tuple:
                convert_datetime2str(value)
        if len(var) == 0:
            return True
    elif type(var) is list or type(var) is tuple:
        for item in var:
            convert_datetime2str(item)
def check_valid_tenant(my, tenant_id):
    # Validate 'tenant_id' for the request handler 'my'.
    # 'any' is a wildcard reserved for administrators; any other value must
    # exist in the tenants table.
    # Return: (0, None) when valid, (http_error_code, error_text) otherwise.
    if tenant_id=='any':
        if not my.admin:
            return HTTP_Unauthorized, "Needed admin privileges"
    else:
        result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
        if result<=0:
            return HTTP_Not_Found, "tenant '%s' not found" % tenant_id
    return 0, None
def check_valid_uuid(uuid):
    # Return True when 'uuid' is a canonical 8-4-4-4-12 hex UUID string,
    # False otherwise (validated via a jsonschema regex pattern).
    id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
    try:
        js_v(uuid, id_schema)
        return True
    except js_e.ValidationError:
        return False
@bottle.error(400)
@bottle.error(401)
@bottle.error(404)
@bottle.error(403)
@bottle.error(405)
@bottle.error(406)
@bottle.error(408)
@bottle.error(409)
@bottle.error(503)
@bottle.error(500)
def error400(error):
    # Shared bottle error handler: wrap every registered HTTP error in the
    # API's JSON/YAML error envelope instead of bottle's HTML page.
    e={"error:":{"code":error.status_code, "type":error.status, "description":error.body}}
    return format_out(e)
@bottle.hook('after_request')
def enable_cors():
    # Add a permissive CORS header to every response.
    #TODO: Alf: Is it needed??
    bottle.response.headers['Access-Control-Allow-Origin'] = '*'
#
# HOSTS
#
@bottle.route(url_base + '/hosts', method='GET')
def http_get_hosts():
    # List hosts; the query string may filter/select columns and set a limit.
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_host,
            ('id','name','description','status','admin_state_up') )
    # Look up the httpserver instance that owns the current serving thread.
    myself = config_dic['http_threads'][ threading.current_thread().name ]
    result, content = myself.db.get_table(FROM='hosts', SELECT=select_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_hosts Error", content
        bottle.abort(-result, content)
    else:
        convert_boolean(content, ('admin_state_up',) )
        change_keys_http2db(content, http2db_host, reverse=True)
        # attach a self-link for each returned host
        for row in content:
            row['links'] = ( {'href': myself.url_preffix + '/hosts/' + str(row['id']), 'rel': 'bookmark'}, )
        data={'hosts' : content}
        return format_out(data)
@bottle.route(url_base + '/hosts/<host_id>', method='GET')
def http_get_host_id(host_id):
    # Delegate to the owning httpserver instance's gethost() helper.
    my = config_dic['http_threads'][ threading.current_thread().name ]
    return my.gethost(host_id)
@bottle.route(url_base + '/hosts', method='POST')
def http_post_hosts():
    '''insert a host into the database. All resources are got and inserted'''
    # per-thread helper holding the DB connection and the admin flag
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    #parse input data
    http_content = format_in( host_new_schema )
    r = remove_extra_items(http_content, host_new_schema)
    # NOTE(review): log label says "http_post_host_id" but this is http_post_hosts
    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
    change_keys_http2db(http_content['host'], http2db_host)
    host = http_content['host']
    warning_text=""
    # connection data may arrive either inside 'host-data' or at the top level
    if 'host-data' in http_content:
        host.update(http_content['host-data'])
        ip_name=http_content['host-data']['ip_name']
        user=http_content['host-data']['user']
        password=http_content['host-data'].get('password', None)
    else:
        ip_name=host['ip_name']
        user=host['user']
        password=host.get('password', None)
    #fill rad info
    # RADclass connects to the host and discovers its resources (processor,
    # memory, nics, numa topology) — see the parsing of rad_structure below
    rad = RADclass.RADclass()
    (return_status, code) = rad.obtain_RAD(user, password, ip_name)
    #return
    if not return_status:
        print 'http_post_hosts ERROR obtaining RAD', code
        bottle.abort(HTTP_Bad_Request, code)
        return
    # on success 'code' carries warning text, not an error
    warning_text=code
    # NOTE(review): yaml.load without a Loader; input comes from the discovered
    # host, so yaml.safe_load would be the safer choice — confirm before changing
    rad_structure = yaml.load(rad.to_text())
    print 'rad_structure\n---------------------'
    print json.dumps(rad_structure, indent=4)
    print '---------------------'
    #return
    # rank the host by its processor; unknown processors get a default ranking
    WHERE_={"family":rad_structure['processor']['family'], 'manufacturer':rad_structure['processor']['manufacturer'], 'version':rad_structure['processor']['version']}
    result, content = my.db.get_table(FROM='host_ranking',
                                      SELECT=('ranking',),
                                      WHERE=WHERE_)
    if result > 0:
        host['ranking'] = content[0]['ranking']
    else:
        #error_text= "Host " + str(WHERE_)+ " not found in ranking table. Not valid for VIM management"
        #bottle.abort(HTTP_Bad_Request, error_text)
        #return
        warning_text += "Host " + str(WHERE_)+ " not found in ranking table. Assuming lowest value 100\n"
        host['ranking'] = 100 #TODO: as not used in this version, set the lowest value
    features = rad_structure['processor'].get('features', ())
    host['features'] = ",".join(features)
    host['numas'] = []
    # build one entry per NUMA node: cores (with eligibility), sriov-capable
    # physical interfaces and hugepage memory
    for node in rad_structure['resource topology']['nodes'].itervalues():
        interfaces= []
        cores = []
        eligible_cores=[]
        count = 0
        for core in node['cpu']['eligible_cores']:
            eligible_cores.extend(core)
        # each 'core' entry holds two hyperthread ids; mark the ones that are
        # not eligible for VM placement
        for core in node['cpu']['cores']:
            c={'core_id': count, 'thread_id':core[0]}
            if core[0] not in eligible_cores: c['status'] = 'noteligible'
            cores.append(c)
            c={'core_id': count, 'thread_id':core[1]}
            if core[1] not in eligible_cores: c['status'] = 'noteligible'
            cores.append(c)
            count = count+1
        if 'nics' in node:
            # physical (non-virtual) ports only; collect their SR-IOV VFs
            for port_k, port_v in node['nics']['nic 0']['ports'].iteritems():
                if port_v['virtual']:
                    continue
                else:
                    sriovs = []
                    for port_k2, port_v2 in node['nics']['nic 0']['ports'].iteritems():
                        if port_v2['virtual'] and port_v2['PF_pci_id']==port_k:
                            sriovs.append({'pci':port_k2, 'mac':port_v2['mac'], 'source_name':port_v2['source_name']})
                    if len(sriovs)>0:
                        #sort sriov according to pci and rename them to the vf number
                        new_sriovs = sorted(sriovs, key=lambda k: k['pci'])
                        index=0
                        for sriov in new_sriovs:
                            sriov['source_name'] = index
                            index += 1
                        interfaces.append ({'pci':str(port_k), 'Mbps': port_v['speed']/1000000, 'sriovs': new_sriovs, 'mac':port_v['mac'], 'source_name':port_v['source_name']})
        #@TODO the memory reported by the RAD is wrong, at least for IVY1, NFV100
        # node_size is in bytes; convert to GiB (integer division in python2)
        memory=node['memory']['node_size'] / (1024*1024*1024)
        #memory=get_next_2pow(node['memory']['hugepage_nr'])
        host['numas'].append( {'numa_socket': node['id'], 'hugepages': node['memory']['hugepage_nr'], 'memory':memory, 'interfaces': interfaces, 'cores': cores } )
    print json.dumps(host, indent=4)
    #return
    #
    #insert in data base
    result, content = my.db.new_host(host)
    if result >= 0:
        if content['admin_state_up']:
            #create thread
            # spawn the monitoring/management thread for the new host; in test
            # or "OF only" modes the thread runs without touching a real host
            host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
            host_develop_mode = True if config_dic['mode']=='development' else False
            host_develop_bridge_iface = config_dic.get('development_bridge', None)
            thread = ht.host_thread(name=host.get('name',ip_name), user=user, host=ip_name, db=config_dic['db'], db_lock=config_dic['db_lock'],
                    test=host_test_mode, image_path=config_dic['image_path'],
                    version=config_dic['version'], host_id=content['uuid'],
                    develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface )
            thread.start()
            config_dic['host_threads'][ content['uuid'] ] = thread
        #return host data
        change_keys_http2db(content, http2db_host, reverse=True)
        if len(warning_text)>0:
            content["warning"]= warning_text
        data={'host' : content}
        return format_out(data)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return
@bottle.route(url_base + '/hosts/<host_id>', method='PUT')
def http_put_host_id(host_id):
    '''modify a host into the database. All resources are got and inserted'''
    # per-thread helper holding the DB connection and the admin flag
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check permissions
    if not my.admin:
        bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
    #parse input data
    http_content = format_in( host_edit_schema )
    r = remove_extra_items(http_content, host_edit_schema)
    # NOTE(review): log label says "http_post_host_id" but this is the PUT handler
    if r is not None: print "http_post_host_id: Warning: remove extra items ", r
    change_keys_http2db(http_content['host'], http2db_host)
    #insert in data base
    result, content = my.db.edit_host(host_id, http_content['host'])
    if result >= 0:
        convert_boolean(content, ('admin_state_up',) )
        change_keys_http2db(content, http2db_host, reverse=True)
        data={'host' : content}
        #reload thread
        # push the (possibly changed) connection data to the host monitoring
        # thread and ask it to reload its state
        config_dic['host_threads'][host_id].name = content.get('name',content['ip_name'])
        config_dic['host_threads'][host_id].user = content['user']
        config_dic['host_threads'][host_id].host = content['ip_name']
        config_dic['host_threads'][host_id].insert_task("reload")
        #print data
        return format_out(data)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return
@bottle.route(url_base + '/hosts/<host_id>', method='DELETE')
def http_delete_host_id(host_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#check permissions
if not my.admin:
bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
result, content = my.db.delete_row('hosts', host_id)
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
#terminate thread
if host_id in config_dic['host_threads']:
config_dic['host_threads'][host_id].insert_task("exit")
#return data
data={'result' : content}
return format_out(data)
else:
print "http_delete_host_id error",result, content
bottle.abort(-result, content)
return
#
# TENANTS
#
@bottle.route(url_base + '/tenants', method='GET')
def http_get_tenants():
my = config_dic['http_threads'][ threading.current_thread().name ]
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_tenant,
('id','name','description','enabled') )
result, content = my.db.get_table(FROM='tenants', SELECT=select_,WHERE=where_,LIMIT=limit_)
if result < 0:
print "http_get_tenants Error", content
bottle.abort(-result, content)
else:
change_keys_http2db(content, http2db_tenant, reverse=True)
convert_boolean(content, ('enabled',))
data={'tenants' : content}
#data['tenants_links'] = dict([('tenant', row['id']) for row in content])
return format_out(data)
@bottle.route(url_base + '/tenants/<tenant_id>', method='GET')
def http_get_tenant_id(tenant_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
result, content = my.db.get_table(FROM='tenants', SELECT=('uuid','name','description', 'enabled'),WHERE={'uuid': tenant_id} )
if result < 0:
print "http_get_tenant_id error %d %s" % (result, content)
bottle.abort(-result, content)
elif result==0:
print "http_get_tenant_id tenant '%s' not found" % tenant_id
bottle.abort(HTTP_Not_Found, "tenant %s not found" % tenant_id)
else:
change_keys_http2db(content, http2db_tenant, reverse=True)
convert_boolean(content, ('enabled',))
data={'tenant' : content[0]}
#data['tenants_links'] = dict([('tenant', row['id']) for row in content])
return format_out(data)
@bottle.route(url_base + '/tenants', method='POST')
def http_post_tenants():
'''insert a tenant into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#parse input data
http_content = format_in( tenant_new_schema )
r = remove_extra_items(http_content, tenant_new_schema)
if r is not None: print "http_post_tenants: Warning: remove extra items ", r
change_keys_http2db(http_content['tenant'], http2db_tenant)
#insert in data base
result, content = my.db.new_tenant(http_content['tenant'])
if result >= 0:
return http_get_tenant_id(content)
else:
bottle.abort(-result, content)
return
@bottle.route(url_base + '/tenants/<tenant_id>', method='PUT')
def http_put_tenant_id(tenant_id):
'''update a tenant into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#parse input data
http_content = format_in( tenant_edit_schema )
r = remove_extra_items(http_content, tenant_edit_schema)
if r is not None: print "http_put_tenant_id: Warning: remove extra items ", r
change_keys_http2db(http_content['tenant'], http2db_tenant)
#insert in data base
result, content = my.db.update_rows('tenants', http_content['tenant'], WHERE={'uuid': tenant_id}, log=True )
if result >= 0:
return http_get_tenant_id(tenant_id)
else:
bottle.abort(-result, content)
return
@bottle.route(url_base + '/tenants/<tenant_id>', method='DELETE')
def http_delete_tenant_id(tenant_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#check permissions
r, tenants_flavors = my.db.get_table(FROM='tenants_flavors', SELECT=('flavor_id','tenant_id'), WHERE={'tenant_id': tenant_id})
if r<=0:
tenants_flavors=()
r, tenants_images = my.db.get_table(FROM='tenants_images', SELECT=('image_id','tenant_id'), WHERE={'tenant_id': tenant_id})
if r<=0:
tenants_images=()
result, content = my.db.delete_row('tenants', tenant_id)
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
print "alf", tenants_flavors, tenants_images
for flavor in tenants_flavors:
my.db.delete_row_by_key("flavors", "uuid", flavor['flavor_id'])
for image in tenants_images:
my.db.delete_row_by_key("images", "uuid", image['image_id'])
data={'result' : content}
return format_out(data)
else:
print "http_delete_tenant_id error",result, content
bottle.abort(-result, content)
return
#
# FLAVORS
#
@bottle.route(url_base + '/<tenant_id>/flavors', method='GET')
def http_get_flavors(tenant_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
#obtain data
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
('id','name','description','public') )
if tenant_id=='any':
from_ ='flavors'
else:
from_ ='tenants_flavors inner join flavors on tenants_flavors.flavor_id=flavors.uuid'
where_['tenant_id'] = tenant_id
result, content = my.db.get_table(FROM=from_, SELECT=select_, WHERE=where_, LIMIT=limit_)
if result < 0:
print "http_get_flavors Error", content
bottle.abort(-result, content)
else:
change_keys_http2db(content, http2db_flavor, reverse=True)
for row in content:
row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(row['id']) ) ), 'rel':'bookmark' } ]
data={'flavors' : content}
return format_out(data)
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='GET')
def http_get_flavor_id(tenant_id, flavor_id):
    '''Return the detailed description of one flavor of a tenant ('any' skips the ownership check).'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_flavor,
            ('id','name','description','ram', 'vcpus', 'extended', 'disk', 'public') )
    # for 'any' search across all flavors; otherwise restrict to the tenant's ones
    if tenant_id=='any':
        from_ ='flavors'
    else:
        from_ ='tenants_flavors as tf inner join flavors as f on tf.flavor_id=f.uuid'
        where_['tenant_id'] = tenant_id
    where_['uuid'] = flavor_id
    result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_flavor_id error %d %s" % (result, content)
        bottle.abort(-result, content)
    elif result==0:
        print "http_get_flavors_id flavor '%s' not found" % str(flavor_id)
        bottle.abort(HTTP_Not_Found, 'flavor %s not found' % flavor_id)
    else:
        change_keys_http2db(content, http2db_flavor, reverse=True)
        # 'extended' is stored as a JSON string; decode it for the response
        if 'extended' in content[0] and content[0]['extended'] is not None:
            extended = json.loads(content[0]['extended'])
            if 'devices' in extended:
                change_keys_http2db(extended['devices'], http2db_flavor, reverse=True)
            content[0]['extended']=extended
        convert_bandwidth(content[0], reverse=True)
        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'flavors', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
        data={'flavor' : content[0]}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)
@bottle.route(url_base + '/<tenant_id>/flavors', method='POST')
def http_post_flavors(tenant_id):
'''insert a flavor into the database, and attach to tenant.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
http_content = format_in( flavor_new_schema )
r = remove_extra_items(http_content, flavor_new_schema)
if r is not None: print "http_post_flavors: Warning: remove extra items ", r
change_keys_http2db(http_content['flavor'], http2db_flavor)
extended_dict = http_content['flavor'].pop('extended', None)
if extended_dict is not None:
result, content = check_extended(extended_dict)
if result<0:
print "http_post_flavors wrong input extended error %d %s" % (result, content)
bottle.abort(-result, content)
return
convert_bandwidth(extended_dict)
if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
http_content['flavor']['extended'] = json.dumps(extended_dict)
#insert in data base
result, content = my.db.new_flavor(http_content['flavor'], tenant_id)
if result >= 0:
return http_get_flavor_id(tenant_id, content)
else:
print "http_psot_flavors error %d %s" % (result, content)
bottle.abort(-result, content)
return
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='DELETE')
def http_delete_flavor_id(tenant_id, flavor_id):
'''Deletes the flavor_id of a tenant. IT removes from tenants_flavors table.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
return
result, content = my.db.delete_image_flavor('flavor', flavor_id, tenant_id)
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
data={'result' : content}
return format_out(data)
else:
print "http_delete_flavor_id error",result, content
bottle.abort(-result, content)
return
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>/<action>', method='POST')
def http_attach_detach_flavors(tenant_id, flavor_id, action):
    '''attach/detach an existing flavor in this tenant. That is insert/remove at tenants_flavors table.'''
    #TODO alf: not tested at all!!!
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id=='any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action!='attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return
    #Ensure that flavor exist
    # right join so the flavor row appears even when not attached to any tenant
    from_ ='tenants_flavors as tf right join flavors as f on tf.flavor_id=f.uuid'
    where_={'uuid': flavor_id}
    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
    if result==0:
        if action=='attach':
            text_error="Flavor '%s' not found" % flavor_id
        else:
            text_error="Flavor '%s' not found for tenant '%s'" % (flavor_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result>0:
        flavor=content[0]
        if action=='attach':
            # refuse a double attach; private flavors can only be attached by admins
            if flavor['tenant_id']!=None:
                bottle.abort(HTTP_Conflict, "Flavor '%s' already attached to tenant '%s'" % (flavor_id, tenant_id))
            if flavor['public']=='no' and not my.admin:
                #allow only attaching public flavors
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private flavor")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_flavors', {'flavor_id':flavor_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_flavor_id(tenant_id, flavor_id)
        else: #detach
            if flavor['tenant_id']==None:
                bottle.abort(HTTP_Not_Found, "Flavor '%s' not attached to tenant '%s'" % (flavor_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_flavors', WHERE={'flavor_id':flavor_id, 'tenant_id':tenant_id})
            if result>=0:
                if flavor['public']=='no':
                    #try to delete the flavor completely to avoid orphan flavors, IGNORE error
                    my.db.delete_row_by_dict(FROM='flavors', WHERE={'uuid':flavor_id})
                data={'result' : "flavor detached"}
                return format_out(data)
    #if get here is because an error
    print "http_attach_detach_flavors error %d %s" % (result, content)
    bottle.abort(-result, content)
    return
@bottle.route(url_base + '/<tenant_id>/flavors/<flavor_id>', method='PUT')
def http_put_flavor_id(tenant_id, flavor_id):
'''update a flavor_id into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
#parse input data
http_content = format_in( flavor_update_schema )
r = remove_extra_items(http_content, flavor_update_schema)
if r is not None: print "http_put_flavor_id: Warning: remove extra items ", r
change_keys_http2db(http_content['flavor'], http2db_flavor)
extended_dict = http_content['flavor'].pop('extended', None)
if extended_dict is not None:
result, content = check_extended(extended_dict)
if result<0:
print "http_put_flavor_id wrong input extended error %d %s" % (result, content)
bottle.abort(-result, content)
return
convert_bandwidth(extended_dict)
if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_flavor)
http_content['flavor']['extended'] = json.dumps(extended_dict)
#Ensure that flavor exist
where_={'uuid': flavor_id}
if tenant_id=='any':
from_ ='flavors'
else:
from_ ='tenants_flavors as ti inner join flavors as i on ti.flavor_id=i.uuid'
where_['tenant_id'] = tenant_id
result, content = my.db.get_table(SELECT=('public',), FROM=from_, WHERE=where_)
if result==0:
text_error="Flavor '%s' not found" % flavor_id
if tenant_id!='any':
text_error +=" for tenant '%s'" % flavor_id
bottle.abort(HTTP_Not_Found, text_error)
return
elif result>0:
if content[0]['public']=='yes' and not my.admin:
#allow only modifications over private flavors
bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public flavor")
return
#insert in data base
result, content = my.db.update_rows('flavors', http_content['flavor'], {'uuid': flavor_id})
if result < 0:
print "http_put_flavor_id error %d %s" % (result, content)
bottle.abort(-result, content)
return
else:
return http_get_flavor_id(tenant_id, flavor_id)
#
# IMAGES
#
@bottle.route(url_base + '/<tenant_id>/images', method='GET')
def http_get_images(tenant_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
#obtain data
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
('id','name','description','path','public') )
if tenant_id=='any':
from_ ='images'
else:
from_ ='tenants_images inner join images on tenants_images.image_id=images.uuid'
where_['tenant_id'] = tenant_id
result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)
if result < 0:
print "http_get_images Error", content
bottle.abort(-result, content)
else:
change_keys_http2db(content, http2db_image, reverse=True)
#for row in content: row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(row['id']) ) ), 'rel':'bookmark' } ]
data={'images' : content}
return format_out(data)
@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='GET')
def http_get_image_id(tenant_id, image_id):
    '''Return the detailed description of one image of a tenant ('any' skips the ownership check).'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    #obtain data
    select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_image,
            ('id','name','description','progress', 'status','path', 'created', 'updated','public') )
    # for 'any' search across all images; otherwise restrict to the tenant's ones
    if tenant_id=='any':
        from_ ='images'
    else:
        from_ ='tenants_images as ti inner join images as i on ti.image_id=i.uuid'
        where_['tenant_id'] = tenant_id
    where_['uuid'] = image_id
    result, content = my.db.get_table(SELECT=select_, FROM=from_, WHERE=where_, LIMIT=limit_)
    if result < 0:
        print "http_get_images error %d %s" % (result, content)
        bottle.abort(-result, content)
    elif result==0:
        print "http_get_images image '%s' not found" % str(image_id)
        bottle.abort(HTTP_Not_Found, 'image %s not found' % image_id)
    else:
        convert_datetime2str(content)
        change_keys_http2db(content, http2db_image, reverse=True)
        # 'metadata' is stored as a JSON string; decode it for the response
        if 'metadata' in content[0] and content[0]['metadata'] is not None:
            metadata = json.loads(content[0]['metadata'])
            content[0]['metadata']=metadata
        content[0]['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'images', str(content[0]['id']) ) ), 'rel':'bookmark' } ]
        data={'image' : content[0]}
        #data['tenants_links'] = dict([('tenant', row['id']) for row in content])
        return format_out(data)
@bottle.route(url_base + '/<tenant_id>/images', method='POST')
def http_post_images(tenant_id):
'''insert a image into the database, and attach to tenant.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
http_content = format_in(image_new_schema)
r = remove_extra_items(http_content, image_new_schema)
if r is not None: print "http_post_images: Warning: remove extra items ", r
change_keys_http2db(http_content['image'], http2db_image)
metadata_dict = http_content['image'].pop('metadata', None)
if metadata_dict is not None:
http_content['image']['metadata'] = json.dumps(metadata_dict)
#insert in data base
result, content = my.db.new_image(http_content['image'], tenant_id)
if result >= 0:
return http_get_image_id(tenant_id, content)
else:
print "http_post_images error %d %s" % (result, content)
bottle.abort(-result, content)
return
@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='DELETE')
def http_delete_image_id(tenant_id, image_id):
'''Deletes the image_id of a tenant. IT removes from tenants_images table.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
result, content = my.db.delete_image_flavor('image', image_id, tenant_id)
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
data={'result' : content}
return format_out(data)
else:
print "http_delete_image_id error",result, content
bottle.abort(-result, content)
return
@bottle.route(url_base + '/<tenant_id>/images/<image_id>/<action>', method='POST')
def http_attach_detach_images(tenant_id, image_id, action):
    '''attach/detach an existing image in this tenant. That is insert/remove at tenants_images table.'''
    #TODO alf: not tested at all!!!
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
    if tenant_id=='any':
        bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
    #check valid action
    if action!='attach' and action != 'detach':
        bottle.abort(HTTP_Method_Not_Allowed, "actions can be attach or detach")
        return
    #Ensure that image exist
    # right join so the image row appears even when not attached to any tenant
    from_ ='tenants_images as ti right join images as i on ti.image_id=i.uuid'
    where_={'uuid': image_id}
    result, content = my.db.get_table(SELECT=('public','tenant_id'), FROM=from_, WHERE=where_)
    if result==0:
        if action=='attach':
            text_error="Image '%s' not found" % image_id
        else:
            text_error="Image '%s' not found for tenant '%s'" % (image_id, tenant_id)
        bottle.abort(HTTP_Not_Found, text_error)
        return
    elif result>0:
        image=content[0]
        if action=='attach':
            # refuse a double attach; private images can only be attached by admins
            if image['tenant_id']!=None:
                bottle.abort(HTTP_Conflict, "Image '%s' already attached to tenant '%s'" % (image_id, tenant_id))
            if image['public']=='no' and not my.admin:
                #allow only attaching public images
                bottle.abort(HTTP_Unauthorized, "Needed admin rights to attach a private image")
                return
            #insert in data base
            result, content = my.db.new_row('tenants_images', {'image_id':image_id, 'tenant_id': tenant_id})
            if result >= 0:
                return http_get_image_id(tenant_id, image_id)
        else: #detach
            if image['tenant_id']==None:
                bottle.abort(HTTP_Not_Found, "Image '%s' not attached to tenant '%s'" % (image_id, tenant_id))
            result, content = my.db.delete_row_by_dict(FROM='tenants_images', WHERE={'image_id':image_id, 'tenant_id':tenant_id})
            if result>=0:
                if image['public']=='no':
                    #try to delete the image completely to avoid orphan images, IGNORE error
                    my.db.delete_row_by_dict(FROM='images', WHERE={'uuid':image_id})
                data={'result' : "image detached"}
                return format_out(data)
    #if get here is because an error
    print "http_attach_detach_images error %d %s" % (result, content)
    bottle.abort(-result, content)
    return
@bottle.route(url_base + '/<tenant_id>/images/<image_id>', method='PUT')
def http_put_image_id(tenant_id, image_id):
'''update a image_id into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
#parse input data
http_content = format_in( image_update_schema )
r = remove_extra_items(http_content, image_update_schema)
if r is not None: print "http_put_image_id: Warning: remove extra items ", r
change_keys_http2db(http_content['image'], http2db_image)
metadata_dict = http_content['image'].pop('metadata', None)
if metadata_dict is not None:
http_content['image']['metadata'] = json.dumps(metadata_dict)
#Ensure that image exist
where_={'uuid': image_id}
if tenant_id=='any':
from_ ='images'
else:
from_ ='tenants_images as ti inner join images as i on ti.image_id=i.uuid'
where_['tenant_id'] = tenant_id
result, content = my.db.get_table(SELECT=('public',), FROM=from_, WHERE=where_)
if result==0:
text_error="Image '%s' not found" % image_id
if tenant_id!='any':
text_error +=" for tenant '%s'" % image_id
bottle.abort(HTTP_Not_Found, text_error)
return
elif result>0:
if content[0]['public']=='yes' and not my.admin:
#allow only modifications over private images
bottle.abort(HTTP_Unauthorized, "Needed admin rights to edit a public image")
return
#insert in data base
result, content = my.db.update_rows('images', http_content['image'], {'uuid': image_id})
if result < 0:
print "http_put_image_id error %d %s" % (result, content)
bottle.abort(-result, content)
return
else:
return http_get_image_id(tenant_id, image_id)
#
# SERVERS
#
@bottle.route(url_base + '/<tenant_id>/servers', method='GET')
def http_get_servers(tenant_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
return
#obtain data
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_server,
('id','name','description','hostId','imageRef','flavorRef','status', 'tenant_id') )
if tenant_id!='any':
where_['tenant_id'] = tenant_id
result, content = my.db.get_table(SELECT=select_, FROM='instances', WHERE=where_, LIMIT=limit_)
if result < 0:
print "http_get_servers Error", content
bottle.abort(-result, content)
else:
change_keys_http2db(content, http2db_server, reverse=True)
for row in content:
tenant_id = row.pop('tenant_id')
row['links']=[ {'href': "/".join( (my.url_preffix, tenant_id, 'servers', str(row['id']) ) ), 'rel':'bookmark' } ]
data={'servers' : content}
return format_out(data)
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='GET')
def http_get_server_id(tenant_id, server_id):
    '''Return the detailed description of one server instance.'''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #check valid tenant_id
    result,content = check_valid_tenant(my, tenant_id)
    if result != 0:
        bottle.abort(result, content)
        return
    #obtain data
    result, content = my.db.get_instance(server_id)
    if result == 0:
        bottle.abort(HTTP_Not_Found, content)
    elif result >0:
        #change image/flavor-id to id and link
        convert_bandwidth(content, reverse=True)
        convert_datetime2str(content)
        # hide zero ram/vcpus values from the answer
        if content["ram"]==0 : del content["ram"]
        if content["vcpus"]==0 : del content["vcpus"]
        # replace the flavor_id/image_id columns by {'id', 'links'} sub-dicts
        if 'flavor_id' in content:
            if content['flavor_id'] is not None:
                content['flavor'] = {'id':content['flavor_id'],
                        'links':[{'href': "/".join( (my.url_preffix, content['tenant_id'], 'flavors', str(content['flavor_id']) ) ), 'rel':'bookmark'}]
                    }
            del content['flavor_id']
        if 'image_id' in content:
            if content['image_id'] is not None:
                content['image'] = {'id':content['image_id'],
                        'links':[{'href': "/".join( (my.url_preffix, content['tenant_id'], 'images', str(content['image_id']) ) ), 'rel':'bookmark'}]
                    }
            del content['image_id']
        change_keys_http2db(content, http2db_server, reverse=True)
        if 'extended' in content:
            if 'devices' in content['extended']: change_keys_http2db(content['extended']['devices'], http2db_server, reverse=True)
        data={'server' : content}
        return format_out(data)
    else:
        bottle.abort(-result, content)
        return
@bottle.route(url_base + '/<tenant_id>/servers', method='POST')
def http_post_server_id(tenant_id):
'''deploys a new server'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#check valid tenant_id
result,content = check_valid_tenant(my, tenant_id)
if result != 0:
bottle.abort(result, content)
return
if tenant_id=='any':
bottle.abort(HTTP_Bad_Request, "Invalid tenant 'any' with this command")
#chek input
http_content = format_in( server_new_schema )
r = remove_extra_items(http_content, server_new_schema)
if r is not None: print "http_post_serves: Warning: remove extra items ", r
change_keys_http2db(http_content['server'], http2db_server)
extended_dict = http_content['server'].get('extended', None)
if extended_dict is not None:
result, content = check_extended(extended_dict, True)
if result<0:
print "http_post_servers wrong input extended error %d %s" % (result, content)
bottle.abort(-result, content)
return
convert_bandwidth(extended_dict)
if 'devices' in extended_dict: change_keys_http2db(extended_dict['devices'], http2db_server)
server = http_content['server']
server_start = server.get('start', 'yes')
server['tenant_id'] = tenant_id
#check flavor valid and take info
result, content = my.db.get_table(FROM='tenants_flavors as tf join flavors as f on tf.flavor_id=f.uuid',
SELECT=('ram','vcpus','extended'), WHERE={'uuid':server['flavor_id'], 'tenant_id':tenant_id})
if result<=0:
bottle.abort(HTTP_Not_Found, 'flavor_id %s not found' % server['flavor_id'])
return
server['flavor']=content[0]
#check image valid and take info
result, content = my.db.get_table(FROM='tenants_images as ti join images as i on ti.image_id=i.uuid',
SELECT=('path','metadata'), WHERE={'uuid':server['image_id'], 'tenant_id':tenant_id, "status":"ACTIVE"})
if result<=0:
bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % server['image_id'])
return
server['image']=content[0]
if "hosts_id" in server:
result, content = my.db.get_table(FROM='hosts', SELECT=('uuid',), WHERE={'uuid': server['host_id']})
if result<=0:
bottle.abort(HTTP_Not_Found, 'hostId %s not found' % server['host_id'])
return
#print json.dumps(server, indent=4)
result, content = ht.create_server(server, config_dic['db'], config_dic['db_lock'], config_dic['mode']=='normal')
if result >= 0:
#Insert instance to database
nets=[]
print
print "inserting at DB"
print
if server_start == 'no':
content['status'] = 'INACTIVE'
ports_to_free=[]
new_instance_result, new_instance = my.db.new_instance(content, nets, ports_to_free)
if new_instance_result < 0:
print "Error http_post_servers() :", new_instance_result, new_instance
bottle.abort(-new_instance_result, new_instance)
return
print
print "inserted at DB"
print
for port in ports_to_free:
r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
if r < 0:
print ' http_post_servers ERROR RESTORE IFACE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c
#updata nets
for net in nets:
r,c = config_dic['of_thread'].insert_task("update-net", net)
if r < 0:
print ':http_post_servers ERROR UPDATING NETS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c
#look for dhcp ip address
r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": new_instance})
if r2 >0 and config_dic.get("dhcp_server"):
for iface in c2:
if iface["net_id"] in config_dic["dhcp_nets"]:
#print "dhcp insert add task"
r,c = config_dic['dhcp_thread'].insert_task("add", iface["mac"])
if r < 0:
print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c
#Start server
server['uuid'] = new_instance
#server_start = server.get('start', 'yes')
if server_start != 'no':
server['paused'] = True if server_start == 'paused' else False
server['action'] = {"start":None}
server['status'] = "CREATING"
#Program task
r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
if r<0:
my.db.update_rows('instances', {'status':"ERROR"}, {'uuid':server['uuid'], 'last_error':c}, log=True)
return http_get_server_id(tenant_id, new_instance)
else:
bottle.abort(HTTP_Bad_Request, content)
return
def http_server_action(server_id, tenant_id, action):
    '''Perform actions over a server as resume, reboot, terminate, ...

    Loads the instance row (scoped to tenant_id unless it is 'any'),
    validates the action against the current status, resolves the image for
    start/rebuild/createImage, inserts a new image row for createImage,
    queues the work on the owning host thread and, for terminate, releases
    ports, openflow rules and dhcp leases. Returns a formatted HTTP body.
    '''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    server={"uuid": server_id, "action":action}
    where={'uuid': server_id}
    if tenant_id!='any':
        where['tenant_id']= tenant_id
    result, content = my.db.get_table(FROM='instances', WHERE=where)
    if result == 0:
        bottle.abort(HTTP_Not_Found, "server %s not found" % server_id)
        return
    if result < 0:
        print "http_post_server_action error getting data %d %s" % (result, content)
        bottle.abort(HTTP_Internal_Server_Error, content)
        return
    # Merge the DB row into the request payload; from here on tenant_id is
    # the real owner even when the caller used 'any'.
    server.update(content[0])
    tenant_id = server["tenant_id"]
    #TODO check a right content
    new_status = None
    if 'terminate' in action:
        new_status='DELETING'
    elif server['status'] == 'ERROR': #or server['status'] == 'CREATING':
        # An ERROR instance may only be rebuilt or deleted.
        if 'terminate' not in action and 'rebuild' not in action:
            bottle.abort(HTTP_Method_Not_Allowed, "Server is in ERROR status, must be rebuit or deleted ")
            return
#    elif server['status'] == 'INACTIVE':
#        if 'start' not in action and 'createImage' not in action:
#            bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'INACTIVE' status is 'start'")
#            return
#        if 'start' in action:
#            new_status='CREATING'
#            server['paused']='no'
#    elif server['status'] == 'PAUSED':
#        if 'resume' not in action:
#            bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'PAUSED' status is 'resume'")
#            return
#    elif server['status'] == 'ACTIVE':
#        if 'pause' not in action and 'reboot'not in action and 'shutoff'not in action:
#            bottle.abort(HTTP_Method_Not_Allowed, "The only possible action over an instance in 'ACTIVE' status is 'pause','reboot' or 'shutoff'")
#            return
    if 'start' in action or 'createImage' in action or 'rebuild' in action:
        #check image valid and take info
        image_id = server['image_id']
        if 'createImage' in action:
            if 'imageRef' in action['createImage']:
                image_id = action['createImage']['imageRef']
            elif 'disk' in action['createImage']:
                # No explicit image reference: derive the image from the
                # instance's disk devices.
                result, content = my.db.get_table(FROM='instance_devices',
                    SELECT=('image_id','dev'), WHERE={'instance_id':server['uuid'],"type":"disk"})
                if result<=0:
                    bottle.abort(HTTP_Not_Found, 'disk not found for server')
                    return
                elif result>1:
                    # Several disks: the request must single one out.
                    disk_id=None
                    # NOTE(review): this branch is reached when 'imageRef' is
                    # absent, yet it reads action['createImage']['imageRef']
                    # — looks like it should read ['disk']; confirm upstream.
                    if action['createImage']['imageRef']['disk'] != None:
                        for disk in content:
                            if disk['dev'] == action['createImage']['imageRef']['disk']:
                                disk_id = disk['image_id']
                                break
                        if disk_id == None:
                            bottle.abort(HTTP_Not_Found, 'disk %s not found for server' % action['createImage']['imageRef']['disk'])
                            return
                    else:
                        bottle.abort(HTTP_Not_Found, 'more than one disk found for server' )
                        return
                    image_id = disk_id
                else: #result==1
                    image_id = content[0]['image_id']
        # The image must belong to the tenant and be ACTIVE.
        result, content = my.db.get_table(FROM='tenants_images as ti join images as i on ti.image_id=i.uuid',
            SELECT=('path','metadata'), WHERE={'uuid':image_id, 'tenant_id':tenant_id, "status":"ACTIVE"})
        if result<=0:
            bottle.abort(HTTP_Not_Found, 'image_id %s not found or not ACTIVE' % image_id)
            return
        if content[0]['metadata'] is not None:
            try:
                metadata = json.loads(content[0]['metadata'])
            except:
                return -HTTP_Internal_Server_Error, "Can not decode image metadata"
            content[0]['metadata']=metadata
        else:
            content[0]['metadata'] = {}
        server['image']=content[0]
        if 'createImage' in action:
            action['createImage']['source'] = {'image_id': image_id, 'path': content[0]['path']}
    if 'createImage' in action:
        #Create an entry in Database for the new image
        new_image={'status':'BUILD', 'progress': 0 }
        new_image_metadata=content[0]
        if 'metadata' in server['image'] and server['image']['metadata'] != None:
            new_image_metadata.update(server['image']['metadata'])
        # NOTE(review): the next assignment discards the metadata merged just
        # above, keeping only 'use_incremental' — looks unintended; confirm
        # before changing, as fixing it would alter created-image metadata.
        new_image_metadata = {"use_incremental":"no"}
        if 'metadata' in action['createImage']:
            new_image_metadata.update(action['createImage']['metadata'])
        new_image['metadata'] = json.dumps(new_image_metadata)
        new_image['name'] = action['createImage'].get('name', None)
        new_image['description'] = action['createImage'].get('description', None)
        new_image['uuid']=my.db.new_uuid()
        if 'path' in action['createImage']:
            new_image['path'] = action['createImage']['path']
        else:
            new_image['path']="/provisional/path/" + new_image['uuid']
        result, image_uuid = my.db.new_image(new_image, tenant_id)
        if result<=0:
            bottle.abort(HTTP_Bad_Request, 'Error: ' + image_uuid)
            return
        server['new_image'] = new_image
    #Program task
    r,c = config_dic['host_threads'][ server['host_id'] ].insert_task( 'instance',server )
    if r<0:
        print "Task queue full at host ", server['host_id']
        bottle.abort(HTTP_Request_Timeout, c)
    if 'createImage' in action and result >= 0:
        return http_get_image_id(tenant_id, image_uuid)
    #Update DB only for CREATING or DELETING status
    data={'result' : 'in process'}
    if new_status != None and new_status == 'DELETING':
        nets=[]
        ports_to_free=[]
        #look for dhcp ip address
        r2, c2 = my.db.get_table(FROM="ports", SELECT=["mac", "net_id"], WHERE={"instance_id": server_id})
        r,c = my.db.delete_instance(server_id, tenant_id, nets, ports_to_free, "requested by http")
        # Give back the host ifaces used by the deleted instance.
        for port in ports_to_free:
            r1,c1 = config_dic['host_threads'][ server['host_id'] ].insert_task( 'restore-iface',*port )
            if r1 < 0:
                print ' http_post_server_action error at server deletion ERROR resore-iface !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c1
                data={'result' : 'deleting in process, but ifaces cannot be restored!!!!!'}
        # Refresh openflow rules of every net the instance touched.
        for net in nets:
            r1,c1 = config_dic['of_thread'].insert_task("update-net", net)
            if r1 < 0:
                print ' http_post_server_action error at server deletion ERROR UPDATING NETS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c1
                data={'result' : 'deleting in process, but openflow rules cannot be deleted!!!!!'}
        #look for dhcp ip address
        if r2 >0 and config_dic.get("dhcp_server"):
            for iface in c2:
                if iface["net_id"] in config_dic["dhcp_nets"]:
                    r,c = config_dic['dhcp_thread'].insert_task("del", iface["mac"])
                    #print "dhcp insert del task"
                    if r < 0:
                        print ':http_post_servers ERROR UPDATING dhcp_server !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' + c
    return format_out(data)
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>', method='DELETE')
def http_delete_server_id(tenant_id, server_id):
    '''Delete a server: validate the tenant, then delegate to the generic
    action handler with a 'terminate' action.
    '''
    thread_ctx = config_dic['http_threads'][ threading.current_thread().name ]
    # Reject early when the tenant is unknown or disabled.
    status, detail = check_valid_tenant(thread_ctx, tenant_id)
    if status == 0:
        return http_server_action(server_id, tenant_id, {"terminate": None})
    bottle.abort(status, detail)
    return
@bottle.route(url_base + '/<tenant_id>/servers/<server_id>/action', method='POST')
def http_post_server_action(tenant_id, server_id):
    '''Run an action (start, pause, reboot, terminate, ...) over a server.'''
    thread_ctx = config_dic['http_threads'][ threading.current_thread().name ]
    # Validate the tenant before reading the request body.
    status, detail = check_valid_tenant(thread_ctx, tenant_id)
    if status != 0:
        bottle.abort(status, detail)
        return
    # Parse and schema-validate the action payload.
    action_body = format_in( server_action_schema )
    return http_server_action(server_id, tenant_id, action_body)
#
# NETWORKS
#
@bottle.route(url_base + '/networks', method='GET')
def http_get_networks():
my = config_dic['http_threads'][ threading.current_thread().name ]
#obtain data
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_network,
('id','name','tenant_id','type',
'shared','provider:vlan','status','last_error','admin_state_up','provider:physical') )
#TODO temporally remove tenant_id
if "tenant_id" in where_:
del where_["tenant_id"]
result, content = my.db.get_table(SELECT=select_, FROM='nets', WHERE=where_, LIMIT=limit_)
if result < 0:
print "http_get_networks error %d %s" % (result, content)
bottle.abort(-result, content)
else:
convert_boolean(content, ('shared', 'admin_state_up', 'enable_dhcp') )
delete_nulls(content)
change_keys_http2db(content, http2db_network, reverse=True)
data={'networks' : content}
return format_out(data)
@bottle.route(url_base + '/networks/<network_id>', method='GET')
def http_get_network_id(network_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#obtain data
where_ = bottle.request.query
where_['uuid'] = network_id
result, content = my.db.get_table(FROM='nets', WHERE=where_, LIMIT=100)
if result < 0:
print "http_get_networks_id error %d %s" % (result, content)
bottle.abort(-result, content)
elif result==0:
print "http_get_networks_id network '%s' not found" % network_id
bottle.abort(HTTP_Not_Found, 'network %s not found' % network_id)
else:
convert_boolean(content, ('shared', 'admin_state_up', 'enale_dhcp') )
change_keys_http2db(content, http2db_network, reverse=True)
#get ports
result, ports = my.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
WHERE={'net_id': network_id}, LIMIT=100)
if len(ports) > 0:
content[0]['ports'] = ports
delete_nulls(content[0])
data={'network' : content[0]}
return format_out(data)
@bottle.route(url_base + '/networks', method='POST')
def http_post_networks():
    '''Insert a network into the database.

    Resolves optional bind_net/bind_type (including the implicit
    "name:<vlan>" convention), assigns a pre-provisioned bridge or a free
    vlan depending on the net type, inserts the row and registers the net
    with the dhcp server when configured.
    '''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #parse input data
    http_content = format_in( network_new_schema )
    r = remove_extra_items(http_content, network_new_schema)
    if r is not None: print "http_post_networks: Warning: remove extra items ", r
    change_keys_http2db(http_content['network'], http2db_network)
    network=http_content['network']
    #check valid tenant_id
    tenant_id= network.get('tenant_id')
    if tenant_id!=None:
        result, _ = my.db.get_table(FROM='tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id,"enabled":True})
        if result<=0:
            bottle.abort(HTTP_Not_Found, 'tenant %s not found or not enabled' % tenant_id)
            return
    bridge_net = None
    #check valid params
    net_provider = network.get('provider')
    net_type = network.get('type')
    net_vlan = network.get("vlan")
    net_bind_net = network.get("bind_net")
    net_bind_type= network.get("bind_type")
    name = network["name"]
    #check if network name ends with :<vlan_tag> and network exist in order to make and automated bindning
    vlan_index =name.rfind(":")
    if net_bind_net==None and net_bind_type==None and vlan_index > 1:
        try:
            vlan_tag = int(name[vlan_index+1:])
            if vlan_tag >0 and vlan_tag < 4096:
                net_bind_net = name[:vlan_index]
                net_bind_type = "vlan:" + name[vlan_index+1:]
        except:
            # Suffix after ':' is not an integer: no implicit binding.
            pass
    if net_bind_net != None:
        #look for a valid net: bind_net may be given as uuid or as name
        if check_valid_uuid(net_bind_net):
            net_bind_key = "uuid"
        else:
            net_bind_key = "name"
        result, content = my.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net} )
        if result<0:
            bottle.abort(HTTP_Internal_Server_Error, 'getting nets from db ' + content)
            return
        elif result==0:
            bottle.abort(HTTP_Bad_Request, "bind_net %s '%s'not found" % (net_bind_key, net_bind_net) )
            return
        elif result>1:
            bottle.abort(HTTP_Bad_Request, "more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net) )
            return
        network["bind_net"] = content[0]["uuid"]
    if net_bind_type != None:
        # bind_type must be 'vlan:<tag>' with tag in [1, 4095].
        if net_bind_type[0:5] != "vlan:":
            bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:<tag>'")
            return
        if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:])<=0 :
            bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095")
            return
        network["bind_type"] = net_bind_type
    if net_provider!=None:
        # Openflow providers need dataplane net types; anything else needs a
        # bridge net type. Missing type defaults accordingly.
        if net_provider[:9]=="openflow:":
            if net_type!=None:
                if net_type!="ptp" and net_type!="data":
                    bottle.abort(HTTP_Bad_Request, "Only 'ptp' or 'data' net types can be bound to 'openflow'")
            else:
                net_type='data'
        else:
            if net_type!=None:
                if net_type!="bridge_man" and net_type!="bridge_data":
                    bottle.abort(HTTP_Bad_Request, "Only 'bridge_man' or 'bridge_data' net types can be bound to 'bridge', 'macvtap' or 'default")
            else:
                net_type='bridge_man'
    if net_type==None:
        net_type='bridge_man'
    if net_provider != None:
        if net_provider[:7]=='bridge:':
            #check it is one of the pre-provisioned bridges
            bridge_net_name = net_provider[7:]
            for brnet in config_dic['bridge_nets']:
                if brnet[0]==bridge_net_name: # free
                    if brnet[3] != None:
                        bottle.abort(HTTP_Conflict, "invalid 'provider:physical', bridge '%s' is already used" % bridge_net_name)
                        return
                    bridge_net=brnet
                    net_vlan = brnet[1]
                    break
#            if bridge_net==None:
#                bottle.abort(HTTP_Bad_Request, "invalid 'provider:physical', bridge '%s' is not one of the provisioned 'bridge_ifaces' in the configuration file" % bridge_net_name)
#                return
    elif net_type=='bridge_data' or net_type=='bridge_man':
        #look for a free precreated nets
        # bridge_man takes the slowest free bridge, bridge_data the fastest
        # (brnet layout: [name, vlan, speed, assigned_net]).
        for brnet in config_dic['bridge_nets']:
            if brnet[3]==None: # free
                if bridge_net != None:
                    if net_type=='bridge_man': #look for the smaller speed
                        if brnet[2] < bridge_net[2]: bridge_net = brnet
                    else: #look for the larger speed
                        if brnet[2] > bridge_net[2]: bridge_net = brnet
                else:
                    bridge_net = brnet
                    net_vlan = brnet[1]
        if bridge_net==None:
            bottle.abort(HTTP_Bad_Request, "Max limits of bridge networks reached. Future versions of VIM will overcome this limit")
            return
        else:
            print "using net", bridge_net
            net_provider = "bridge:"+bridge_net[0]
            net_vlan = bridge_net[1]
    if net_vlan==None and (net_type=="data" or net_type=="ptp"):
        # Dataplane nets without an explicit vlan get a free one from the DB.
        net_vlan = my.db.get_free_net_vlan()
        if net_vlan < 0:
            bottle.abort(HTTP_Internal_Server_Error, "Error getting an available vlan")
            return
    network['provider'] = net_provider
    network['type'] = net_type
    network['vlan'] = net_vlan
    result, content = my.db.new_row('nets', network, True, True)
    if result >= 0:
        if bridge_net!=None:
            # Mark the chosen pre-provisioned bridge as used by this net.
            bridge_net[3] = content
        if config_dic.get("dhcp_server"):
            if network["name"] in config_dic["dhcp_server"].get("nets", () ):
                config_dic["dhcp_nets"].append(content)
                print "dhcp_server: add new net", content
            elif bridge_net != None and bridge_net[0] in config_dic["dhcp_server"].get("bridge_ifaces", () ):
                config_dic["dhcp_nets"].append(content)
                print "dhcp_server: add new net", content
        return http_get_network_id(content)
    else:
        print "http_post_networks error %d %s" % (result, content)
        bottle.abort(-result, content)
        return
@bottle.route(url_base + '/networks/<network_id>', method='PUT')
def http_put_network_id(network_id):
'''update a network_id into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#parse input data
http_content = format_in( network_update_schema )
r = remove_extra_items(http_content, network_update_schema)
change_keys_http2db(http_content['network'], http2db_network)
network=http_content['network']
#Look for the previous data
where_ = {'uuid': network_id}
result, network_old = my.db.get_table(FROM='nets', WHERE=where_)
if result < 0:
print "http_put_network_id error %d %s" % (result, network_old)
bottle.abort(-result, network_old)
return
elif result==0:
print "http_put_network_id network '%s' not found" % network_id
bottle.abort(HTTP_Not_Found, 'network %s not found' % network_id)
return
#get ports
nbports, content = my.db.get_table(FROM='ports', SELECT=('uuid as port_id',),
WHERE={'net_id': network_id}, LIMIT=100)
if result < 0:
print "http_put_network_id error %d %s" % (result, network_old)
bottle.abort(-result, content)
return
if nbports>0:
if 'type' in network and network['type'] != network_old[0]['type']:
bottle.abort(HTTP_Method_Not_Allowed, "Can not change type of network while having ports attached")
if 'vlan' in network and network['vlan'] != network_old[0]['vlan']:
bottle.abort(HTTP_Method_Not_Allowed, "Can not change vlan of network while having ports attached")
#check valid params
net_provider = network.get('provider', network_old[0]['provider'])
net_type = network.get('type', network_old[0]['type'])
net_bind_net = network.get("bind_net")
net_bind_type= network.get("bind_type")
if net_bind_net != None:
#look for a valid net
if check_valid_uuid(net_bind_net):
net_bind_key = "uuid"
else:
net_bind_key = "name"
result, content = my.db.get_table(FROM='nets', WHERE={net_bind_key: net_bind_net} )
if result<0:
bottle.abort(HTTP_Internal_Server_Error, 'getting nets from db ' + content)
return
elif result==0:
bottle.abort(HTTP_Bad_Request, "bind_net %s '%s'not found" % (net_bind_key, net_bind_net) )
return
elif result>1:
bottle.abort(HTTP_Bad_Request, "more than one bind_net %s '%s' found, use uuid" % (net_bind_key, net_bind_net) )
return
network["bind_net"] = content[0]["uuid"]
if net_bind_type != None:
if net_bind_type[0:5] != "vlan:":
bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:<tag>'")
return
if int(net_bind_type[5:]) > 4095 or int(net_bind_type[5:])<=0 :
bottle.abort(HTTP_Bad_Request, "bad format for 'bind_type', must be 'vlan:<tag>' with a tag between 1 and 4095")
return
if net_provider!=None:
if net_provider[:9]=="openflow:":
if net_type!="ptp" and net_type!="data":
bottle.abort(HTTP_Bad_Request, "Only 'ptp' or 'data' net types can be bound to 'openflow'")
else:
if net_type!="bridge_man" and net_type!="bridge_data":
bottle.abort(HTTP_Bad_Request, "Only 'bridge_man' or 'bridge_data' net types can be bound to 'bridge', 'macvtap' or 'default")
#insert in data base
result, content = my.db.update_rows('nets', network, WHERE={'uuid': network_id}, log=True )
if result >= 0:
if result>0: # and nbports>0 and 'admin_state_up' in network and network['admin_state_up'] != network_old[0]['admin_state_up']:
r,c = config_dic['of_thread'].insert_task("update-net", network_id)
if r < 0:
print "http_put_network_id error while launching openflow rules"
bottle.abort(HTTP_Internal_Server_Error, c)
if config_dic.get("dhcp_server"):
if network_id in config_dic["dhcp_nets"]:
config_dic["dhcp_nets"].remove(network_id)
print "dhcp_server: delete net", network_id
if network.get("name", network_old["name"]) in config_dic["dhcp_server"].get("nets", () ):
config_dic["dhcp_nets"].append(network_id)
print "dhcp_server: add new net", network_id
else:
net_bind = network.get("bind", network_old["bind"] )
if net_bind and net_bind[:7]=="bridge:" and net_bind[7:] in config_dic["dhcp_server"].get("bridge_ifaces", () ):
config_dic["dhcp_nets"].append(network_id)
print "dhcp_server: add new net", network_id
return http_get_network_id(network_id)
else:
bottle.abort(-result, content)
return
@bottle.route(url_base + '/networks/<network_id>', method='DELETE')
def http_delete_network_id(network_id):
'''delete a network_id from the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
#delete from the data base
result, content = my.db.delete_row('nets', network_id )
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
for brnet in config_dic['bridge_nets']:
if brnet[3]==network_id:
brnet[3]=None
break
if config_dic.get("dhcp_server") and network_id in config_dic["dhcp_nets"]:
config_dic["dhcp_nets"].remove(network_id)
print "dhcp_server: delete net", network_id
data={'result' : content}
return format_out(data)
else:
print "http_delete_network_id error",result, content
bottle.abort(-result, content)
return
#
# OPENFLOW
#
@bottle.route(url_base + '/networks/<network_id>/openflow', method='GET')
def http_get_openflow_id(network_id):
    '''Return the openflow rules stored for one network ('all' for every net).'''
    thread_ctx = config_dic['http_threads'][ threading.current_thread().name ]
    # 'all' means no net filter at all.
    filter_ = {} if network_id == 'all' else {"net_id": network_id}
    rc, rules = thread_ctx.db.get_table(SELECT=("name","net_id","priority","vlan_id","ingress_port","src_mac","dst_mac","actions"),
                                        WHERE=filter_, FROM='of_flows')
    if rc < 0:
        bottle.abort(-rc, rules)
        return
    return format_out({'openflow-rules' : rules})
@bottle.route(url_base + '/networks/<network_id>/openflow', method='PUT')
def http_put_openflow_id(network_id):
'''To make actions over the net. The action is to reinstall the openflow rules
network_id can be 'all'
'''
my = config_dic['http_threads'][ threading.current_thread().name ]
if not my.admin:
bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
return
#ignore input data
if network_id=='all':
where_={}
else:
where_={"uuid": network_id}
result, content = my.db.get_table(SELECT=("uuid","type"), WHERE=where_, FROM='nets')
if result < 0:
bottle.abort(-result, content)
return
for net in content:
if net["type"]!="ptp" and net["type"]!="data":
result-=1
continue
r,c = config_dic['of_thread'].insert_task("update-net", net['uuid'])
if r < 0:
print "http_put_openflow_id error while launching openflow rules"
bottle.abort(HTTP_Internal_Server_Error, c)
data={'result' : str(result)+" nets updates"}
return format_out(data)
@bottle.route(url_base + '/networks/openflow/clear', method='DELETE')
@bottle.route(url_base + '/networks/clear/openflow', method='DELETE')
def http_clear_openflow_rules():
'''To make actions over the net. The action is to delete ALL openflow rules
'''
my = config_dic['http_threads'][ threading.current_thread().name ]
if not my.admin:
bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
return
#ignore input data
r,c = config_dic['of_thread'].insert_task("clear-all")
if r < 0:
print "http_delete_openflow_id error while launching openflow rules"
bottle.abort(HTTP_Internal_Server_Error, c)
return
data={'result' : " Clearing openflow rules in process"}
return format_out(data)
@bottle.route(url_base + '/networks/openflow/ports', method='GET')
def http_get_openflow_ports():
    '''Expose the switch-port name mapping known by the openflow connector.'''
    port_map = config_dic['of_thread'].OF_connector.pp2ofi
    return format_out({'ports' : port_map})
#
# PORTS
#
@bottle.route(url_base + '/ports', method='GET')
def http_get_ports():
#obtain data
my = config_dic['http_threads'][ threading.current_thread().name ]
select_,where_,limit_ = filter_query_string(bottle.request.query, http2db_port,
('id','name','tenant_id','network_id','vpci','mac_address','device_owner','device_id',
'binding:switch_port','binding:vlan','bandwidth','status','admin_state_up','ip_address') )
#result, content = my.db.get_ports(where_)
result, content = my.db.get_table(SELECT=select_, WHERE=where_, FROM='ports',LIMIT=limit_)
if result < 0:
print "http_get_ports Error", result, content
bottle.abort(-result, content)
return
else:
convert_boolean(content, ('admin_state_up',) )
delete_nulls(content)
change_keys_http2db(content, http2db_port, reverse=True)
data={'ports' : content}
return format_out(data)
@bottle.route(url_base + '/ports/<port_id>', method='GET')
def http_get_port_id(port_id):
my = config_dic['http_threads'][ threading.current_thread().name ]
#obtain data
result, content = my.db.get_table(WHERE={'uuid': port_id}, FROM='ports')
if result < 0:
print "http_get_ports error", result, content
bottle.abort(-result, content)
elif result==0:
print "http_get_ports port '%s' not found" % str(port_id)
bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id)
else:
convert_boolean(content, ('admin_state_up',) )
delete_nulls(content)
change_keys_http2db(content, http2db_port, reverse=True)
data={'port' : content[0]}
return format_out(data)
@bottle.route(url_base + '/ports', method='POST')
def http_post_ports():
'''insert an external port into the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
if not my.admin:
bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
#parse input data
http_content = format_in( port_new_schema )
r = remove_extra_items(http_content, port_new_schema)
if r is not None: print "http_post_ports: Warning: remove extra items ", r
change_keys_http2db(http_content['port'], http2db_port)
port=http_content['port']
port['type'] = 'external'
if 'net_id' in port and port['net_id'] == None:
del port['net_id']
if 'net_id' in port:
#check that new net has the correct type
result, new_net = my.db.check_target_net(port['net_id'], None, 'external' )
if result < 0:
bottle.abort(HTTP_Bad_Request, new_net)
return
#insert in data base
result, uuid = my.db.new_row('ports', port, True, True)
if result > 0:
if 'net_id' in port:
r,c = config_dic['of_thread'].insert_task("update-net", port['net_id'])
if r < 0:
print "http_post_ports error while launching openflow rules"
bottle.abort(HTTP_Internal_Server_Error, c)
return http_get_port_id(uuid)
else:
bottle.abort(-result, uuid)
return
@bottle.route(url_base + '/ports/<port_id>', method='PUT')
def http_put_port_id(port_id):
    '''Update a port in the database.

    Admin-only fields are guarded; when net_id changes the target net is
    validated, SR-IOV VF vlans are adjusted, and update tasks are queued
    for the affected nets and the hosting hypervisor.
    '''
    my = config_dic['http_threads'][ threading.current_thread().name ]
    #parse input data
    http_content = format_in( port_update_schema )
    change_keys_http2db(http_content['port'], http2db_port)
    port_dict=http_content['port']
    #Look for the previous port data
    where_ = {'uuid': port_id}
    result, content = my.db.get_table(FROM="ports",WHERE=where_)
    if result < 0:
        print "http_put_port_id error", result, content
        bottle.abort(-result, content)
        return
    elif result==0:
        print "http_put_port_id port '%s' not found" % port_id
        bottle.abort(HTTP_Not_Found, 'port %s not found' % port_id)
        return
    print port_dict
    # These fields may only be modified by an admin.
    for k in ('vlan','switch_port','mac_address', 'tenant_id'):
        if k in port_dict and not my.admin:
            bottle.abort(HTTP_Unauthorized, "Needed admin privileges for changing " + k)
            return
    port=content[0]
    #change_keys_http2db(port, http2db_port, reverse=True)
    nets = []
    host_id = None
    # result doubles as a success flag for the validations below; it stays 1
    # when no net change is requested.
    result=1
    if 'net_id' in port_dict:
        #change of net.
        old_net = port.get('net_id', None)
        new_net = port_dict['net_id']
        if old_net != new_net:
            if new_net is not None: nets.append(new_net) #put first the new net, so that new openflow rules are created before removing the old ones
            if old_net is not None: nets.append(old_net)
            if port['type'] == 'instance:bridge':
                bottle.abort(HTTP_Forbidden, "bridge interfaces cannot be attached to a different net")
                return
            elif port['type'] == 'external':
                if not my.admin:
                    bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
                    return
            else:
                if new_net != None:
                    #check that new net has the correct type
                    result, new_net_dict = my.db.check_target_net(new_net, None, port['type'] )
                #change VLAN for SR-IOV ports
                if result>=0 and port["type"]=="instance:data" and port["model"]=="VF": #TODO consider also VFnotShared
                    if new_net == None:
                        port_dict["vlan"] = None
                    else:
                        port_dict["vlan"] = new_net_dict["vlan"]
                #get host where this VM is allocated
                result, content = my.db.get_table(FROM="instances",WHERE={"uuid":port["instance_id"]})
                if result<0:
                    print "http_put_port_id database error", content
                elif result>0:
                    host_id = content[0]["host_id"]
    #insert in data base
    if result >= 0:
        result, content = my.db.update_rows('ports', port_dict, WHERE={'uuid': port_id}, log=False )
    #Insert task to complete actions
    if result > 0:
        # Refresh openflow rules of the involved nets (new net first).
        for net_id in nets:
            r,v = config_dic['of_thread'].insert_task("update-net", net_id)
            if r<0: print "Error ********* http_put_port_id update_of_flows: ", v
            #TODO Do something if fails
        if host_id != None:
            # Tell the hosting hypervisor to re-plug the interface.
            config_dic['host_threads'][host_id].insert_task("edit-iface", port_id, old_net, new_net)
    if result >= 0:
        return http_get_port_id(port_id)
    else:
        bottle.abort(HTTP_Bad_Request, content)
        return
@bottle.route(url_base + '/ports/<port_id>', method='DELETE')
def http_delete_port_id(port_id):
'''delete a port_id from the database.'''
my = config_dic['http_threads'][ threading.current_thread().name ]
if not my.admin:
bottle.abort(HTTP_Unauthorized, "Needed admin privileges")
return
#Look for the previous port data
where_ = {'uuid': port_id, "type": "external"}
result, ports = my.db.get_table(WHERE=where_, FROM='ports',LIMIT=100)
if result<=0:
print "http_delete_port_id port '%s' not found" % port_id
bottle.abort(HTTP_Not_Found, 'port %s not found or device_owner is not external' % port_id)
return
#delete from the data base
result, content = my.db.delete_row('ports', port_id )
if result == 0:
bottle.abort(HTTP_Not_Found, content)
elif result >0:
network = ports[0].get('net_id', None)
if network is not None:
#change of net.
r,c = config_dic['of_thread'].insert_task("update-net", network)
if r<0: print "!!!!!! http_delete_port_id update_of_flows error", r, c
data={'result' : content}
return format_out(data)
else:
print "http_delete_port_id error",result, content
bottle.abort(-result, content)
return
| nfvlabs/openmano | openvim/httpserver.py | Python | apache-2.0 | 95,390 |
# lint-amnesty, pylint: disable=missing-module-docstring
class Creator(object):
    """Data descriptor that funnels attribute writes through ``field.to_python``.

    Installing an instance of this class on a model class makes every write
    to that attribute pass through the field's ``to_python`` conversion
    before being stored in the instance ``__dict__``.
    """

    def __init__(self, field):
        # The field supplies both the attribute name and the converter.
        self.field = field

    def __get__(self, obj, type=None):  # lint-amnesty, pylint: disable=redefined-builtin
        # Class-level access returns the descriptor itself, per convention.
        return self if obj is None else obj.__dict__[self.field.name]

    def __set__(self, obj, value):
        converted = self.field.to_python(value)
        obj.__dict__[self.field.name] = converted
class CreatorMixin(object):
    """
    Mixin replicating the behaviour of Django's removed ``SubfieldBase``.

    See: https://docs.djangoproject.com/en/1.11/releases/1.8/#subfieldbase
    """

    def contribute_to_class(self, cls, name, *args, **kwargs):
        # Run the regular field machinery first, then shadow the model
        # attribute with a converting descriptor.
        super(CreatorMixin, self).contribute_to_class(cls, name, *args, **kwargs)  # lint-amnesty, pylint: disable=super-with-arguments
        setattr(cls, name, Creator(self))

    def from_db_value(self, value, expression, connection):  # lint-amnesty, pylint: disable=unused-argument
        # Apply the same conversion to values coming back from the database.
        return self.to_python(value)
| stvstnfrd/edx-platform | openedx/core/djangoapps/util/model_utils.py | Python | agpl-3.0 | 1,102 |
# coding=utf-8
from app import mongo_utils
from bson import json_util
from flask import Blueprint, render_template, request, Response,session
import json
import time
from datetime import datetime
from operator import itemgetter
# Blueprint holding the public-facing routes of the voting-advice application.
mod_main = Blueprint('main', __name__)
@mod_main.route('/', methods=['GET'])
def index():
    """Landing page: load the enabled project's data and open a user session."""
    enabled_projects = mongo_utils.get_enabled_project()
    # A unix timestamp doubles as the anonymous session/user identifier.
    user_id = int(time.mktime(datetime.now().timetuple()))
    session['user_id'] = user_id
    year = 2017
    # NOTE: with several enabled projects only the last one's data survives
    # the loop; in practice a single project is enabled at a time.
    for project in json.loads(json_util.dumps(enabled_projects)):
        year = project['year']
        docs = mongo_utils.find_all(year)
        count_questions = mongo_utils.get_nr_questions_front(year)
        questions = mongo_utils.find_all_questions(year)
    mongo_utils.insert_user_session(user_id, year, datetime.utcnow())
    return render_template('mod_main/index.html',
                           docs=json.loads(json_util.dumps(docs)),
                           questions=json.loads(json_util.dumps(questions)),
                           count_questions=count_questions,
                           user_id=user_id)
@mod_main.route('/results/<int:user_id>', methods=['GET'])
def results(user_id):
    """Results page: score each candidate against the user's stored answers.

    A candidate scores one match per question where question/status/vazno
    values all equal the user's; the match count is turned into a percentage
    of the total questions and candidates are rendered best-first.
    """
    project_enabled = mongo_utils.get_enabled_project()
    # With several enabled projects only the last one's results survive the
    # loop; in practice a single project is enabled at a time.
    for project in json.loads(json_util.dumps(project_enabled)):
        docs = mongo_utils.find_all(project['year'])
        results=mongo_utils.find_user_session_answers(project['year'],user_id)
    countquestions=0
    number_of_questions = len(json.loads(json_util.dumps(results['all_question'])))
    candidates = json.loads(json_util.dumps(results['candidate_results']))
    users = json.loads(json_util.dumps(results['user_results']))
    candidates_array = []
    # Walk question indices 0..number_of_questions; missing keys are simply
    # skipped by the membership checks below.
    while countquestions <= number_of_questions:
        for candidate in candidates:
            candidate_match = 0
            for user in users:
                if 'question_'+str(countquestions) in candidate and 'question_'+str(countquestions) in user:
                    if 'vazno_' + str(countquestions) in candidate and 'vazno_' + str(countquestions) in user:
                        candidate_question_value = candidate['question_' + str(countquestions)]
                        candidate_status_value = candidate['status_' + str(countquestions)]
                        candidate_vazno_value = candidate['vazno_' + str(countquestions)]
                        user_question_value = user['question_' + str(countquestions)]
                        user_status_value = user['status_' + str(countquestions)]
                        user_vazno_value = user['vazno_' + str(countquestions)]
                        # Count a match only when answer, status and
                        # importance ("vazno") all coincide.
                        if candidate_vazno_value == user_vazno_value and candidate_status_value == user_status_value and candidate_question_value == user_question_value:
                            candidate_match += 1
                            candidates_array.append({
                                "candidate_slug": candidate['candidate_slug'],
                                'question': candidate_question_value,
                                'status': candidate_status_value,
                                'vazno': candidate_vazno_value,
                                "matchcount": candidate_match,
                            })
        countquestions += 1
    candidates_percentages = []
    # Convert per-question matches into an agreement percentage per candidate.
    for candidate in json.loads(json_util.dumps(results['candidates'])):
        percentage = 0
        count_match=0
        for c_a in candidates_array:
            if candidate['generated_id']==c_a['candidate_slug']:
                count_match += 1
                percentage = (float(count_match)/ number_of_questions) * 100
        candidates_percentages.append({
            'candidate_name':candidate['candidate_name'],
            'percentage':percentage,
            'candidate_biography':candidate['candidate_biography'],
            'image':candidate['image']
        })
    # Best-matching candidates first.
    sorted_c_array=sorted(candidates_percentages, key=itemgetter('percentage'),reverse=True)
    return render_template('mod_main/results.html', docs=json.loads(json_util.dumps(results)),results=json.loads(json_util.dumps(sorted_c_array)),user_id=user_id)
@mod_main.route('/insertuseranswers', methods=['GET', "POST"])
def insert_user_answers():
    """Persist the session user's questionnaire answers from the POSTed form.

    For each enabled project, the form dict is stamped with the project
    year, the session user id and a UTC timestamp, then inserted via
    mongo_utils.  The last insert result is returned as JSON.

    NOTE(review): with several enabled projects the same `data` dict is
    mutated and re-inserted once per project, and only the final insert
    result is returned -- confirm multiple enabled projects are intended.
    """
    if request.method == 'POST':
        data = request.form.to_dict()
        project_enabled = mongo_utils.get_enabled_project()
        for project in json.loads(json_util.dumps(project_enabled)):
            # NOTE(review): `docs` is fetched but never used in this view.
            docs = mongo_utils.find_all(project['year'])
            data['project_slug']=project['year']
            data['user_id'] =session['user_id']
            data['timestamp'] = datetime.utcnow()
            result=mongo_utils.insert_users_answers(data)
        #return render_template('mod_main/user_candidate_results.html.html', docs=json.loads(json_util.dumps(docs)), questions=json.loads(json_util.dumps(questions)), count_questions=count_questions)
        return Response(response=json_util.dumps(result), status=200, mimetype='application/json')
@mod_main.route('/getuseranswerresults', methods=['GET', "POST"])
def get_user_answers_results():
    """Return the stored session answers for the user named in the POSTed form.

    Walks the enabled projects (remembering the year of the last one
    visited) and then looks up that user's session answers for that year.
    """
    if request.method == 'POST':
        form_data = request.form.to_dict()
        requested_user = form_data['user_id']
        latest_year = ""
        for enabled_project in json.loads(json_util.dumps(mongo_utils.get_enabled_project())):
            # find_all is called exactly as before; its result is not used here.
            mongo_utils.find_all(enabled_project['year'])
            latest_year = enabled_project['year']
        answers = mongo_utils.find_user_session_answers(latest_year, requested_user)
        return Response(response=json_util.dumps(answers), status=200, mimetype='application/json')
@mod_main.route('/getusersessionidresults', methods=['GET', "POST"])
def get_user_session_id_results():
    """Return the current session user's stored answers as JSON.

    Falls back to an empty user id when there is no session user.

    NOTE(review): here `find_user_session_answers` is called with a single
    dict argument, while the sibling view `get_user_answers_results` calls
    it as (project_year, user_id) -- confirm which signature the helper
    actually accepts.
    """
    if request.method == 'POST':
        if session.get('user_id') is not None:
            user_id=session['user_id']
        else:
            user_id=""
        data = request.form.to_dict()
        project_enabled = mongo_utils.get_enabled_project()
        for project in json.loads(json_util.dumps(project_enabled)):
            # NOTE(review): `docs` is fetched but never used in this view.
            docs = mongo_utils.find_all(project['year'])
            data['project_slug']=project['year']
            data['user_id'] = user_id
            result=mongo_utils.find_user_session_answers(data)
            #return render_template('mod_main/user_candidate_results.html.html', docs=json.loads(json_util.dumps(docs)), questions=json.loads(json_util.dumps(questions)), count_questions=count_questions)
        return Response(response=json_util.dumps(result), status=200, mimetype='application/json')
@mod_main.route('/getallquestions', methods=['GET', "POST"])
def get_all_questions():
    """Collect every question, ordered per group, across all enabled projects."""
    if request.method == 'GET':
        collected = []
        for proj in json.loads(json_util.dumps(mongo_utils.get_enabled_project())):
            project_groups = json.loads(json_util.dumps(mongo_utils.find_all(proj['year'])))
            for grp in project_groups:
                collected.extend(mongo_utils.find_all_questions_ordered(grp['generated_id']))
        return Response(response=json_util.dumps(collected), status=200, mimetype='application/json')
@mod_main.route('/getquestionsresults', methods=['GET', "POST"])
def get_questions_results():
    """Build per-candidate answer rows for the question named in the form.

    For each question index, fetches the candidates' answers plus the
    session user's own answer and appends one row per record with the
    'status_<n>' / 'vazno_<n>' / 'comment_<n>' fields ("/" when absent).

    NOTE(review): the loop variable `questions` below shadows the earlier
    `questions` result from find_all_questions -- presumably intentional
    re-use, but worth renaming.
    """
    if request.method == 'POST':
        user_id=session['user_id']
        data = request.form.to_dict()
        question= data['question_name']
        project_slug=""
        project_enabled = mongo_utils.get_enabled_project()
        for project in json.loads(json_util.dumps(project_enabled)):
            # Last enabled project wins for project_slug / response.
            project_slug=project['year']
            questions = mongo_utils.find_all_questions(project['year'])
            response = mongo_utils.find_all_questions_results(user_id,question,project['year'])
        count_question=0
        created_array = []
        question_key=""
        for questions in json.loads(json_util.dumps(response['all_question'])):
            # question_key counts 1..N across all questions of the project.
            count_question = count_question + 1
            question_key=count_question
            answers = mongo_utils.find_all_answers_s(question_key,question,project_slug)
            answers_users = mongo_utils.find_all_answers_users(question_key,question,user_id,project_slug)
            # One row per candidate answer; missing fields become "/".
            for r in json.loads(json_util.dumps(answers)):
                candidate_name_result = mongo_utils.get_candidate_name(r['candidate_slug'])
                for c_name in json.loads(json_util.dumps(candidate_name_result)):
                    candidate_name=c_name['candidate_name']
                    if 'status_'+str(question_key) in r:
                        status=r['status_'+str(question_key)]
                    else:
                        status="/"
                    if 'vazno_'+str(question_key) in r:
                        vazno=r['vazno_'+str(question_key)]
                    else:
                        vazno="/"
                    if 'comment_' + str(question_key) in r:
                        comment = r['comment_' + str(question_key)]
                    else:
                        comment = "/"
                    created_array.append({'candidate_slug':candidate_name,'status':status,'vazno':vazno,'comment':comment})
            # The session user's own answer is appended under a fixed label.
            for r_u in json.loads(json_util.dumps(answers_users)):
                if 'status_' + str(question_key) in r_u:
                    status = r_u['status_' + str(question_key)]
                else:
                    status = "/"
                if 'vazno_' + str(question_key) in r_u:
                    vazno = r_u['vazno_' + str(question_key)]
                else:
                    vazno = "/"
                created_array.append({'candidate_slug': 'Vaš odgovor', 'status': status, 'vazno': vazno,'comment': "/"})
        return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
@mod_main.route('/getquestionsresultsshared', methods=['GET', "POST"])
def get_questions_results_shared():
    """Shared-link variant of get_questions_results.

    Identical row-building logic, except the user id comes from the POSTed
    form (not the session) and the user's own row is labelled 'Moj odgovor'.

    NOTE(review): this duplicates get_questions_results almost line for
    line -- a shared private helper would keep the two in sync.
    """
    if request.method == 'POST':
        data = request.form.to_dict()
        question= data['question_name']
        user_id=data['user_id']
        project_slug=""
        project_enabled = mongo_utils.get_enabled_project()
        for project in json.loads(json_util.dumps(project_enabled)):
            # Last enabled project wins for project_slug / response.
            project_slug=project['year']
            questions = mongo_utils.find_all_questions(project['year'])
            response = mongo_utils.find_all_questions_results(user_id,question,project['year'])
        count_question=0
        created_array = []
        question_key=""
        for questions in json.loads(json_util.dumps(response['all_question'])):
            count_question = count_question + 1
            question_key=count_question
            answers = mongo_utils.find_all_answers_s(question_key,question,project_slug)
            answers_users = mongo_utils.find_all_answers_users(question_key,question,user_id,project_slug)
            # One row per candidate answer; missing fields become "/".
            for r in json.loads(json_util.dumps(answers)):
                candidate_name_result = mongo_utils.get_candidate_name(r['candidate_slug'])
                for c_name in json.loads(json_util.dumps(candidate_name_result)):
                    candidate_name=c_name['candidate_name']
                    if 'status_'+str(question_key) in r:
                        status=r['status_'+str(question_key)]
                    else:
                        status="/"
                    if 'vazno_'+str(question_key) in r:
                        vazno=r['vazno_'+str(question_key)]
                    else:
                        vazno="/"
                    if 'comment_' + str(question_key) in r:
                        comment = r['comment_' + str(question_key)]
                    else:
                        comment = "/"
                    created_array.append({'candidate_slug':candidate_name,'status':status,'vazno':vazno,'comment':comment})
            # The named user's own answer is appended under a fixed label.
            for r_u in json.loads(json_util.dumps(answers_users)):
                if 'status_' + str(question_key) in r_u:
                    status = r_u['status_' + str(question_key)]
                else:
                    status = "/"
                if 'vazno_' + str(question_key) in r_u:
                    vazno = r_u['vazno_' + str(question_key)]
                else:
                    vazno = "/"
                created_array.append({'candidate_slug': 'Moj odgovor', 'status': status, 'vazno': vazno,'comment': "/"})
        return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
@mod_main.route('/getallqu', methods=['GET', "POST"])
def get_all_q_a_u():
    """List the question texts the session user has fully answered.

    Questions are numbered sequentially across all enabled projects; a
    question counts as answered when the user's record carries both its
    'vazno_<n>' and 'status_<n>' fields.

    NOTE(review): when the session has no 'user_id' this view falls through
    and implicitly returns None (a 500 from Flask) -- confirm whether an
    empty JSON list would be the intended response instead.
    """
    if request.method == 'GET':
        if session.get('user_id') is not None:
            user_id = session['user_id']
            count_question = 0
            create_question_array = []
            project_slug = ""
            project_enabled = mongo_utils.get_enabled_project()
            for project in json.loads(json_util.dumps(project_enabled)):
                response_all_questions = mongo_utils.find_all_questions(project['year'])
                project_slug = project['year']
                for raq in json.loads(json_util.dumps(response_all_questions)):
                    # Numbering continues across projects.
                    count_question = count_question + 1
                    # (the original also assigned an unused `question_key` here)
                    response_user_q = mongo_utils.find_all_questions_user(user_id, project_slug)
                    for ruq in json.loads(json_util.dumps(response_user_q)):
                        if 'vazno_' + str(count_question) in ruq and 'status_' + str(count_question) in ruq:
                            create_question_array.append({'question_name': ruq['question_' + str(count_question)]})
            return Response(response=json_util.dumps(create_question_array), status=200, mimetype='application/json')
@mod_main.route('/getallquresults', methods=['GET', "POST"])
def get_all_q_a_u_result():
    """List the question texts a given user has fully answered.

    The user id comes from the POSTed form.  Questions are numbered
    sequentially across all enabled projects; a question counts as
    answered when the user's record carries both its 'vazno_<n>' and
    'status_<n>' fields.
    """
    if request.method == 'POST':
        form_data = request.form.to_dict()
        target_user = form_data['user_id']
        question_number = 0
        answered = []
        slug = ""
        for proj in json.loads(json_util.dumps(mongo_utils.get_enabled_project())):
            all_questions = mongo_utils.find_all_questions(proj['year'])
            slug = proj['year']
            for _unused in json.loads(json_util.dumps(all_questions)):
                question_number += 1
                user_answers = mongo_utils.find_all_questions_user(target_user, slug)
                for record in json.loads(json_util.dumps(user_answers)):
                    has_vazno = 'vazno_' + str(question_number) in record
                    has_status = 'status_' + str(question_number) in record
                    if has_vazno and has_status:
                        answered.append({'question_name': record['question_' + str(question_number)]})
        return Response(response=json_util.dumps(answered), status=200, mimetype='application/json')
@mod_main.route('/getanswersusercandidate', methods=['GET', "POST"])
def get_answers_user_candidate():
    """Build candidate + session-user answer rows for the question in the form.

    NOTE(review): `user_id` is only bound when the session has one; without
    a session user the references below raise NameError -- confirm whether
    a guard/redirect is intended.
    NOTE(review): find_all_questions() and find_all_answers_users(...) are
    called with fewer arguments here than in the other views of this module
    -- verify the helper signatures.
    """
    if request.method=="POST":
        if session.get('user_id') is not None:
            user_id = session['user_id']
        data = request.form.to_dict()
        created_array=[]
        count_question = 0
        question_key=0;
        response_users_questtion = mongo_utils.find_users_question_a(user_id)
        response_all_questions = mongo_utils.find_all_questions()
        for raq in json.loads(json_util.dumps(response_all_questions)):
            # question_key counts 1..N across all questions.
            count_question = count_question + 1
            question_key=count_question
            response_canidates_questtion = mongo_utils.find_candidates_question_a(question_key,data['question_name'])
            answers_users = mongo_utils.find_all_answers_users(question_key, data['question_name'], user_id)
            # One row per candidate answer; missing fields become "/".
            for r_candidates in json.loads(json_util.dumps(response_canidates_questtion)):
                candidate_name_result = mongo_utils.get_candidate_name(r_candidates['candidate_slug'])
                for c_name in json.loads(json_util.dumps(candidate_name_result)):
                    candidate_name = c_name['candidate_name']
                    if 'status_' + str(question_key) in r_candidates:
                        status = r_candidates['status_' + str(question_key)]
                    else:
                        status = "/"
                    if 'vazno_' + str(question_key) in r_candidates:
                        vazno = r_candidates['vazno_' + str(question_key)]
                    else:
                        vazno = "/"
                    if 'comment_' + str(question_key) in r_candidates:
                        comment = r_candidates['comment_' + str(question_key)]
                    else:
                        comment = "/"
                    created_array.append({'candidate_slug':candidate_name,'vazno':vazno,'status':status,'comment':comment})
            # The session user's own answer is appended under a fixed label.
            for r_users in json.loads(json_util.dumps(answers_users)):
                if 'status_' + str(question_key) in r_users:
                    status = r_users['status_' + str(question_key)]
                else:
                    status = "/"
                if 'vazno_' + str(question_key) in r_users:
                    vazno = r_users['vazno_' + str(question_key)]
                else:
                    vazno = "/"
                created_array.append({'candidate_slug':"Vaš odgovor",'vazno':vazno,'status':status,'comment':"/"})
        return Response(response=json_util.dumps(created_array), status=200, mimetype='application/json')
| crtarsorg/glasomer.rs-v2 | app/mod_main/views.py | Python | cc0-1.0 | 17,233 |
# Copyright (c) 2007-2009 Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import XenAPI
import commands, re, shutil, sys, socket
from pprint import pprint
from XSConsoleAuth import *
from XSConsoleLang import *
from XSConsoleState import *
from XSConsoleUtils import *
class HotOpaqueRef:
def __init__(self, inOpaqueRef, inType):
self.opaqueRef = inOpaqueRef
self.type = inType
self.hash = hash(inOpaqueRef)
def __repr__(self):
return str(self.__dict__)
# __hash__ and __cmp__ allow this object to be used as a dictionary key
def __hash__(self):
return self.hash
def __cmp__(self, inOther):
if not isinstance(inOther, HotOpaqueRef):
return 1
if self.opaqueRef == inOther.opaqueRef:
return 0
if self.opaqueRef < inOther.opaqueRef:
return -1
return 1
def OpaqueRef(self): return self.opaqueRef
def Type(self): return self.type
class HotAccessor:
    """Chainable path builder over HotData.

    Attribute access appends a path element (e.g. HotAccessor().pool.master),
    [] selects an element by HotOpaqueRef or list index, and calling the
    accessor fetches the data (with an optional default value).
    """
    def __init__(self, inName = None, inRefs = None):
        # Parallel lists: name[i] is the path element, refs[i] its selector
        # (None, a HotOpaqueRef, or an int index).
        self.name = FirstValue(inName, [])
        self.refs = FirstValue(inRefs, [])
    def __getattr__(self, inName):
        """Return a new accessor with `inName` appended to the path."""
        retVal = HotAccessor(self.name[:], self.refs[:]) # [:] copies the array
        retVal.name.append(inName)
        retVal.refs.append(None)
        return retVal
    def __iter__(self):
        """Fetch the current path and prepare iteration over its keys/items."""
        iterData = HotData.Inst().GetData(self.name, {}, self.refs)
        if isinstance(iterData, types.DictType):
            self.iterKeys = iterData.keys()
        elif isinstance(iterData, (types.ListType, types.TupleType)):
            self.iterKeys = iterData[:] # [:] copy is necessary
        else:
            raise Exception(Lang("Cannot iterate over type '")+str(type(iterData))+"'")
        return self
    # This method will hide fields called 'next' in the xapi database. If any appear, __iter__ will need to
    # return a new object type and this method will need to be moved into that
    def next(self):
        """Python 2 iterator step: yield an accessor for the next key."""
        if len(self.iterKeys) <= 0:
            raise StopIteration
        retVal = HotAccessor(self.name[:], self.refs[:]) # [:] copies the array
        retVal.refs[-1] = self.iterKeys.pop(0)
        return retVal
    def __getitem__(self, inParam):
        """Select one element of the last path component by ref or index."""
        # These are square brackets selecting a particular item from a dict using its OpaqueRef
        if not isinstance(inParam, (types.IntType, HotOpaqueRef)):
            raise Exception('Use of HotAccessor[param] requires param of type int or HotOpaqueRef, but got '+str(type(inParam)))
        retVal = HotAccessor(self.name[:], self.refs[:])
        retVal.refs[-1] = inParam
        return retVal
    def __call__(self, inParam = None):
        """Fetch the data at this path; `inParam` is the default value."""
        # These are the brackets on the end of the statement, with optional default value.
        # That makes it a request to fetch the data
        if isinstance(inParam, HotOpaqueRef):
            raise Exception('Use [] to pass HotOpaqueRefs to HotAccessors')
        return HotData.Inst().GetData(self.name, inParam, self.refs)
    def HotOpaqueRef(self):
        """Return the ref selected for the last path component (may be None)."""
        return self.refs[-1]
    def __str__(self):
        return str(self.__dict__)
    def __repr__(self):
        return str(self.__dict__)
class HotData:
    """Singleton, time-limited cache of xapi records.

    Named fetchers pull single records (given a HotOpaqueRef) or all
    records of a type from xapi; results are cached for a per-fetcher
    lifetime in seconds.  Data is normally accessed via HotAccessor
    chains created by attribute access on this object.
    """
    # Singleton storage for Inst()/Reset().
    instance = None
    def __init__(self):
        self.data = {}
        # NOTE(review): self.timestamps is initialised and cleared but never
        # populated in this class; Fetch stores timestamps inside self.data.
        self.timestamps = {}
        self.session = None
        self.InitialiseFetchers()
    @classmethod
    def Inst(cls):
        """Return the singleton instance, creating it on first use."""
        if cls.instance is None:
            cls.instance = HotData()
        return cls.instance
    @classmethod
    def Reset(cls):
        """Discard the singleton so the next Inst() builds a fresh one."""
        if cls.instance is not None:
            del cls.instance
            cls.instance = None
    def DeleteCache(self):
        """Drop all cached values."""
        self.data = {}
        self.timestamps = {}
    def Fetch(self, inName, inRef):
        """Return the value for fetcher `inName`, using the cache when fresh.

        On xapi socket timeout the session is dropped (so it will be
        reopened on next use) and the timeout is re-raised.
        """
        # Top-level object are cached by name, referenced objects by reference
        cacheName = FirstValue(inRef, inName)
        cacheEntry = self.data.get(cacheName, None)
        fetcher = self.fetchers[inName]
        timeNow = time.time()
        # If inRef is an array index, the result can't be cached
        if not isinstance(inRef, types.IntType) and cacheEntry is not None and timeNow - cacheEntry.timestamp < fetcher.lifetimeSecs:
            retVal = cacheEntry.value
        else:
            try:
                retVal = fetcher.fetcher(inRef)
                # Save in the cache
                self.data[cacheName] = Struct(timestamp = timeNow, value = retVal)
            except socket.timeout:
                self.session = None
                raise socket.timeout
        return retVal
    def FetchByRef(self, inRef):
        """Fetch the object a HotOpaqueRef points at, keyed by its type."""
        retVal = self.Fetch(inRef.Type(), inRef)
        return retVal
    def FetchByNameOrRef(self, inName, inRef):
        """Fetch by fetcher name when one exists, otherwise by the ref's type."""
        if inName in self.fetchers:
            retVal = self.Fetch(inName, inRef)
        else:
            retVal = self.Fetch(inRef.Type(), inRef)
        return retVal
    def GetData(self, inNames, inDefault, inRefs):
        """Walk the path `inNames`/`inRefs` and return the value found.

        Any failure along the way (missing key, fetch error, bad index)
        yields `inDefault` (or None) instead of raising.
        """
        try:
            itemRef = self.data # Start at the top level
            for i, name in enumerate(inNames):
                currentRef = inRefs[i]
                if isinstance(currentRef, HotOpaqueRef):
                    # If currentRef is a HotOpaqueRef, always fetch the corresponding object
                    itemRef = self.FetchByNameOrRef(name, currentRef)
                else:
                    # Look for a data fetcher matching this item name
                    if name in self.fetchers:
                        # We have a fetcher for this element, so use it
                        # Handle the case where itemRef is a dictionary containing the key/value pair ( current name : HotOpaqueRef )
                        if isinstance(itemRef, types.DictType) and name in itemRef and isinstance(itemRef[name], HotOpaqueRef):
                            # This is a subitem with an OpaqueRef supplied by xapi, so fetch the obect it's referring to
                            itemRef = self.Fetch(name, itemRef[name])
                        else:
                            # Fetch without a reference
                            itemRef = self.Fetch(name, None)
                    else:
                        # No fetcher for this item, so return the value of the named element if is in the dictionary,
                        # or the default if not
                        # First, promote OpaqueRefs to the object they refer to
                        if isinstance(itemRef, HotOpaqueRef):
                            itemRef = self.FetchByRef(itemRef)
                        # This allows hash navigation using HotAccessor().key1.key2.key3(), etc.
                        itemRef = itemRef[name] # Allow to throw if element not present
                # Handle integer references as list indices
                if isinstance(currentRef, types.IntType):
                    if not isinstance(itemRef, (types.ListType, types.TupleType)):
                        raise Exception("List index supplied but element '"+'.'.join(inNames)+"' is not a list")
                    if inRefs[i] >= len(itemRef) or currentRef < -len(itemRef):
                        raise Exception("List index "+str(currentRef)+" out of range in '"+'.'.join(inNames)+"'")
                    itemRef = itemRef[currentRef]
            return itemRef
        except Exception, e:
            # Data not present/fetchable, so return the default value
            return FirstValue(inDefault, None)
    def __getattr__(self, inName):
        """Start a HotAccessor chain rooted at element `inName`."""
        if inName[0].isupper():
            # Don't expect elements to start with upper case, so probably an unknown method name
            raise Exception("Unknown method HotData."+inName)
        return HotAccessor([inName], [None])
    def AddFetcher(self, inKey, inFetcher, inLifetimeSecs):
        """Register `inFetcher` under `inKey` with a cache lifetime in seconds."""
        self.fetchers[inKey] = Struct( fetcher = inFetcher, lifetimeSecs = inLifetimeSecs )
    def InitialiseFetchers(self):
        """Register the standard set of xapi fetchers."""
        self.fetchers = {}
        self.AddFetcher('guest_metrics', self.FetchVMGuestMetrics, 5)
        self.AddFetcher('guest_vm', self.FetchGuestVM, 5)
        self.AddFetcher('guest_vm_derived', self.FetchGuestVMDerived, 5)
        self.AddFetcher('host', self.FetchHost, 5)
        self.AddFetcher('host_cpu', self.FetchHostCPUs, 5)
        self.AddFetcher('local_host', self.FetchLocalHost, 5) # Derived
        self.AddFetcher('local_host_ref', self.FetchLocalHostRef, 60) # Derived
        self.AddFetcher('local_pool', self.FetchLocalPool, 5) # Derived
        self.AddFetcher('metrics', self.FetchMetrics, 5)
        self.AddFetcher('pbd', self.FetchPBD, 5)
        self.AddFetcher('pool', self.FetchPool, 5)
        self.AddFetcher('sr', self.FetchSR, 5)
        self.AddFetcher('visible_sr', self.FetchVisibleSR, 5) # Derived
        self.AddFetcher('vm', self.FetchVM, 5)
    def FetchVMGuestMetrics(self, inOpaqueRef):
        """Fetch one VM_guest_metrics record (requires a ref)."""
        retVal = self.Session().xenapi.VM_guest_metrics.get_record(inOpaqueRef.OpaqueRef())
        return retVal
    def FetchGuestVM(self, inOpaqueRef):
        """Fetch a VM record, or all VMs that are neither templates nor dom0."""
        if inOpaqueRef is not None:
            # Don't need to filter, so can use the standard VM fetch
            retVal = self.FetchVM(inOpaqueRef)
        else:
            retVal = {}
            for key, value in self.vm().iteritems():
                if not value.get('is_a_template', False) and not value.get('is_control_domain', False):
                    retVal[key] = value
        return retVal
    def FetchHostCPUs(self, inOpaqueRef):
        """Fetch one host_cpu record, or all of them keyed by HotOpaqueRef."""
        def LocalConverter(inCPU):
            return HotData.ConvertOpaqueRefs(inCPU,
                host='host'
                )
        if inOpaqueRef is not None:
            cpu = self.Session().xenapi.host_cpu.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(cpu)
        else:
            cpus = self.Session().xenapi.host_cpu.get_all_records()
            retVal = {}
            for key, cpu in cpus.iteritems():
                cpu = LocalConverter(cpu)
                retVal[HotOpaqueRef(key, 'host_cpu')] = cpu
        return retVal
    def FetchGuestVMDerived(self, inOpaqueRef):
        """Count guest VMs by power state (halted/paused/running/suspended)."""
        retVal = {}
        halted = 0
        paused = 0
        running = 0
        suspended = 0
        for key, vm in self.guest_vm().iteritems():
            powerState = vm.get('power_state', '').lower()
            if powerState.startswith('halted'):
                halted += 1
            elif powerState.startswith('paused'):
                paused += 1
            elif powerState.startswith('running'):
                running += 1
            elif powerState.startswith('suspended'):
                suspended += 1
        retVal['num_halted'] = halted
        retVal['num_paused'] = paused
        retVal['num_running'] = running
        retVal['num_suspended'] = suspended
        return retVal
    def FetchLocalHost(self, inOpaqueRef):
        """Fetch the host record for the machine this session runs on."""
        retVal = self.FetchHost(self.FetchLocalHostRef(inOpaqueRef))
        return retVal
    def FetchLocalHostRef(self, inOpaqueRef):
        """Return a HotOpaqueRef for the local host (takes no ref)."""
        if inOpaqueRef is not None:
            raise Exception("Request for local host must not be passed an OpaqueRef")
        thisHost = self.Session().xenapi.session.get_this_host(self.Session()._session)
        retVal = HotOpaqueRef(thisHost, 'host')
        return retVal
    def FetchLocalPool(self, inOpaqueRef):
        """Fetch the single pool record; raises if there isn't exactly one."""
        if inOpaqueRef is not None:
            raise Exception("Request for local pool must not be passed an OpaqueRef")
        pools = self.Session().xenapi.pool.get_all()
        if len(pools) != 1:
            raise Exception("Unexpected number of pools "+str(pools))
        retVal = self.FetchPool(HotOpaqueRef(pools[0], 'pool'))
        return retVal
    def FetchHost(self, inOpaqueRef):
        """Fetch one host record, or all hosts keyed by HotOpaqueRef."""
        def LocalConverter(inHost):
            # Promote the raw OpaqueRef strings/lists to HotOpaqueRef objects.
            return HotData.ConvertOpaqueRefs(inHost,
                crash_dump_sr = 'sr',
                consoles = 'console',
                current_operations = 'task',
                host_CPUs = 'host_cpu',
                metrics = 'host::metrics',
                PBDs = 'pbd',
                PIFs='pif',
                resident_VMs = 'vm',
                suspend_image_sr = 'sr',
                VBDs = 'vbd',
                VIFs = 'vif'
            )
        if inOpaqueRef is not None:
            host = self.Session().xenapi.host.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(host)
        else:
            hosts = self.Session().xenapi.host.get_all_records()
            retVal = {}
            for key, host in hosts.iteritems():
                host = LocalConverter(host)
                retVal[HotOpaqueRef(key, 'host')] = host
        return retVal
    def FetchMetrics(self, inOpaqueRef):
        """Fetch VM or host metrics depending on the ref's recorded type."""
        if inOpaqueRef is None:
            raise Exception("Request for VM metrics requires an OpaqueRef")
        if inOpaqueRef.Type() == 'vm::metrics':
            retVal = self.Session().xenapi.VM_metrics.get_record(inOpaqueRef.OpaqueRef())
        elif inOpaqueRef.Type() == 'host::metrics':
            retVal = self.Session().xenapi.host_metrics.get_record(inOpaqueRef.OpaqueRef())
        else:
            raise Exception("Unknown metrics type '"+inOpaqueRef.Type()+"'")
        return retVal
    def FetchPBD(self, inOpaqueRef):
        """Fetch one PBD record, or all PBDs keyed by HotOpaqueRef."""
        def LocalConverter(inPBD):
            return HotData.ConvertOpaqueRefs(inPBD,
                host='host',
                SR='sr'
            )
        if inOpaqueRef is not None:
            pbd = self.Session().xenapi.PBD.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(pbd)
        else:
            pbds = self.Session().xenapi.PBD.get_all_records()
            retVal = {}
            for key, pbd in pbds.iteritems():
                pbd = LocalConverter(pbd)
                retVal[HotOpaqueRef(key, 'pbd')] = pbd
        return retVal
    def FetchPool(self, inOpaqueRef):
        """Fetch one pool record, or all pools keyed by HotOpaqueRef."""
        def LocalConverter(inPool):
            return HotData.ConvertOpaqueRefs(inPool,
                crash_dump_SR='sr',
                default_SR='sr',
                master='host',
                suspend_image_SR='sr'
            )
        if inOpaqueRef is not None:
            pool = self.Session().xenapi.pool.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(pool)
        else:
            pools = self.Session().xenapi.pool.get_all_records()
            retVal = {}
            for key, pool in pools.iteritems():
                pool = LocalConverter(pool)
                retVal[HotOpaqueRef(key, 'pool')] = pool
        return retVal
    def FetchSR(self, inOpaqueRef):
        """Fetch one SR record, or all SRs keyed by HotOpaqueRef."""
        def LocalConverter(inSR):
            return HotData.ConvertOpaqueRefs(inSR,
                current_operations = 'task',
                PBDs = 'pbd',
                VDIs = 'vdi')
        if inOpaqueRef is not None:
            sr = self.Session().xenapi.SR.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(sr)
        else:
            srs = self.Session().xenapi.SR.get_all_records()
            retVal = {}
            for key, sr in srs.iteritems():
                sr = LocalConverter(sr)
                retVal[HotOpaqueRef(key, 'sr')] = sr
        return retVal
    def FetchVisibleSR(self, inOpaqueRef):
        """Fetch SRs visible from the local host (detached SRs count as visible)."""
        if inOpaqueRef is not None:
            # Make sr[ref] and visible_sr[ref] do the same thing, i.e. don't check the the SR is visible
            retVal = self.FetchSR(inOpaqueRef)
        else:
            retVal = {}
            for sr in HotAccessor().sr: # Iterates through HotAccessors to SRs
                visible = False
                if len(sr.PBDs()) == 0:
                    visible = True # This is a detached SR so list it as visible
                else:
                    for pbd in sr.PBDs(): # Iterates through HotOpaqueRefs to PBDs
                        if pbd in HotAccessor().local_host.PBDs(): # host.PBDs() is a list of HotOpaqueRefs
                            visible = True
                if visible:
                    retVal[sr.HotOpaqueRef()] = sr
        return retVal
    def FetchVM(self, inOpaqueRef):
        """Fetch one VM record, or all VMs keyed by HotOpaqueRef."""
        def LocalConverter(inVM):
            return HotData.ConvertOpaqueRefs(inVM,
                affinity='host',
                consoles='console',
                current_operations = 'task',
                guest_metrics='guest_metrics',
                metrics='vm::metrics',
                PIFs='pif',
                resident_on='host',
                suspend_VDI='vdi',
                snapshot_of='snapshot',
                VBDs = 'vbd',
                VIFs = 'vif')
        if inOpaqueRef is not None:
            vm = self.Session().xenapi.VM.get_record(inOpaqueRef.OpaqueRef())
            retVal = LocalConverter(vm)
        else:
            vms = self.Session().xenapi.VM.get_all_records()
            retVal = {}
            for key, vm in vms.iteritems():
                vm = LocalConverter(vm)
                retVal[HotOpaqueRef(key, 'vm')] = vm
        return retVal
    @classmethod # classmethod so that other class's fetchers can use it easily
    def ConvertOpaqueRefs(cls, *inArgs, **inKeywords):
        """Replace raw OpaqueRef strings in a record with HotOpaqueRef objects.

        Keyword arguments map field name -> object type; string, list and
        dict-keyed fields are all handled.  Mutates and returns the record.
        """
        if len(inArgs) != 1:
            raise Exception('ConvertOpaqueRef requires a dictionary object as the first argument')
        ioObj = inArgs[0]
        for keyword, value in inKeywords.iteritems():
            obj = ioObj.get(keyword, None)
            if obj is not None:
                if isinstance(obj, str):
                    ioObj[keyword] = HotOpaqueRef(obj, value)
                elif isinstance(obj, types.ListType):
                    ioObj[keyword] = [ HotOpaqueRef(x, value) for x in obj ]
                elif isinstance(obj, types.DictType):
                    result = {}
                    for key, item in obj.iteritems():
                        result[ HotOpaqueRef(key, value) ] = item
                    ioObj[keyword] = result
        if Auth.Inst().IsTestMode(): # Tell the caller what they've missed, when in test mode
            for key,value in ioObj.iteritems():
                if isinstance(value, str) and value.startswith('OpaqueRef'):
                    print('Missed OpaqueRef string in HotData item: '+key)
                elif isinstance(value, types.ListType):
                    for item in value:
                        if isinstance(item, str) and item.startswith('OpaqueRef'):
                            print('Missed OpaqueRef List in HotData item: '+key)
                            break
                elif isinstance(value, types.DictType):
                    for item in value.keys():
                        if isinstance(item, str) and item.startswith('OpaqueRef'):
                            print('Missed OpaqueRef Dict in HotData item: '+key)
                            break
        return ioObj
    def Session(self):
        """Return the xapi session, opening one on first use."""
        if self.session is None:
            self.session = Auth.Inst().OpenSession()
        return self.session
    def Dump(self):
        """Debug helper: pretty-print the whole cache to stdout."""
        print "Contents of HotData cache:"
        pprint(self.data)
| jamesbulpin/xsconsole | XSConsoleHotData.py | Python | gpl-2.0 | 19,975 |
# import std libs
import os
from pkg_resources import resource_filename
import json
# import third party libs
import jinja2
# import local libs
from cycle.meta import __title__ as pkgname
def format_json(data):
    """Render *data* as 2-space-indented JSON text with keys sorted."""
    rendered = json.dumps(data, sort_keys=True, indent=2)
    return rendered
def load_resource_json(resource_path, pkgname=pkgname):
    """Load and parse a JSON resource bundled with *pkgname*.

    *resource_path* uses '/' separators; it is converted to an OS path and
    resolved via pkg_resources before being parsed.
    """
    chunks = resource_path.split('/')
    filename = resource_filename(pkgname, os.path.join(*chunks))
    # Use a context manager so the file handle is closed promptly even if
    # parsing fails (the original json.load(open(...)) leaked the handle).
    with open(filename, 'r') as fh:
        return json.load(fh)
def get_template_renderer(template_string=None):
    """Compile *template_string* into a Jinja2 template object.

    The environment uses this package's custom delimiters:
    '<% %>' for blocks, '%% %%' for variables and '<# #>' for comments.
    """
    environment = jinja2.Environment(
        block_start_string='<%',
        block_end_string='%>',
        variable_start_string='%%',
        variable_end_string='%%',
        comment_start_string='<#',
        comment_end_string='#>',
    )
    return environment.from_string(template_string)
| refnode/python-cycle | src/cycle/utils.py | Python | apache-2.0 | 841 |
from __future__ import unicode_literals
from unittest import TestCase
import requests
import requests_mock
import time
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
from oauthlib.oauth2.rfc6749.errors import InvalidGrantError
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
from requests_oauthlib.compliance_fixes import fitbit_compliance_fix
from requests_oauthlib.compliance_fixes import mailchimp_compliance_fix
from requests_oauthlib.compliance_fixes import weibo_compliance_fix
from requests_oauthlib.compliance_fixes import slack_compliance_fix
from requests_oauthlib.compliance_fixes import instagram_compliance_fix
from requests_oauthlib.compliance_fixes import plentymarkets_compliance_fix
from requests_oauthlib.compliance_fixes import ebay_compliance_fix
class FacebookComplianceFixTest(TestCase):
    """The Facebook fix must turn the url-encoded, text/plain token
    response into a standard token dict with a Bearer token_type."""

    def setUp(self):
        self.mocker = requests_mock.Mocker()
        # Facebook historically answered with form-encoded text, not JSON.
        self.mocker.post(
            "https://graph.facebook.com/oauth/access_token",
            text="access_token=urlencoded",
            headers={"Content-Type": "text/plain"},
        )
        self.mocker.start()
        self.addCleanup(self.mocker.stop)
        client = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = facebook_compliance_fix(client)

    def test_fetch_access_token(self):
        fetched = self.session.fetch_token(
            "https://graph.facebook.com/oauth/access_token",
            client_secret="someclientsecret",
            authorization_response="https://i.b/?code=hello",
        )
        self.assertEqual(
            fetched, {"access_token": "urlencoded", "token_type": "Bearer"}
        )
class FitbitComplianceFixTest(TestCase):
    """Tests that the Fitbit fix maps Fitbit's JSON error envelope
    ({"errors": [{"errorType": ...}]}) onto standard OAuth2 errors for
    both the initial token fetch and the refresh flow."""

    def setUp(self):
        # The default mock response is an invalid_grant error; individual
        # tests re-register a success response when they need one.
        self.mocker = requests_mock.Mocker()
        self.mocker.post(
            "https://api.fitbit.com/oauth2/token",
            json={"errors": [{"errorType": "invalid_grant"}]},
        )
        self.mocker.start()
        self.addCleanup(self.mocker.stop)

        fitbit = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = fitbit_compliance_fix(fitbit)

    def test_fetch_access_token(self):
        """Error envelope raises InvalidGrantError; a good response succeeds."""
        self.assertRaises(
            InvalidGrantError,
            self.session.fetch_token,
            "https://api.fitbit.com/oauth2/token",
            client_secret="someclientsecret",
            authorization_response="https://i.b/?code=hello",
        )

        # Re-register the endpoint with a successful payload.
        self.mocker.post(
            "https://api.fitbit.com/oauth2/token", json={"access_token": "fitbit"}
        )

        token = self.session.fetch_token(
            "https://api.fitbit.com/oauth2/token", client_secret="good"
        )

        self.assertEqual(token, {"access_token": "fitbit"})

    def test_refresh_token(self):
        """Same behaviour for refresh_token: error first, then success."""
        self.assertRaises(
            InvalidGrantError,
            self.session.refresh_token,
            "https://api.fitbit.com/oauth2/token",
            auth=requests.auth.HTTPBasicAuth("someclientid", "someclientsecret"),
        )

        # Re-register the endpoint with a successful payload.
        self.mocker.post(
            "https://api.fitbit.com/oauth2/token",
            json={"access_token": "access", "refresh_token": "refresh"},
        )

        token = self.session.refresh_token(
            "https://api.fitbit.com/oauth2/token",
            auth=requests.auth.HTTPBasicAuth("someclientid", "someclientsecret"),
        )

        self.assertEqual(token["access_token"], "access")
        self.assertEqual(token["refresh_token"], "refresh")
class MailChimpComplianceFixTest(TestCase):
    """The MailChimp fix must repair the bogus expires_in=0 and drop the
    null scope that the provider returns."""

    def setUp(self):
        mock = requests_mock.Mocker()
        mock.post(
            "https://login.mailchimp.com/oauth2/token",
            json={"access_token": "mailchimp", "expires_in": 0, "scope": None},
        )
        mock.start()
        self.addCleanup(mock.stop)
        client = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = mailchimp_compliance_fix(client)

    def test_fetch_access_token(self):
        token = self.session.fetch_token(
            "https://login.mailchimp.com/oauth2/token",
            client_secret="someclientsecret",
            authorization_response="https://i.b/?code=hello",
        )
        # expires_at should land about an hour from now.
        expected_expiry = time.time() + 3600
        self.assertAlmostEqual(token.pop("expires_at"), expected_expiry, places=2)
        # Remaining token fields are exact, and the null scope is gone.
        self.assertEqual(token, {"access_token": "mailchimp", "expires_in": 3600})
        self.assertNotIn("scope", token)
class WeiboComplianceFixTest(TestCase):
    """The Weibo fix must supply the token_type missing from the
    provider's token response."""

    def setUp(self):
        mock = requests_mock.Mocker()
        # Weibo's response omits token_type entirely.
        mock.post(
            "https://api.weibo.com/oauth2/access_token", json={"access_token": "weibo"}
        )
        mock.start()
        self.addCleanup(mock.stop)
        client = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = weibo_compliance_fix(client)

    def test_fetch_access_token(self):
        fetched = self.session.fetch_token(
            "https://api.weibo.com/oauth2/access_token",
            client_secret="someclientsecret",
            authorization_response="https://i.b/?code=hello",
        )
        self.assertEqual(fetched, {"access_token": "weibo", "token_type": "Bearer"})
class SlackComplianceFixTest(TestCase):
    """Tests that the Slack fix injects the access token into the request
    *body* (not the query string) and that any explicitly supplied token
    -- via data= or the URL -- takes precedence over the session token."""

    def setUp(self):
        mocker = requests_mock.Mocker()
        mocker.post(
            "https://slack.com/api/oauth.access",
            json={"access_token": "xoxt-23984754863-2348975623103", "scope": "read"},
        )
        # auth.test is mocked for both verbs so each test can inspect the
        # outgoing request that reached it.
        for method in ("GET", "POST"):
            mocker.request(
                method=method,
                url="https://slack.com/api/auth.test",
                json={
                    "ok": True,
                    "url": "https://myteam.slack.com/",
                    "team": "My Team",
                    "user": "cal",
                    "team_id": "T12345",
                    "user_id": "U12345",
                },
            )
        mocker.start()
        self.addCleanup(mocker.stop)

        slack = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = slack_compliance_fix(slack)

    def test_protected_request(self):
        """Session token goes into the body, never the query string."""
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.get("https://slack.com/api/auth.test")
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertNotIn("token", query)
        body = response.request.body
        data = parse_qs(body)
        self.assertEqual(data["token"], ["dummy-access-token"])

    def test_protected_request_override_token_get(self):
        """A token passed via data= on GET overrides the session token."""
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.get(
            "https://slack.com/api/auth.test", data={"token": "different-token"}
        )
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertNotIn("token", query)
        body = response.request.body
        data = parse_qs(body)
        self.assertEqual(data["token"], ["different-token"])

    def test_protected_request_override_token_post(self):
        """A token passed via data= on POST overrides the session token."""
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.post(
            "https://slack.com/api/auth.test", data={"token": "different-token"}
        )
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertNotIn("token", query)
        body = response.request.body
        data = parse_qs(body)
        self.assertEqual(data["token"], ["different-token"])

    def test_protected_request_override_token_url(self):
        """A token already in the URL is kept there; the body stays empty."""
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.get(
            "https://slack.com/api/auth.test?token=different-token"
        )
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertEqual(query["token"], ["different-token"])
        self.assertIsNone(response.request.body)
class InstagramComplianceFixTest(TestCase):
    """Tests for the Instagram compliance fix, which carries the access
    token in the query string of protected requests."""

    def setUp(self):
        mocker = requests_mock.Mocker()
        mocker.request(
            method="GET",
            url="https://api.instagram.com/v1/users/self",
            json={
                "data": {
                    "id": "1574083",
                    "username": "snoopdogg",
                    "full_name": "Snoop Dogg",
                    "profile_picture": "http://distillery.s3.amazonaws.com/profiles/profile_1574083_75sq_1295469061.jpg",
                    "bio": "This is my bio",
                    "website": "http://snoopdogg.com",
                    "is_business": False,
                    "counts": {"media": 1320, "follows": 420, "followed_by": 3410},
                }
            },
        )
        mocker.start()
        self.addCleanup(mocker.stop)
        instagram = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = instagram_compliance_fix(instagram)

    def test_protected_request(self):
        # the fix must append access_token to the query string
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.get("https://api.instagram.com/v1/users/self")
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertIn("access_token", query)
        self.assertEqual(query["access_token"], ["dummy-access-token"])

    def test_protected_request_dont_override(self):
        """check that if the access_token param
        already exist we don't override it"""
        self.session.token = {"access_token": "dummy-access-token"}
        response = self.session.get(
            "https://api.instagram.com/v1/users/self?access_token=correct-access-token"
        )
        url = response.request.url
        query = parse_qs(urlparse(url).query)
        self.assertIn("access_token", query)
        self.assertEqual(query["access_token"], ["correct-access-token"])
class PlentymarketsComplianceFixTest(TestCase):
    """Tests for the Plentymarkets compliance fix.

    The mocked endpoint answers with camelCase token keys (accessToken,
    expiresIn, ...); the assertions below expect them renamed to the
    standard snake_case OAuth2 fields.
    """

    def setUp(self):
        mocker = requests_mock.Mocker()
        mocker.post(
            "https://shop.plentymarkets-cloud02.com",
            json={
                "accessToken": "ecUN1r8KhJewMCdLAmpHOdZ4O0ofXKB9zf6CXK61",
                "tokenType": "Bearer",
                "expiresIn": 86400,
                "refreshToken": "iG2kBGIjcXaRE4xmTVUnv7xwxX7XMcWCHqJmFaSX",
            },
            headers={"Content-Type": "application/json"},
        )
        mocker.start()
        self.addCleanup(mocker.stop)
        plentymarkets = OAuth2Session("someclientid", redirect_uri="https://i.b")
        self.session = plentymarkets_compliance_fix(plentymarkets)

    def test_fetch_access_token(self):
        token = self.session.fetch_token(
            "https://shop.plentymarkets-cloud02.com",
            authorization_response="https://i.b/?code=hello",
        )
        # expires_at is computed from "now", so compare approximately
        approx_expires_at = time.time() + 86400
        actual_expires_at = token.pop("expires_at")
        self.assertAlmostEqual(actual_expires_at, approx_expires_at, places=2)
        # remaining fields must match exactly (snake_case keys)
        self.assertEqual(
            token,
            {
                "access_token": "ecUN1r8KhJewMCdLAmpHOdZ4O0ofXKB9zf6CXK61",
                "expires_in": 86400,
                "token_type": "Bearer",
                "refresh_token": "iG2kBGIjcXaRE4xmTVUnv7xwxX7XMcWCHqJmFaSX",
            },
        )
class EbayComplianceFixTest(TestCase):
    """Verify the eBay fix normalizes the non-standard token_type."""

    def setUp(self):
        # eBay reports token_type "Application Access Token"; the fix
        # is expected to rewrite it to the standard "Bearer".
        m = requests_mock.Mocker()
        m.post(
            "https://api.ebay.com/identity/v1/oauth2/token",
            json={
                "access_token": "this is the access token",
                "expires_in": 7200,
                "token_type": "Application Access Token",
            },
            headers={"Content-Type": "application/json"},
        )
        m.start()
        self.addCleanup(m.stop)
        self.fixed_session = ebay_compliance_fix(OAuth2Session())

    def test_fetch_access_token(self):
        fetched = self.fixed_session.fetch_token(
            "https://api.ebay.com/identity/v1/oauth2/token",
            authorization_response="https://i.b/?code=hello",
        )
        assert fetched["token_type"] == "Bearer"
def access_and_refresh_token_request_compliance_fix_test(session, client_secret):
    """Register a hook on *session* that injects *client_secret* as an
    X-Client-Secret header on both access- and refresh-token requests.

    Returns the same session, for chaining.
    """

    def _add_secret_header(url, headers, body):
        # simulate a provider that wants the secret in a custom header
        headers["X-Client-Secret"] = client_secret
        return url, headers, body

    for hook_name in ("access_token_request", "refresh_token_request"):
        session.register_compliance_hook(hook_name, _add_secret_header)
    return session
class RefreshTokenRequestComplianceFixTest(TestCase):
    """Exercise the hooks registered by
    access_and_refresh_token_request_compliance_fix_test().

    Both mocked endpoints use a ``request_headers`` matcher, so requests
    only match (and the tests only pass) when the registered hook has
    actually added the X-Client-Secret header.
    """

    # header value the mocked endpoints insist on
    value_to_test_for = "value_to_test_for"

    def setUp(self):
        mocker = requests_mock.Mocker()
        mocker.post(
            "https://example.com/token",
            request_headers={"X-Client-Secret": self.value_to_test_for},
            json={
                "access_token": "this is the access token",
                "expires_in": 7200,
                "token_type": "Bearer",
            },
            headers={"Content-Type": "application/json"},
        )
        mocker.post(
            "https://example.com/refresh",
            request_headers={"X-Client-Secret": self.value_to_test_for},
            json={
                "access_token": "this is the access token",
                "expires_in": 7200,
                "token_type": "Bearer",
            },
            headers={"Content-Type": "application/json"},
        )
        mocker.start()
        self.addCleanup(mocker.stop)
        session = OAuth2Session()
        self.fixed_session = access_and_refresh_token_request_compliance_fix_test(
            session, self.value_to_test_for
        )

    def test_access_token(self):
        # hook must fire on the access-token request
        token = self.fixed_session.fetch_token(
            "https://example.com/token",
            authorization_response="https://i.b/?code=hello",
        )
        assert token["token_type"] == "Bearer"

    def test_refresh_token(self):
        # hook must fire on the refresh-token request as well
        token = self.fixed_session.refresh_token(
            "https://example.com/refresh",
        )
        assert token["token_type"] == "Bearer"
| requests/requests-oauthlib | tests/test_compliance_fixes.py | Python | isc | 14,397 |
# coding: utf-8
#
# SearchEntry - An enhanced search entry with timeout
#
# Copyright (C) 2007 Sebastian Heinlein
# 2007-2009 Canonical Ltd.
#
# Authors:
# Sebastian Heinlein <glatzor@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk, GObject, GLib
from gettext import gettext as _
from softwarecenter.ui.gtk3.em import em
class SearchEntry(Gtk.Entry):
    """A search entry that debounces typing.

    "terms-changed" is emitted SEARCH_TIMEOUT milliseconds after the last
    keystroke.  A clear icon is shown while the entry is non-empty, and a
    simple undo/redo history of emitted terms is maintained.
    """

    # FIXME: we need "can-undo", "can-redo" signals
    __gsignals__ = {'terms-changed': (GObject.SignalFlags.RUN_FIRST,
                                      None,
                                      (GObject.TYPE_STRING,))}

    # milliseconds of inactivity before "terms-changed" fires
    SEARCH_TIMEOUT = 600

    def __init__(self, icon_theme=None):
        """
        Creates an enhanced IconEntry that triggers a timeout when typing
        """
        Gtk.Entry.__init__(self)
        self.set_width_chars(25)
        self.set_size_request(0, em(1.7))
        if not icon_theme:
            icon_theme = Gtk.IconTheme.get_default()
        # connect_after so the entry's own "changed" handling runs first
        self._handler_changed = self.connect_after("changed",
                                                   self._on_changed)
        self.connect("icon-press", self._on_icon_pressed)
        self.set_icon_from_icon_name(Gtk.EntryIconPosition.PRIMARY,
                                     'edit-find-symbolic')
        self.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
        # set sensible atk name
        atk_desc = self.get_accessible()
        atk_desc.set_name(_("Search"))
        # data
        self._timeout_id = 0
        self._undo_stack = [""]
        self._redo_stack = []

    def _on_icon_pressed(self, widget, icon, mouse_button):
        """
        Emit the terms-changed signal without any time out when the clear
        button was clicked
        """
        if icon == Gtk.EntryIconPosition.SECONDARY:
            # clear with no signal and emit manually to avoid the
            # search-timeout
            self.clear_with_no_signal()
            self.grab_focus()
            self.emit("terms-changed", "")
        elif icon == Gtk.EntryIconPosition.PRIMARY:
            self.select_region(0, -1)
            self.grab_focus()

    def undo(self):
        """Restore the previous search term, pushing the current one onto
        the redo stack."""
        if len(self._undo_stack) <= 1:
            # the stack always keeps the initial "" entry
            return
        # pop top element and push on redo stack
        text = self._undo_stack.pop()
        self._redo_stack.append(text)
        # the next element is the one we want to display
        text = self._undo_stack.pop()
        self.set_text(text)
        self.set_position(-1)

    def redo(self):
        """Re-apply the most recently undone search term."""
        if not self._redo_stack:
            return
        # just reply the redo stack
        text = self._redo_stack.pop()
        self.set_text(text)
        self.set_position(-1)

    def clear(self):
        self.set_text("")
        self._check_style()

    def set_text(self, text, cursor_to_end=True):
        Gtk.Entry.set_text(self, text)
        # Bug fix: the cursor_to_end parameter was previously ignored and
        # the cursor was always moved to the end of the buffer.
        if cursor_to_end:
            self.emit("move-cursor", Gtk.MovementStep.BUFFER_ENDS, 1, False)

    def set_text_with_no_signal(self, text):
        """Set the text and do not send a term-changed signal"""
        self.handler_block(self._handler_changed)
        self.set_text(text)
        self.emit("move-cursor", Gtk.MovementStep.BUFFER_ENDS, 1, False)
        self.handler_unblock(self._handler_changed)

    def clear_with_no_signal(self):
        """Clear and do not send a term-changed signal"""
        self.handler_block(self._handler_changed)
        self.clear()
        self.handler_unblock(self._handler_changed)

    def _emit_terms_changed(self):
        # Bug fix: the GLib timeout source auto-removes itself once this
        # callback returns a falsy value, so forget the stale id here;
        # otherwise the next keystroke would call GLib.source_remove()
        # with an id that no longer exists (GLib warning).
        self._timeout_id = 0
        text = self.get_text()
        # add to the undo stack once a term changes
        self._undo_stack.append(text)
        self.emit("terms-changed", text)
        return False

    def _on_changed(self, widget):
        """
        Call the actual search method after a small timeout to allow the user
        to enter a longer search term
        """
        self._check_style()
        if self._timeout_id > 0:
            GLib.source_remove(self._timeout_id)
        self._timeout_id = GLib.timeout_add(self.SEARCH_TIMEOUT,
                                            self._emit_terms_changed)

    def _check_style(self):
        """
        Show the clear icon whenever the field is not empty
        """
        if self.get_text() != "":
            self.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY,
                                     Gtk.STOCK_CLEAR)
            # reverse the icon if we are in an rtl environment
            if self.get_direction() == Gtk.TextDirection.RTL:
                pb = self.get_icon_pixbuf(
                    Gtk.EntryIconPosition.SECONDARY).flip(True)
                self.set_icon_from_pixbuf(Gtk.EntryIconPosition.SECONDARY, pb)
        else:
            self.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
| sti-lyneos/shop | softwarecenter/ui/gtk3/widgets/searchentry.py | Python | lgpl-3.0 | 5,342 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to download and do light processing on import data."""
# TODO(beets): Add tests
import io
import os
import ssl
import pandas as pd
import requests
import zipfile
DOWNLOAD_URI = 'https://www.epa.gov/sites/default/files/2020-11/2019_data_summary_spreadsheets.zip'
YEAR_DATA_FILENAME = 'ghgp_data_{year}.xlsx'
HEADER_ROW = 3
CROSSWALK_URI = 'https://www.epa.gov/sites/default/files/2020-12/ghgrp_oris_power_plant_crosswalk_11_24_20.xlsx'
CROSSWALK_COLS_TO_KEEP = [
'GHGRP Facility ID', 'ORIS CODE', 'ORIS CODE 2', 'ORIS CODE 3',
'ORIS CODE 4', 'ORIS CODE 5'
]
GHGRP_ID_COL = 'Facility Id'
_DIRECT_EMITTERS_SHEET = 'Direct Emitters'
SHEET_NAMES_TO_CSV_FILENAMES = {
_DIRECT_EMITTERS_SHEET: 'direct_emitters.csv',
'Onshore Oil & Gas Prod.': 'oil_and_gas.csv',
'Gathering & Boosting': 'gathering_and_boosting.csv',
'LDC - Direct Emissions': 'local_distribution.csv',
'SF6 from Elec. Equip.': 'elec_equip.csv',
# Needs schema:
# - 'Transmission Pipelines',
# The following sheets are skipped due to sparse data:
# - 'Suppliers',
# - 'CO2 Injection',
# - 'Geologic Sequestration of CO2',
}
class Downloader:
    """Downloads EPA GHGRP summary spreadsheets and exports them to CSV.

    The following must be called in order. Earlier steps can be skipped if
    they have successfully completed in a previous run.
    - download_data
    - extract_all_years
    - save_all_crosswalks
    """

    def __init__(self, save_path):
        self.years = list(range(2010, 2020))
        self.current_year = None  # year currently being processed
        self.files = []  # list of (year, filename) of all extracted files
        self.save_path = save_path

    def download_data(self):
        """Downloads and unzips excel files from DOWNLOAD_URI."""
        print('Downloading data')
        r = requests.get(DOWNLOAD_URI)
        z = zipfile.ZipFile(io.BytesIO(r.content))
        z.extractall(self.save_path)

    def extract_all_years(self):
        """Saves relevant sheets from each year's Excel file to a csv.

        Also writes one "cols_<csv>" file per sheet recording each year's
        column headers.  Returns the list of (year, csv_path) pairs.
        """
        headers = {sheet: {} for sheet in SHEET_NAMES_TO_CSV_FILENAMES}
        for current_year in self.years:
            print(f'Extracting data for {current_year}')
            self.current_year = current_year
            self._extract_data(headers)
        for sheet, csv_name in SHEET_NAMES_TO_CSV_FILENAMES.items():
            headers_df = pd.DataFrame.from_dict(headers[sheet], orient='index')
            headers_df.transpose().to_csv(os.path.join(self.save_path,
                                                       f'cols_{csv_name}'),
                                          index=None)
        return self.files

    def save_all_crosswalks(self, filepath):
        """Builds individual year crosswalks, then saves a joined
        crosswalk covering all years to *filepath*."""
        print('Saving all ID crosswalks')
        crosswalks = []
        for current_year in self.years:
            # Bug fix: _gen_crosswalk() resolves CSV paths through
            # self.current_year (via _csv_path), so it must be advanced
            # here.  Previously the loop variable was never assigned to
            # it, and every iteration re-read the same year's CSVs.
            self.current_year = current_year
            crosswalks.append(self._gen_crosswalk())
        all_crosswalks_df = pd.concat(crosswalks, join='outer')
        all_crosswalks_df = all_crosswalks_df.sort_values(
            by=[GHGRP_ID_COL, 'FRS Id', 'ORIS CODE'])
        all_crosswalks_df = all_crosswalks_df.drop_duplicates()
        all_crosswalks_df.to_csv(filepath, header=True, index=None)
        return all_crosswalks_df

    def _csv_path(self, csv_filename, year=None):
        """Path of the per-year CSV, defaulting to self.current_year."""
        if not year:
            year = self.current_year
        return os.path.join(self.save_path, f'{year}_{csv_filename}')

    def _extract_data(self, headers):
        """Extracts the relevant sheets of self.current_year's workbook,
        recording each sheet's column headers in *headers*."""
        summary_filename = os.path.join(
            self.save_path, YEAR_DATA_FILENAME.format(year=self.current_year))
        xl = pd.ExcelFile(summary_filename, engine='openpyxl')
        for sheet in xl.sheet_names:
            csv_filename = SHEET_NAMES_TO_CSV_FILENAMES.get(sheet, None)
            if not csv_filename:
                print(f'Skipping sheet: {sheet}')
                continue
            summary_file = xl.parse(sheet, header=HEADER_ROW, dtype=str)
            csv_filename = self._csv_path(csv_filename)
            summary_file.to_csv(csv_filename, index=None, header=True)
            headers[sheet][self.current_year] = summary_file.columns
            self.files.append((self.current_year, csv_filename))

    def _gen_crosswalk(self):
        """Joins self.current_year's facility CSVs against the
        GHGRP<->ORIS crosswalk spreadsheet."""
        # Per https://stackoverflow.com/a/56230607
        ssl._create_default_https_context = ssl._create_unverified_context
        oris_df = pd.read_excel(CROSSWALK_URI,
                                'ORIS Crosswalk',
                                header=0,
                                dtype=str,
                                usecols=CROSSWALK_COLS_TO_KEEP,
                                engine='openpyxl')
        oris_df = oris_df.rename(columns={'GHGRP Facility ID': GHGRP_ID_COL})
        # DataFrame.append was removed in pandas 2.0; collect the frames
        # and concat once instead.
        frames = []
        for sheet, csv_filename in SHEET_NAMES_TO_CSV_FILENAMES.items():
            csv_path = self._csv_path(csv_filename)
            if not os.path.exists(csv_path):
                continue
            frames.append(
                pd.read_csv(csv_path,
                            usecols=[GHGRP_ID_COL, 'FRS Id'],
                            dtype=str))
        if frames:
            all_facilities_df = pd.concat(frames, ignore_index=True)
        else:
            # keep the join below well-defined even with no input CSVs
            all_facilities_df = pd.DataFrame(columns=[GHGRP_ID_COL, 'FRS Id'])
        all_facilities_df = all_facilities_df.join(
            oris_df.set_index(GHGRP_ID_COL), on=GHGRP_ID_COL, how='left')
        return all_facilities_df
if __name__ == '__main__':
    downloader = Downloader('tmp_data')
    downloader.download_data()
    downloader.extract_all_years()
    # Bug fix: this is module scope, so there is no `self`; use the
    # downloader's own save_path (previously a NameError at runtime).
    downloader.save_all_crosswalks(
        os.path.join(downloader.save_path, 'crosswalks.csv'))
| datacommonsorg/data | scripts/us_epa/ghgrp/download.py | Python | apache-2.0 | 6,169 |
# Lazygal, a lazy static web gallery generator.
# Copyright (C) 2007-2012 Alexandre Rossi <alexandre.rossi@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
__all__ = ['generators', ]
# Compute installation prefix.
# A setup.py one directory above this package means we are running from an
# unpacked source tree; otherwise assume a system-wide install.
if os.path.isfile(os.path.join(os.path.dirname(__file__), '..', 'setup.py')):
    INSTALL_MODE = 'source'
    INSTALL_PREFIX = ''
else:
    # Lazygal is installed, assume we are in
    # $prefix/lib/python2.X/dist-packages/lazygal
    INSTALL_MODE = 'installed'
    INSTALL_PREFIX = os.path.join(os.path.dirname(__file__),
                                  '..', '..', '..', '..')
    INSTALL_PREFIX = os.path.normpath(INSTALL_PREFIX)
def get_hg_rev():
    """Return the short Mercurial changeset id of the working copy.

    Returns '' when this is not a development checkout: no .hg directory,
    mercurial not importable, repository errors, or the working copy's
    parent revision carries a (non-'tip') tag, which is taken to mean a
    release.
    """
    try:
        lazygal_dir = os.path.join(os.path.dirname(__file__), '..')
        if not os.path.isdir(os.path.join(lazygal_dir, '.hg')):
            raise IOError
        import mercurial.hg
        import mercurial.node
        import mercurial.ui
        repo = mercurial.hg.repository(mercurial.ui.ui(), lazygal_dir)
        last_revs = repo.changelog.parents(repo.dirstate.parents()[0])
        known_tags = repo.tags().items()
        for tag, rev in known_tags:
            if tag != 'tip':
                for last_rev in last_revs:
                    if rev == last_rev:
                        # This is a tagged revision, assume this is a release.
                        return ''
        return mercurial.node.short(last_revs[0])
    except (IOError, OSError, ImportError):
        return ''
__version__ = '0.8.1'
# Append the short changeset hash for untagged (development) checkouts.
hg_rev = get_hg_rev()
if hg_rev: __version__ += '+hg' + hg_rev
# vim: ts=4 sw=4 expandtab
| Konubinix/lazygal | lazygal/__init__.py | Python | gpl-2.0 | 2,278 |
"""Train and evaluate TnT and CRF POS taggers on the Penn Treebank sample."""
from nltk.corpus import treebank
from nltk.tag import tnt, CRFTagger

import os.path

# split training data from test data
train_data = treebank.tagged_sents()[:3000]
test_data = treebank.tagged_sents()[3000:]

# train a trigram N tagger (TnT)
tnt_pos_tagger = tnt.TnT()
tnt_pos_tagger.train(train_data)
# print() is called as a function so the script also runs on Python 3
# (the original used Python-2-only print statements)
print(tnt_pos_tagger.evaluate(test_data))

# train a CRF tagger
crf_tagger = CRFTagger()
# expand '~' explicitly: the model path is handed to the trainer as-is,
# and nothing performs tilde expansion on it
crf_tagger.train(train_data,
                 os.path.expanduser('~/Documents/NLP/NLP/crf_model.txt'))
print(crf_tagger.evaluate(test_data))
| Elixeus/NLP | own_model.py | Python | mit | 497 |
"""SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
import os
import sys
import time
import weakref
# Global flag; the Main script flips it to True when instance-creation
# tracking is requested.
track_instances = False
# Maps class name -> list of weak references to tracked instances.
tracked_classes = {}

def logInstanceCreation(instance, name=None):
    """Record a weak reference to *instance* under *name*, defaulting to
    the instance's class name."""
    if name is None:
        name = instance.__class__.__name__
    tracked_classes.setdefault(name, []).append(weakref.ref(instance))
def string_to_classes(s):
    """Expand a class-selection string: '*' means every tracked class
    (sorted by name); anything else is split on whitespace."""
    if s != '*':
        return s.split()
    return sorted(tracked_classes.keys())
def fetchLoggedInstances(classes="*"):
    """Return (classname, tracked-count) pairs for the selected classes."""
    result = []
    for classname in string_to_classes(classes):
        result.append((classname, len(tracked_classes[classname])))
    return result
def countLoggedInstances(classes, file=sys.stdout):
    """Write one "name: count" line per selected tracked class to *file*."""
    for classname in string_to_classes(classes):
        file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
    """Write the repr of every still-live tracked instance, grouped by
    class, to *file*."""
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            obj = ref()
            # a dead weakref dereferences to None; skip collected instances
            if obj is not None:
                file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
    """Write every live tracked instance together with its full
    __dict__ contents, grouped by class, to *file*."""
    for classname in string_to_classes(classes):
        file.write('\n%s:\n' % classname)
        for ref in tracked_classes[classname]:
            obj = ref()
            # skip instances that have already been garbage-collected
            if obj is not None:
                file.write(' %s:\n' % obj)
                for key, value in obj.__dict__.items():
                    file.write(' %20s : %s\n' % (key, value))
# Define a platform-appropriate memory() helper returning a process
# memory figure (units vary by platform; 0 when nothing is available).
if sys.platform[:5] == "linux":
    # Linux doesn't actually support memory usage stats from getrusage().
    def memory():
        # field 23 (index 22) of /proc/self/stat is vsize, in bytes
        mstr = open('/proc/self/stat').read()
        mstr = mstr.split()[22]
        return int(mstr)
elif sys.platform[:6] == 'darwin':
    #TODO really get memory stats for OS X
    def memory():
        return 0
else:
    try:
        import resource
    except ImportError:
        try:
            import win32process
            import win32api
        except ImportError:
            # no usable API at all: report zero
            def memory():
                return 0
        else:
            def memory():
                # peak working set size via the Win32 API
                process_handle = win32api.GetCurrentProcess()
                memory_info = win32process.GetProcessMemoryInfo( process_handle )
                return memory_info['PeakWorkingSetSize']
    else:
        def memory():
            # NOTE(review): index 4 of the rusage tuple is ru_idrss, not
            # ru_maxrss (index 2) — confirm which field was intended.
            res = resource.getrusage(resource.RUSAGE_SELF)
            return res[4]
# returns caller's stack
def caller_stack():
    """Return the current call stack (minus this helper and its caller)
    as a list of 'file:line(function)' strings, abbreviated through
    func_shorten()."""
    import traceback
    tb = traceback.extract_stack()
    # strip itself and the caller from the output
    tb = tb[:-2]
    result = []
    for back in tb:
        # (filename, line number, function name, text)
        key = back[:3]
        result.append('%s:%d(%s)' % func_shorten(key))
    return result
# callee (file, line, func) -> number of times caller_trace() saw it
caller_bases = {}
# callee key -> {caller-chain key: count}; chains grow one frame per level
caller_dicts = {}
# trace a caller's stack
def caller_trace(back=0):
    """Record one call event for the function that invoked this helper:
    bump its count in caller_bases and accumulate the chain of callers
    (keyed by the full path back to the callee) in caller_dicts."""
    import traceback
    tb = traceback.extract_stack(limit=3+back)
    tb.reverse()
    callee = tb[1][:3]
    caller_bases[callee] = caller_bases.get(callee, 0) + 1
    for caller in tb[2:]:
        # key each caller by the whole chain back to the original callee
        caller = callee + caller[:3]
        try:
            entry = caller_dicts[callee]
        except KeyError:
            caller_dicts[callee] = entry = {}
        entry[caller] = entry.get(caller, 0) + 1
        callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
    """Recursively write the callers of *key* to *file*, most frequent
    first, indenting one step per nesting level."""
    leader = ' '*level
    # sort on negated count so ties fall back to the key ordering
    for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
        file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
        if c in caller_dicts:
            _dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
    """Write the full caller-count report accumulated by caller_trace():
    one header line per traced callee, then its caller tree."""
    for k in sorted(caller_bases.keys()):
        file.write("Callers of %s:%d(%s), %d calls:\n"
            % (func_shorten(k) + (caller_bases[k],)))
        _dump_one_caller(k, file)
# (path fragment, strip_fragment) pairs used to abbreviate file names in
# debug output.  A true strip_fragment cuts the fragment itself off as
# well; otherwise the path is kept from the fragment onward.
shorten_list = [
    ( '/scons/SCons/', 1),
    ( '/src/engine/SCons/', 1),
    ( '/usr/lib/python', 0),
]

if os.sep != '/':
    # rewrite the fragments with the native directory separator
    shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]

def func_shorten(func_tuple):
    """Abbreviate the file name of a (filename, lineno, funcname) tuple
    using the first matching entry of shorten_list; tuples that match
    nothing are returned unchanged."""
    filename = func_tuple[0]
    for fragment, strip_fragment in shorten_list:
        pos = filename.find(fragment)
        if pos < 0:
            continue
        if strip_fragment:
            pos += len(fragment)
        return (filename[pos:],) + func_tuple[1:]
    return func_tuple
# cache of open trace destinations, keyed by file name / file object
TraceFP = {}
if sys.platform == 'win32':
    TraceDefault = 'con'
else:
    TraceDefault = '/dev/tty'
# sticky default for the tstamp argument of Trace()
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime

def Trace(msg, file=None, mode='w', tstamp=None):
    """Write a trace message to a file. Whenever a file is specified,
    it becomes the default for the next call to Trace()."""
    global TraceDefault
    global TimeStampDefault
    global PreviousTime
    # remember / recall the sticky defaults for destination and timestamping
    if file is None:
        file = TraceDefault
    else:
        TraceDefault = file
    if tstamp is None:
        tstamp = TimeStampDefault
    else:
        TimeStampDefault = tstamp
    try:
        fp = TraceFP[file]
    except KeyError:
        try:
            fp = TraceFP[file] = open(file, mode)
        except TypeError:
            # Assume we were passed an open file pointer.
            fp = file
    if tstamp:
        now = time.time()
        # elapsed-since-start and delta-since-previous, in seconds
        fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
        PreviousTime = now
    fp.write(msg)
    fp.flush()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| IljaGrebel/OpenWrt-SDK-imx6_HummingBoard | staging_dir/host/lib/scons-2.3.5/SCons/Debug.py | Python | gpl-2.0 | 6,970 |
from src.types.game import Game
from src.types.player import Player
class GamesAndPlayers(object):
    """Container for the combined games/players payload of an API response."""

    def __init__(self, player_db):
        self.player_db = player_db

    def parse(self, data):
        """Populate this object from a decoded response dict; returns self."""
        db = self.player_db
        self.points = data.get('CurrentTotalPoints')
        # NOTE: players are parsed before games on purpose (original
        # comment: "Players before Games") — keep this ordering.
        self.players = [Player(db).parse(raw) for raw in data.get('Players')]
        self.games = [Game(db).parse(raw) for raw in data.get('Games')]
        return self

    def missing_player_ids(self):
        """IDs of game players that have a truthy id but no extra data."""
        return {
            player.id
            for game in self.games
            for player in game.players
            if not player.has_extras() and player.id
        }
| gak/giant-multiplayer-robot-helper | src/types/games_and_players.py | Python | mit | 846 |
from PIL import Image
import os.path,os
#import pickle
#import sqlite3
import hashlib
import time
import random
import logging
import copy
import threading
import itertools
from math import ceil
from enum import Enum
from copy import deepcopy
import itertools
from lipyc.utility import recursion_protect
from lipyc.Version import Versionned
from lipyc.config import *
from lipyc.utility import check_ext, make_thumbnail
from tkinter import messagebox
class Album(Versionned): #subalbums not fully implemented
    """An album: a set of files plus nested subalbums.

    Blob storage (files, thumbnails) is delegated to ``self.scheduler``,
    which appears to reference-count payloads via add/duplicate/remove —
    TODO confirm against the scheduler implementation.
    """

    def __init__(self, id, scheduler, name=None, datetime=None):
        # datetime defaults to "now" (UTC seconds since the epoch)
        super().__init__()
        self.scheduler = scheduler
        self.id = id
        self.name = name
        self.datetime = datetime if datetime else time.mktime(time.gmtime())
        self.subalbums = set()
        self.thumbnail = None
        self.files = set() #order by id
        self.inner_keys = [] #use for inner albums

    def __deepcopy__(self, memo):
        # deep-copies the contents but intentionally shares the scheduler
        new = Album(self.id, self.scheduler, self.name, self.datetime)
        new.subalbums = deepcopy(self.subalbums)
        new.thumbnail = deepcopy(self.thumbnail)
        new.files = deepcopy(self.files)
        new.inner_keys = deepcopy(self.inner_keys)
        return new

    #for copy_to,add_to,move_to
    def clone(self, new_id):
        """Deep copy carrying a fresh id and cleared inner_keys."""
        alb = self.__deepcopy__(None)
        alb.inner_keys.clear()
        alb.id = new_id
        return alb

    def pseudo_clone(self):
        """Shallow copy sharing subalbums/files/thumbnail with self."""
        new = Album(self.id, self.scheduler, self.name, self.datetime)
        if self.thumbnail:
            # NOTE(review): other methods call scheduler.duplicate_file();
            # confirm `duplicate` is the intended API here.
            self.scheduler.duplicate(self.thumbnail)
        new.subalbums = self.subalbums
        new.thumbnail = self.thumbnail
        new.files = self.files
        return new

    def sql(self):
        """Return this album as a flat tuple for database storage;
        id collections are serialized as '|'-separated strings."""
        return (self.id, self.name, self.datetime,
            '|'.join( [ str(alb.id) for alb in self.subalbums] ), self.thumbnail,
            '|'.join( [ str(afile.id) for afile in self.files] ),
            '|'.join(self.inner_keys) )

    def rename(self, name):
        self.name = name

    def add_file(self, _file):
        """Add a file; adopt its thumbnail if the album has none yet."""
        self.files.add(_file)
        if self.thumbnail == None and _file.thumbnail :
            self.thumbnail = self.scheduler.duplicate_file( _file.thumbnail )

    def remove_file(self, _file):
        self.files.discard(_file)

    @recursion_protect()
    def remove_all(self):
        """Recursively empty the album: subalbums first, then files."""
        for album in list(self.subalbums):
            album.remove_all()
        self.subalbums.clear()
        for _file in list(self.files):
            self.remove_file(_file)
        self.files.clear()

    def add_subalbum(self, album):
        self.subalbums.add( album )

    def remove_subalbum(self, album):
        """Detach *album*, releasing its thumbnail from the scheduler."""
        if album in self.subalbums:
            if album.thumbnail :
                self.scheduler.remove_file( album.thumbnail )
            self.subalbums.discard( album )

    @recursion_protect()
    def export_to(self, path):
        """Recursively export the album tree under path/<album name>."""
        location = os.path.join(path, self.name)
        if not os.path.isdir(location):
            os.makedirs( location )
        for _file in self.files:
            _file.export_to(location)
        for album in self.subalbums:
            album.export_to( location )

    @recursion_protect()
    def lock_files(self):
        """Acquire the io_lock of every file in the tree (no unlock here)."""
        for _file in self.files:
            _file.io_lock.acquire()
        for album in self.subalbums:
            album.lock_files()

    def set_thumbnail(self, location):
        """Replace the album thumbnail; *location* is either an open file
        object / image path, or anything else to fall back to the default."""
        if self.thumbnail :
            self.scheduler.remove_file(self.thumbnail)
        if not isinstance(location, str) or check_ext(location, img_exts): # open file object
            self.thumbnail = make_thumbnail(self.scheduler, location )
        else:
            self.thumbnail = self.scheduler.add_file(location_album_default) #size and md5 ought to be computed once for all

    def deep_files(self):
        """Iterate over this album's files and every descendant's files."""
        tmp = itertools.chain.from_iterable(map(Album.deep_files, self.subalbums))
        return itertools.chain( self.files, tmp)

    @recursion_protect(0)
    def __len__(self): #number of file in dir and subdir
        return len(self.files) + sum( [len(a) for a in self.subalbums ] )

    @recursion_protect(0)
    def all_albums(self):
        """Iterate over self plus all descendant albums."""
        return itertools.chain( [self], *list(map( lambda x:x.all_albums(), self.subalbums )) )

    @recursion_protect(0)
    def all_files(self):
        """Set of every file reachable from this album."""
        return set(itertools.chain( *list(map(lambda x:x.files, self.all_albums()))))

    @recursion_protect(0)
    def duplicate(self):
        """Bump scheduler references for the whole tree's payloads."""
        if self.thumbnail:
            self.scheduler.duplicate_file(self.thumbnail)
        for f in self.files:
            f.duplicate()
        for alb in self.subalbums:
            alb.duplicate()
| severus21/LiPyc | src/Album.py | Python | apache-2.0 | 4,909 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pyparsecom.query import Query
from pyparsecom.objects import ParseObject
from tests import init_parse
class QueryTest(unittest.TestCase):
    """Integration tests for pyparsecom.Query.

    init_parse() (from the tests package) presumably configures the Parse
    API connection; every test talks to that backend — TODO confirm.
    """

    def setUp(self):
        init_parse()

    def tearDown(self):
        pass

    def create_cities(self):
        # helper: persist one city without a country and one with
        class City(ParseObject):
            pass
        ny = City(name='New York')
        sf = City(name='San Francisco', country='United States')
        ny.save()
        sf.save()

    def test_simple_fetch(self):
        # fetch() must return fully-loaded City instances
        class City(ParseObject):
            pass
        ny = City(name='New York')
        ny.save()
        query = Query('City')
        cities = query.fetch()
        for city in cities:
            self.assertTrue(isinstance(city, City))
            self.assertTrue(city._is_loaded)
            self.assertTrue(hasattr(city, 'objectId'))
            # attribute values must be unpacked, never raw dicts
            for k, v in city.__dict__.items():
                self.assertFalse(isinstance(v, dict))

    def test_get(self):
        # get() by objectId returns the matching typed object
        class City(ParseObject):
            pass
        ny = City(name='New York')
        ny.save()
        ny2 = Query('City').get(ny.objectId)
        self.assertTrue(isinstance(ny2, City))
        self.assertEqual(ny.objectId, ny2.objectId)

    def test_where(self):
        # equal_to() filters results on the given attribute value
        class City(ParseObject):
            pass
        name = 'New York'
        ny = City(name=name)
        sf = City(name='San Francisco')
        ny.save()
        sf.save()
        query = Query('City').equal_to('name', name)
        cities = query.fetch()
        self.assertTrue(len(cities) > 0)
        for city in cities:
            self.assertEqual(city.name, name)

    def test_keys(self):
        # keys() restricts returned fields; objects come back partial
        class City(ParseObject):
            pass
        name = 'New York'
        ny = City(name=name)
        sf = City(name='San Francisco', country='United States')
        ny.save()
        sf.save()
        query = Query('City').keys(['country'])
        cities = query.fetch()
        self.assertTrue(len(cities) > 0)
        for city in cities:
            self.assertRaises(AttributeError, getattr, city, 'name')
            self.assertFalse(city._is_loaded)

    def test_limit(self):
        # limit(1) caps the result set size
        class City(ParseObject):
            pass
        name = 'New York'
        ny = City(name=name)
        sf = City(name='San Francisco', country='United States')
        ny.save()
        sf.save()
        query = Query('City')
        cities = query.limit(1).fetch()
        self.assertTrue(len(cities) == 1)
        # a fresh query with the same limit behaves identically
        query = Query('City')
        cities = query.limit(1).fetch()
        self.assertTrue(len(cities) == 1)

    def test_exists(self):
        # exists() keeps only objects that define the attribute
        self.create_cities()
        query = Query('City')
        query = query.exists('country')
        cities = query.fetch()
        self.assertTrue(len(cities) > 0)
        for city in cities:
            self.assertTrue(hasattr(city, 'country'))

    def test_does_not_exist(self):
        # does_not_exist() keeps only objects lacking the attribute
        self.create_cities()
        query = Query('City')
        query = query.does_not_exist('country')
        cities = query.fetch()
        self.assertTrue(len(cities) > 0)
        for city in cities:
            self.assertFalse(hasattr(city, 'country'))

    def test_query_set_slicing(self):
        # fetch() results support list-style slicing
        self.create_cities()
        query = Query('City')
        query = query.does_not_exist('country')
        cities = query.fetch()[2:4]
        self.assertEqual(2, len(cities))
| justinwp/pyparsecom | tests/test_Query.py | Python | mit | 3,440 |
"""
tests for quantecon.compute_fp module
@author : Spencer Lyon
@date : 2014-07-31
References
----------
https://www.math.ucdavis.edu/~hunter/book/ch3.pdf
TODO: add multivariate case
"""
from __future__ import division
import unittest
from quantecon import compute_fixed_point
class TestFPLogisticEquation(unittest.TestCase):
    """Tests compute_fixed_point on the logistic map T(x) = 4*mu*x*(1 - x).

    For mu = 0.2 the unique fixed point on [0, 1] is 0; for mu = 0.3 the
    interior fixed point is (4*mu - 1) / (4*mu).
    """

    @classmethod
    def setUpClass(cls):
        cls.mu_1 = 0.2  # 0 is unique fixed point forall x_0 \in [0, 1]

        # (4mu - 1)/(4mu) is a fixed point forall x_0 \in [0, 1]
        cls.mu_2 = 0.3

        # starting points on (0, 1)
        # NOTE: the attribute keeps its historical (misspelled) name
        # 'unit_inverval' so any external reference keeps working.
        cls.unit_inverval = [0.1, 0.3, 0.6, 0.9]

        # arguments for compute_fixed_point
        cls.kwargs = {"error_tol": 1e-5, "max_iter": 200, "verbose": 0}

    def T(self, x, mu):
        """The logistic map."""
        return 4.0 * mu * x * (1.0 - x)

    def test_contraction_1(self):
        "compute_fp: convergence inside interval of convergence"
        # local def instead of a name-assigned lambda (PEP 8, E731)
        def f(x):
            return self.T(x, self.mu_1)
        for i in self.unit_inverval:
            # should have fixed point of 0.0
            self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs))
                            < 1e-4)

    def test_not_contraction_2(self):
        "compute_fp: no convergence outside interval of convergence"
        def f(x):
            return self.T(x, self.mu_2)
        for i in self.unit_inverval:
            # This shouldn't converge to 0.0
            self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs))
                             < 1e-4)

    def test_contraction_2(self):
        "compute_fp: convergence inside interval of convergence"
        def f(x):
            return self.T(x, self.mu_2)
        fp = (4 * self.mu_2 - 1) / (4 * self.mu_2)
        for i in self.unit_inverval:
            # This should converge to fp
            self.assertTrue(abs(compute_fixed_point(f, i, **self.kwargs) - fp)
                            < 1e-4)

    def test_not_contraction_1(self):
        "compute_fp: no convergence outside interval of convergence"
        def f(x):
            return self.T(x, self.mu_1)
        fp = (4 * self.mu_1 - 1) / (4 * self.mu_1)
        for i in self.unit_inverval:
            # This should not converge (b/c unique fp is 0.0)
            self.assertFalse(abs(compute_fixed_point(f, i, **self.kwargs) - fp)
                             < 1e-4)
| dingliumath/quant-econ | quantecon/tests/test_compute_fp.py | Python | bsd-3-clause | 2,288 |
# -*- coding: utf-8 -*-
"""
Copyright © 2017 - Alexandre Machado <axmachado@gmail.com>
This file is part of Simple POS Compiler.
Simnple POS Compiler is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Simple POS Compiler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple POS Compiler. If not, see <http://www.gnu.org/licenses/>.
Boolean and logical expressions and conditions
@author: Alexandre Machado <axmachado@gmail.com>
"""
import logging
from .base import CodeGenerationError, quotedValue
from .variables import Assignment
from ..objfile import typedefs
# pylint: disable=C0103
# it's not a constant
logger = logging.getLogger("link")
# noinspection PyMethodMayBeStatic
class Conditional(object):
    """Base class for the conditions evaluated by "if" and "while".

    Statements of that kind evaluate a condition before running their
    main block; subclasses implement the actual comparison behaviour.
    """

    def __init__(self):
        super(Conditional, self).__init__()

    def preConditionalCode(self, codeBlock, controlVariable=None):
        # pylint: disable=W0613
        """Emit into codeBlock any code needed before the comparison.

        The base implementation has nothing to prepare.
        """
        pass

    def emit(self):
        # pylint: disable=R0201
        """Return the comparison attributes for the conditional tag."""
        return ""

    def negate(self):
        """Invert the logical value of the condition (no-op here)."""
        pass

    def usesVariable(self, name):
        """Tell whether the condition reads the named variable."""
        return False

    def assignVariable(self, name):
        """Tell whether the condition writes the named variable."""
        return False
class ConstantConditional(Conditional):
    """Condition given by an Integer or Logical constant.

    Following the language rules, zero counts as false and any other
    value counts as true.
    """

    def __init__(self, value):
        super(ConstantConditional, self).__init__()
        self.value = value

    def negate(self):
        # boolean negation of the stored constant
        self.value = not self.value
class SimpleComparison(Conditional):
    """
    Compare the value of a variable to any value.

    POSXML requires the variable to appear on the left-hand side of the
    comparison; when the source expression has the variable on the
    right, the operands are swapped and the operator is mirrored.
    """
    # POSXML conditional operators constants
    # pylint: disable=C0103
    # those are really constants, but pylint thinks they are class attributes
    LT = 'lessthan'
    GT = 'greaterthan'
    EQ = 'equalto'
    NE = 'notequalto'
    GE = 'greaterthanorequalto'
    LE = 'lessthanorequalto'

    def __init__(self, left, op, right):
        """
        :param left: left operand (a "$(var)" reference or a literal)
        :param op: source operator: <, >, ==, !=, >= or <=
        :param right: right operand
        :raises CodeGenerationError: when ``op`` is not a valid operator
        """
        super(SimpleComparison, self).__init__()
        operators = {
            '<': self.LT,
            '>': self.GT,
            '==': self.EQ,
            '!=': self.NE,
            '>=': self.GE,
            '<=': self.LE,
        }
        try:
            self.operator = operators[op]
        except KeyError:
            # BUG FIX: the original passed the format string and the
            # operator as two separate constructor arguments, so the
            # message was never formatted.
            raise CodeGenerationError('Invalid operator "%s"' % op)
        self.originalOperator = self.operator
        self._updateValues(left, right)

    def _updateValues(self, vLeft, vRight):
        """
        Update the values of the expression.

        Used by subclasses and when the simple comparison is used inside
        a complex logical expression, when the values of the expression
        must be evaluated before the comparison.
        """
        # in POSXML, the left side of a comparison must always be
        # a variable. So, if the left side of our expression is not a
        # variable, we must "invert" the expression
        if vLeft.startswith('$'):
            self.variable = vLeft
            self.value = vRight
            invert = False
        else:
            self.variable = vRight
            self.value = vLeft
            invert = True
        # restart from the un-mirrored operator on every update
        self.operator = self.originalOperator
        if invert:
            # mirror the relational operators; (in)equality is symmetric
            mirrored = {self.LT: self.GT, self.GT: self.LT,
                        self.GE: self.LE, self.LE: self.GE}
            self.operator = mirrored.get(self.operator, self.operator)

    def negate(self):
        """
        Negate the result of the comparison.

        The negation is permanent: it also replaces the "original"
        operator used as the basis for later _updateValues() calls.
        """
        negated = {self.LT: self.GE, self.GE: self.LT,
                   self.GT: self.LE, self.LE: self.GT,
                   self.EQ: self.NE, self.NE: self.EQ}
        self.operator = negated.get(self.operator, self.operator)
        self.originalOperator = self.operator

    def usesVariable(self, name):
        """Check whether the comparison references the named variable."""
        varInExpression = '$(' + name + ')'
        return (self.variable == varInExpression) \
            or (varInExpression in self.value)

    def emit(self):
        "emits the attributes for the POSXML tag"
        return 'variable="%s" operator="%s" value=%s' % \
            (self.variable, self.operator, quotedValue(self.value))
class LogicalValueContext(object):
    """
    Context to process intermediate code generation for logical
    expressions and values.

    Values that cannot be expressed directly in POSXML (negations,
    relational and logical sub-expressions) are materialized as
    auto-generated integer variables whose value is computed by "if"
    statements appended to the associated code block.
    """
    def __init__(self, codeBlock):
        # codeBlock: the block that receives any intermediate statements
        super(LogicalValueContext, self).__init__()
        self.codeBlock = codeBlock

    def procNegatedValue(self, value):
        """
        Processes the negation of an expression value: !(expression).

        Returns the name of the variable holding the (in-place) negated
        value when the operand is a variable, or a folded '0'/'1'
        literal when the operand is a constant.
        """
        # import here to avoid cyclic import problems
        from .control import IfStatement
        valueToNegate = self.procValue(value.value)
        if valueToNegate.startswith('$'):
            # the value is a variable. It's necessary to produce
            # an "if" statement
            # NOTE(review): valueToNegate already starts with '$', so
            # '$(%s)' % valueToNegate yields a doubly wrapped reference -
            # confirm this is the intended POSXML form.
            condition = SimpleComparison('$(%s)' % valueToNegate, '==', '0')
            ifBlock = [Assignment(typedefs.INT, valueToNegate, '1')]
            elseBlock = [Assignment(typedefs.INT, valueToNegate, '0')]
            ifStm = IfStatement(self.codeBlock)
            ifStm.selfGenerated(condition, ifBlock, elseBlock)
            self.codeBlock.addStatements(ifStm)
            return valueToNegate
        else:
            # constant operand: fold the negation at generation time
            return '1' if int(valueToNegate) == 0 else '0'

    def procRelationalExpression(self, value):
        """Processes a relational expression.

        The 0/1 outcome is stored in an auto-generated int variable via
        a generated if/else; returns the '$(name)' reference to it.
        """
        # import here to avoid cyclic import problems
        from .control import IfStatement
        exprResult = self.codeBlock.currentScope().autoInt()
        leftVal = self.procValue(value.left)
        rightVal = self.procValue(value.right)
        conditional = SimpleComparison(leftVal, value.operator, rightVal)
        ifBlock = [Assignment(typedefs.INT, exprResult, '1')]
        elseBlock = [Assignment(typedefs.INT, exprResult, '0')]
        ifStm = IfStatement(self.codeBlock)
        ifStm.selfGenerated(conditional, ifBlock, elseBlock)
        self.codeBlock.addStatements(ifStm)
        return "$(%s)" % exprResult

    def procLogicalExpression(self, value):
        """Processes a logical (&&/||) expression.

        Delegates code generation to LogicalExpr, targeting an
        auto-generated int result variable; returns its reference.
        """
        exprResult = self.codeBlock.currentScope().autoInt()
        logExpr = LogicalExpr(value.left, value.operator, value.right)
        logExpr.preConditionalCode(self.codeBlock, exprResult)
        return "$(%s)" % exprResult

    def procValue(self, value):
        """Processes a logical value, generating intermediate code.

        Returns the textual form used in generated POSXML ('$(name)'
        for variables, a literal for constants). Returns None when the
        value type is not one of the handled typedefs classes.
        """
        result = None
        if not isinstance(value, typedefs.Value):
            result = str(value)
        elif isinstance(value, typedefs.Constant):
            result = str(value)
        elif isinstance(value, typedefs.VarValue):
            result = value.value
        elif isinstance(value, typedefs.FunctionReturnValue):
            varName = self.codeBlock.functionReturnVariable(value)
            result = '$(%s)' % varName
        elif isinstance(value, typedefs.ExpressionValue):
            varName = self.codeBlock.expressionReturnVariable(value)
            result = '$(%s)' % varName
        elif isinstance(value, typedefs.NegatedValue):
            result = self.procNegatedValue(value)
        elif isinstance(value, typedefs.RelationalExpressionValue):
            result = self.procRelationalExpression(value)
        elif isinstance(value, typedefs.LogicalExpressionValue):
            result = self.procLogicalExpression(value)
        return result
class SingleVariableCondition(SimpleComparison):
    """
    Condition represented by a single variable value.

    Implemented as the POSXML comparison ``variable != 0``, so the
    condition is true whenever the variable holds a nonzero value.
    """
    def __init__(self, variable):
        super(SingleVariableCondition, self).__init__(variable, '!=', '0')
class ConditionalFactory(object):
    """
    Factory to generate code for the condition.

    Maps the typedefs value classes produced by the parser to the
    Conditional subclasses used by the code generator.
    """
    def __init__(self):
        super(ConditionalFactory, self).__init__()

    @staticmethod
    def canBeSimple(condition):
        # pylint: disable=R0201
        # It can really be a function (pylint-R0201), but I prefer it as a
        # method in order to encapsulate the conditional creation code
        # into the factory
        "Can the condition be a SimpleComparison?"
        # a simple comparison needs at least one variable operand and
        # only constants/variables (no nested expressions) on both sides
        left = condition.left
        right = condition.right
        if isinstance(left, typedefs.VarValue):
            return isinstance(right, typedefs.Constant) \
                or isinstance(right, typedefs.VarValue)
        elif isinstance(right, typedefs.VarValue):
            return isinstance(left, typedefs.Constant)
        else:
            return False

    @staticmethod
    def simpleValue(value):
        # pylint: disable=R0201
        # It can really be a function (pylint-R0201), but I prefer it as a
        # method in order to encapsulate the conditional creation code
        # into the factory
        "The condition is a single value"
        if isinstance(value, typedefs.Constant):
            return str(value.value)
        elif isinstance(value, typedefs.VarValue):
            return value.value
        # NOTE(review): None for any other value type - callers are
        # expected to have checked canBeSimple() first
        return None

    def getConditional(self, condition):
        """
        Builds and return the Conditional object that represents
        this condition.

        NOTE(review): returns None (implicitly) for condition types not
        handled below - confirm every parser value type is covered.
        """
        if isinstance(condition, typedefs.Constant):
            try:
                value = int(condition.value) != 0
            except ValueError:
                # non-numeric constant: treated as false
                value = False
            return ConstantConditional(value)
        elif isinstance(condition, typedefs.VarValue):
            return SingleVariableCondition('$(%s)' % condition.variable.name)
        elif isinstance(condition, typedefs.FunctionReturnValue):
            # a bare function call used as condition: compare result to 0
            leftValue = condition
            rightValue = typedefs.IntConstant(0)
            return ValueComparison(leftValue, "!=", rightValue)
        elif isinstance(condition, typedefs.NegatedValue):
            # build the inner condition, then flip its logical value
            cond = self.getConditional(condition.value)
            cond.negate()
            return cond
        elif isinstance(condition, typedefs.RelationalExpressionValue):
            if self.canBeSimple(condition):
                leftValue = self.simpleValue(condition.left)
                rightValue = self.simpleValue(condition.right)
                return SimpleComparison(leftValue, condition.operator,
                                        rightValue)
            else:
                return ValueComparison(condition.left, condition.operator,
                                       condition.right)
        elif isinstance(condition, typedefs.LogicalExpressionValue):
            return LogicalExpr(condition.left, condition.operator,
                               condition.right)
class ValueComparison(SimpleComparison):
    """Comparison between two arbitrary values.

    The operands are only evaluated in preConditionalCode(); until then
    the comparison holds the "$(left)"/"$(right)" placeholders required
    by the parent class constructor.
    """

    def __init__(self, leftValue, op, rightValue):
        self.codeBlock = None
        self.leftValue = leftValue
        self.rightValue = rightValue
        super(ValueComparison, self).__init__("$(left)", op, "$(right)")

    def preConditionalCode(self, codeBlock, controlVariable=None):
        """Evaluate both operands, then refresh the comparison sides."""
        valueContext = LogicalValueContext(codeBlock)
        evaluatedLeft = valueContext.procValue(self.leftValue)
        evaluatedRight = valueContext.procValue(self.rightValue)
        self._updateValues(evaluatedLeft, evaluatedRight)
class LogicalExpr(Conditional):
    """
    Logical expression - AND and OR operations.

    preConditionalCode() evaluates both operands into an auto-generated
    integer "result" variable; emit() then compares that variable
    against 0.
    """
    # pylint: disable=C0103
    # pylint thinks of OR and AND as class attributes, but they are constants
    OR = "||"
    AND = "&&"
    """
    Logical operation - AND, OR
    """
    def __init__(self, left, op, right):
        super(LogicalExpr, self).__init__()
        self.operator = op
        # NOTE(review): both 'negated' and 'isNegated' are initialized
        # here, but only 'isNegated' is read by emit()/negate() below.
        self.negated = False
        self.resultVar = None
        self.left = left
        self.right = right
        self.isNegated = False

    def orCode(self, codeBlock, cond1, cond2):
        """
        Generates the OR as nested ifs:

        result = False
        if cond1:
            result = True
        else:
            if cond2:
                result = True
        """
        # import here to avoid cyclic import problems
        from .control import IfStatement
        codeBlock.addStatements(Assignment(typedefs.INT, self.resultVar,
                                           '0'))
        if1 = IfStatement(codeBlock)
        if2 = IfStatement(codeBlock)
        # the same "set result to 1" block is shared by both branches
        trueBlock = [Assignment(typedefs.INT, self.resultVar, '1')]
        if2.selfGenerated(cond2, trueBlock)
        elseBlock = [if2]
        if1.selfGenerated(cond1, trueBlock, elseBlock)
        codeBlock.addStatements(if1)

    def andCode(self, codeBlock, cond1, cond2):
        """
        Generates the AND as nested ifs:

        result = False
        if cond1:
            if cond2:
                result = True
        """
        # import here to avoid cyclic import problems
        from .control import IfStatement
        codeBlock.addStatements(Assignment(typedefs.INT, self.resultVar, '0'))
        if1 = IfStatement(codeBlock)
        if2 = IfStatement(codeBlock)
        if2Block = [Assignment(typedefs.INT, self.resultVar, '1')]
        if2.selfGenerated(cond2, if2Block)
        if1Block = [if2]
        if1.selfGenerated(cond1, if1Block)
        codeBlock.addStatements(if1)

    def preConditionalCode(self, codeBlock, controlVariable=None):
        """Evaluate both operands and compute the 0/1 result variable."""
        # NOTE(review): when controlVariable is given, resultVar becomes
        # the tuple (controlVariable, None) - confirm this matches the
        # value shape returned by currentScope().autoInt().
        self.resultVar = (controlVariable, None) \
            if controlVariable \
            else codeBlock.currentScope().autoInt()
        ctx = LogicalValueContext(codeBlock)
        leftValue = ctx.procValue(self.left)
        rightValue = ctx.procValue(self.right)
        cond1 = SingleVariableCondition(leftValue)
        cond2 = SingleVariableCondition(rightValue)
        if self.operator == self.OR:
            self.orCode(codeBlock, cond1, cond2)
        else:
            self.andCode(codeBlock, cond1, cond2)

    def emit(self):
        """Emit the attributes comparing the result variable against 0."""
        operator = "notequalto" if not self.isNegated else "equalto"
        return 'variable="$(%s)" operator="%s" value="%s"' % \
            (self.resultVar, operator, "0")

    def negate(self):
        """Flip the logical value by toggling the emitted operator."""
        self.isNegated = not self.isNegated
| axmachado/simplepos | simplepos/codegen/boolean.py | Python | gpl-3.0 | 15,374 |
from datetime import datetime, timedelta
from dateutil import tz
import numpy as np
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
from pandas.util import testing as tm
class TestDatetimeIndex(object):
    """Indexing tests for Series/DataFrame with tz-aware datetime data."""

    def test_setitem_with_datetime_tz(self):
        """Round-trip .loc setitem with a boolean mask, tz-aware and naive."""
        # 16889
        # support .loc with alignment and tz-aware DatetimeIndex
        mask = np.array([True, False, True, False])
        idx = date_range('20010101', periods=4, tz='UTC')
        df = DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
        result = df.copy()
        result.loc[mask, :] = df.loc[mask, :]
        tm.assert_frame_equal(result, df)
        result = df.copy()
        result.loc[mask] = df.loc[mask]
        tm.assert_frame_equal(result, df)
        # same round-trip with a tz-naive index
        idx = date_range('20010101', periods=4)
        df = DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
        result = df.copy()
        result.loc[mask, :] = df.loc[mask, :]
        tm.assert_frame_equal(result, df)
        result = df.copy()
        result.loc[mask] = df.loc[mask]
        tm.assert_frame_equal(result, df)

    def test_indexing_with_datetime_tz(self):
        """Row/boolean/element indexing on frames holding tz-aware columns."""
        # 8260
        # support datetime64 with tz
        idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
                    name='foo')
        dr = date_range('20130110', periods=3)
        df = DataFrame({'A': idx, 'B': dr})
        df['C'] = idx
        df.iloc[1, 1] = pd.NaT
        df.iloc[1, 2] = pd.NaT
        # indexing
        result = df.iloc[1]
        expected = Series([Timestamp('2013-01-02 00:00:00-0500',
                                     tz='US/Eastern'), np.nan, np.nan],
                          index=list('ABC'), dtype='object', name=1)
        tm.assert_series_equal(result, expected)
        result = df.loc[1]
        expected = Series([Timestamp('2013-01-02 00:00:00-0500',
                                     tz='US/Eastern'), np.nan, np.nan],
                          index=list('ABC'), dtype='object', name=1)
        tm.assert_series_equal(result, expected)
        # indexing - fast_xs
        df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
        result = df.iloc[5]
        expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', freq='D')
        assert result == expected
        result = df.loc[5]
        assert result == expected
        # indexing - boolean
        result = df[df.a > df.a[3]]
        expected = df.iloc[4:]
        tm.assert_frame_equal(result, expected)
        # indexing - setting an element
        df = DataFrame(data=pd.to_datetime(
            ['2015-03-30 20:12:32', '2015-03-12 00:11:11']), columns=['time'])
        df['new_col'] = ['new', 'old']
        df.time = df.set_index('time').index.tz_localize('UTC')
        v = df[df.new_col == 'new'].set_index('time').index.tz_convert(
            'US/Pacific')
        # trying to set a single element on a part of a different timezone
        # this converts to object
        df2 = df.copy()
        df2.loc[df2.new_col == 'new', 'time'] = v
        expected = Series([v[0], df.loc[1, 'time']], name='time')
        tm.assert_series_equal(df2.time, expected)
        v = df.loc[df.new_col == 'new', 'time'] + pd.Timedelta('1s')
        df.loc[df.new_col == 'new', 'time'] = v
        tm.assert_series_equal(df.loc[df.new_col == 'new', 'time'], v)

    def test_consistency_with_tz_aware_scalar(self):
        """All scalar accessors must return the same tz-aware Timestamp."""
        # xef gh-12938
        # various ways of indexing the same tz-aware scalar
        df = Series([Timestamp('2016-03-30 14:35:25',
                               tz='Europe/Brussels')]).to_frame()
        df = pd.concat([df, df]).reset_index(drop=True)
        expected = Timestamp('2016-03-30 14:35:25+0200',
                             tz='Europe/Brussels')
        result = df[0][0]
        assert result == expected
        result = df.iloc[0, 0]
        assert result == expected
        result = df.loc[0, 0]
        assert result == expected
        result = df.iat[0, 0]
        assert result == expected
        result = df.at[0, 0]
        assert result == expected
        result = df[0].loc[0]
        assert result == expected
        result = df[0].at[0]
        assert result == expected

    def test_indexing_with_datetimeindex_tz(self):
        """get/set with list-like and scalar keys on a tz-aware index."""
        # GH 12050
        # indexing on a series with a datetimeindex with tz
        index = date_range('2015-01-01', periods=2, tz='utc')
        ser = Series(range(2), index=index, dtype='int64')
        # list-like indexing
        for sel in (index, list(index)):
            # getitem
            tm.assert_series_equal(ser[sel], ser)
            # setitem
            result = ser.copy()
            result[sel] = 1
            expected = Series(1, index=index)
            tm.assert_series_equal(result, expected)
            # .loc getitem
            tm.assert_series_equal(ser.loc[sel], ser)
            # .loc setitem
            result = ser.copy()
            result.loc[sel] = 1
            expected = Series(1, index=index)
            tm.assert_series_equal(result, expected)
        # single element indexing
        # getitem
        assert ser[index[1]] == 1
        # setitem
        result = ser.copy()
        result[index[1]] = 5
        expected = Series([0, 5], index=index)
        tm.assert_series_equal(result, expected)
        # .loc getitem
        assert ser.loc[index[1]] == 1
        # .loc setitem
        result = ser.copy()
        result.loc[index[1]] = 5
        expected = Series([0, 5], index=index)
        tm.assert_series_equal(result, expected)

    def test_partial_setting_with_datetimelike_dtype(self):
        """Partial .loc setting must align on a DatetimeIndex."""
        # GH9478
        # a datetimeindex alignment issue with partial setting
        df = DataFrame(np.arange(6.).reshape(3, 2), columns=list('AB'),
                       index=date_range('1/1/2000', periods=3, freq='1H'))
        expected = df.copy()
        expected['C'] = [expected.index[0]] + [pd.NaT, pd.NaT]
        mask = df.A < 1
        df.loc[mask, 'C'] = df.loc[mask].index
        tm.assert_frame_equal(df, expected)

    def test_loc_setitem_datetime(self):
        """.loc setitem must accept every datetime scalar flavour as key."""
        # GH 9516
        dt1 = Timestamp('20130101 09:00:00')
        dt2 = Timestamp('20130101 10:00:00')
        for conv in [lambda x: x, lambda x: x.to_datetime64(),
                     lambda x: x.to_pydatetime(), lambda x: np.datetime64(x)]:
            df = DataFrame()
            df.loc[conv(dt1), 'one'] = 100
            df.loc[conv(dt2), 'one'] = 200
            expected = DataFrame({'one': [100.0, 200.0]}, index=[dt1, dt2])
            tm.assert_frame_equal(df, expected)

    def test_series_partial_set_datetime(self):
        """.loc with Timestamp key lists, including missing keys (NaN)."""
        # GH 11497
        idx = date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
        ser = Series([0.1, 0.2], index=idx, name='s')
        result = ser.loc[[Timestamp('2011-01-01'), Timestamp('2011-01-02')]]
        exp = Series([0.1, 0.2], index=idx, name='s')
        tm.assert_series_equal(result, exp, check_index_type=True)
        keys = [Timestamp('2011-01-02'), Timestamp('2011-01-02'),
                Timestamp('2011-01-01')]
        exp = Series([0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name='idx'),
                     name='s')
        tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
        keys = [Timestamp('2011-01-03'), Timestamp('2011-01-02'),
                Timestamp('2011-01-03')]
        exp = Series([np.nan, 0.2, np.nan],
                     index=pd.DatetimeIndex(keys, name='idx'), name='s')
        # missing keys currently warn (deprecated behaviour)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)

    def test_series_partial_set_period(self):
        """Same as the datetime case above, but with a PeriodIndex."""
        # GH 11497
        idx = pd.period_range('2011-01-01', '2011-01-02', freq='D', name='idx')
        ser = Series([0.1, 0.2], index=idx, name='s')
        result = ser.loc[[pd.Period('2011-01-01', freq='D'),
                          pd.Period('2011-01-02', freq='D')]]
        exp = Series([0.1, 0.2], index=idx, name='s')
        tm.assert_series_equal(result, exp, check_index_type=True)
        keys = [pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-01', freq='D')]
        exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name='idx'),
                     name='s')
        tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
        keys = [pd.Period('2011-01-03', freq='D'),
                pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-03', freq='D')]
        exp = Series([np.nan, 0.2, np.nan],
                     index=pd.PeriodIndex(keys, name='idx'), name='s')
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = ser.loc[keys]
        tm.assert_series_equal(result, exp)

    def test_nanosecond_getitem_setitem_with_tz(self):
        """Nanosecond-resolution tz-aware labels must round-trip in .loc."""
        # GH 11679
        data = ['2016-06-28 08:30:00.123456789']
        index = pd.DatetimeIndex(data, dtype='datetime64[ns, America/Chicago]')
        df = DataFrame({'a': [10]}, index=index)
        result = df.loc[df.index[0]]
        expected = Series(10, index=['a'], name=df.index[0])
        tm.assert_series_equal(result, expected)
        result = df.copy()
        result.loc[df.index[0], 'a'] = -1
        expected = DataFrame(-1, index=index, columns=['a'])
        tm.assert_frame_equal(result, expected)

    def test_loc_getitem_across_dst(self):
        """Label slicing must work across a DST transition."""
        # GH 21846
        idx = pd.date_range('2017-10-29 01:30:00',
                            tz='Europe/Berlin', periods=5, freq='30 min')
        series2 = pd.Series([0, 1, 2, 3, 4],
                            index=idx)
        t_1 = pd.Timestamp('2017-10-29 02:30:00+02:00', tz='Europe/Berlin',
                           freq='30min')
        t_2 = pd.Timestamp('2017-10-29 02:00:00+01:00', tz='Europe/Berlin',
                           freq='30min')
        result = series2.loc[t_1:t_2]
        expected = pd.Series([2, 3], index=idx[2:4])
        tm.assert_series_equal(result, expected)
        result = series2[t_1]
        expected = 2
        assert result == expected

    def test_loc_incremental_setitem_with_dst(self):
        """Incremental .loc setitem must enlarge across a DST boundary."""
        # GH 20724
        base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
        idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
        result = pd.Series([0], index=[idxs[0]])
        for ts in idxs:
            result.loc[ts] = 1
        expected = pd.Series(1, index=idxs)
        tm.assert_series_equal(result, expected)

    def test_loc_setitem_with_existing_dst(self):
        """Enlarging setitem on an index spanning an existing DST change."""
        # GH 18308
        start = pd.Timestamp('2017-10-29 00:00:00+0200', tz='Europe/Madrid')
        end = pd.Timestamp('2017-10-29 03:00:00+0100', tz='Europe/Madrid')
        ts = pd.Timestamp('2016-10-10 03:00:00', tz='Europe/Madrid')
        idx = pd.date_range(start, end, closed='left', freq="H")
        result = pd.DataFrame(index=idx, columns=['value'])
        result.loc[ts, 'value'] = 12
        expected = pd.DataFrame([np.nan] * len(idx) + [12],
                                index=idx.append(pd.DatetimeIndex([ts])),
                                columns=['value'],
                                dtype=object)
        tm.assert_frame_equal(result, expected)
| GuessWhoSamFoo/pandas | pandas/tests/indexing/test_datetime.py | Python | bsd-3-clause | 11,482 |
# -*- coding: utf-8 -*-
# pyLottoSimu,
# Copyright (C) <2012-2018> Markus Hackspacher
# This file is part of pyLottoSimu.
# pyLottoSimu is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyLottoSimu is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyLottoSimu. If not, see <http://www.gnu.org/licenses/>.
"""test case
"""
| MarkusHackspacher/pyLottoSimu | tests/__init__.py | Python | gpl-3.0 | 780 |
"""MolLib command line interface
"""
# Author: Justin L Lorieau
# Copyright 2016
import argparse
import logging
import sys
import os
import mollib
from mollib.utils import FormattedStr
from mollib.plugins import PluginManager
from mollib.core import list_global_settings, load_settings
import mollib.utils.settings
try:
import configparser
except ImportError:
import ConfigParser as configparser
def list_plugins(plugin_manager):
    """Print the name and enabled state of every installed plugin."""
    print('Installed plugins:')
    for plugin in plugin_manager.plugins():
        if plugin.enabled:
            status = FormattedStr('Enabled', 'green')
        else:
            status = FormattedStr('Not Enabled', 'red')
        print('\t{:<15} '.format(plugin.name) + status)
def list_settings():
    """Print the section names available for settings."""
    print('Installed settings sections:')
    for section in list_global_settings():
        print('\t[{}]'.format(section))
def main():
    """Run the mollib command line interface.

    Builds the argument parser, loads plugins and configuration files,
    reads the requested molecule(s) and runs every active plugin's
    preprocess/process/postprocess steps on them.
    """
    # Load the argument parser and subparsers
    parser = argparse.ArgumentParser(prog='mollib',
                                     description='A molecular processor')
    subparsers = parser.add_subparsers(title='commands', dest='command',
                                       metavar='')

    # Logging levels (-d / -s / -v are mutually exclusive)
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('-d', '--debug',
                       action="store_const", dest="loglevel",
                       const=logging.DEBUG,
                       default=logging.WARNING,
                       help="Print debugging statements",)
    group.add_argument('-s', '--suppress',
                       action="store_const", dest="loglevel",
                       const=logging.CRITICAL,
                       default=logging.WARNING,
                       help="Suppress all messages, except critical", )
    group.add_argument('-v', '--verbose',
                       action="store_const", dest="loglevel",
                       const=logging.INFO,
                       help="Print extra information")

    # Version information and other installation information
    parser.add_argument('--list-plugins',
                        action='store_true',
                        help='List the installed plugins')
    parser.add_argument('--list-settings',
                        action='store_true',
                        help='List the available sections for settings')
    parser.add_argument('--version', action='version',
                        version=('%(prog)s ' + mollib.__version__.__version__),
                        help='Show the program version')

    # Load the plugins
    plugin_manager = PluginManager(parser=parser, subparser=subparsers)
    parser = plugin_manager.process_parsers()

    # process the --list-settings and --list_plugins options before full
    # argument parsing so they work without a subcommand
    if '--list-plugins' in sys.argv:
        list_plugins(plugin_manager)
        exit()
    if '--list-settings' in sys.argv:
        list_settings()
        exit()

    # Parse the commands
    args = parser.parse_args()

    # Set special flags that need to be set before processing molecules
    if getattr(args, 'save', False):
        mollib.utils.settings.save_fetched_files_locally = True

    # Setup the logger
    fmt = '{}: %(levelname)-8s %(message)s'.format('mollib')
    logging.basicConfig(format=fmt, level=args.loglevel)
    logging.debug(args)

    # Read in the configuration file(s); a user-supplied --config file
    # overrides values from ~/.mollibrc
    config_files = [os.path.expanduser('~/.mollibrc'), ]
    if args.config:
        config_files.append(args.config[0])
    config = configparser.ConfigParser()
    config.read(config_files)
    load_settings(config)

    # Load the molecules, optionally restricted to specific model ids
    if args.models:
        molecules = []
        mr = mollib.MoleculeReader()
        model_ids = args.models
        for identifier in args.i[0]:
            molecules += mr.read(identifier, model_ids=model_ids)
    else:
        molecules = [mollib.Molecule(identifier) for identifier in args.i[0]]

    # Find the relevant plugins to execute
    active_plugins = plugin_manager.plugins()

    # Pre-process the molecules
    for plugin in active_plugins:
        # message normalized for consistency (was 'Preprocessing:{}')
        logging.debug('Preprocessing: {}'.format(plugin))
        plugin.preprocess(molecules, args)

    # Process the molecules
    for plugin in active_plugins:
        logging.debug('Processing: {}'.format(plugin))
        plugin.process(molecules, args)

    # Post-process the molecules
    for plugin in active_plugins:
        # BUG FIX: log message typo corrected (was 'Post-rocessing')
        logging.debug('Post-processing: {}'.format(plugin))
        plugin.postprocess(molecules, args)
if __name__ == "__main__":
main() | jlorieau/mollib | mollib/__main__.py | Python | gpl-3.0 | 4,658 |
import sys
import operator
import pytest
import ctypes
import gc
import types
from typing import Any
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
IS_PYSTON)
from numpy.compat import pickle
from itertools import permutations
import random
import hypothesis
from hypothesis.extra import numpy as hynp
def assert_dtype_equal(a, b):
    """Assert that dtypes *a* and *b* compare equal and hash identically."""
    assert_equal(a, b)
    msg = "two equivalent types do not hash to the same value !"
    assert_equal(hash(a), hash(b), msg)
def assert_dtype_not_equal(a, b):
    """Assert that dtypes *a* and *b* differ and hash differently."""
    assert_(a != b)
    msg = "two different types hash to the same value !"
    assert_(hash(a) != hash(b), msg)
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
def test_richcompare_invalid_dtype_equality(self):
# Make sure objects that cannot be converted to valid
# dtypes results in False/True when compared to valid dtypes.
# Here 7 cannot be converted to dtype. No exceptions should be raised
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
@pytest.mark.parametrize(
'operation',
[operator.le, operator.lt, operator.ge, operator.gt])
def test_richcompare_invalid_dtype_comparison(self, operation):
# Make sure TypeError is raised for comparison operators
# for invalid dtypes. Here 7 is an invalid dtype.
with pytest.raises(TypeError):
operation(np.dtype(np.int32), 7)
@pytest.mark.parametrize("dtype",
['Bool', 'Bytes0', 'Complex32', 'Complex64',
'Datetime64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64',
'Object0', 'Str0', 'Timedelta64',
'UInt8', 'UInt16', 'Uint32', 'UInt32',
'Uint64', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
    def test_dtype_from_bytes(self):
        """Edge cases of constructing dtypes directly from bytes objects."""
        # Empty bytes object
        assert_raises(TypeError, np.dtype, b'')
        # Byte order indicator, but no type
        assert_raises(TypeError, np.dtype, b'|')
        # Single character with ordinal < NPY_NTYPES returns
        # type by index into _builtin_descrs
        assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
        assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
        # Single character where value is a valid type code
        assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
        # Bytes with non-ascii values raise errors
        assert_raises(TypeError, np.dtype, b'\xff')
        assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
    """Construction, hashing, equality and layout of structured dtypes."""
    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        assert_dtype_equal(a, b)
    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', int)])
        b = np.dtype([('ye', int)])
        assert_dtype_not_equal(a, b)
    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        assert_dtype_not_equal(a, b)
    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount_dictionary_setting(self):
        """Building a dtype from a dict spec must not leak references to
        the dict's values."""
        names = ["name1"]
        formats = ["f8"]
        titles = ["t1"]
        offsets = [0]
        d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
        refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
        np.dtype(d)
        refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
        assert refcounts == refcounts_new
    def test_mutate(self):
        # Mutating a dtype should reset the cached hash value
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        c = np.dtype([('ye', int)])
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
        a.names = ['ye']
        assert_dtype_equal(a, c)
        assert_dtype_not_equal(a, b)
        state = b.__reduce__()[2]
        a.__setstate__(state)
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        assert_raises(TypeError, np.dtype,
                      dict(names={'A', 'B'}, formats=['f8', 'i4']))
        assert_raises(TypeError, np.dtype,
                      dict(names=['A', 'B'], formats={'f8', 'i4'}))
    def test_aligned_size(self):
        # Check that structured dtypes get padded to an aligned size
        dt = np.dtype('i4, i1', align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i4', 'u1'],
                       'offsets':[0, 4]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
        assert_equal(dt.itemsize, 8)
        # Nesting should preserve that alignment
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=True)
        assert_equal(dt1.itemsize, 20)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 16]}, align=True)
        assert_equal(dt2.itemsize, 20)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 16)}, align=True)
        assert_equal(dt3.itemsize, 20)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Nesting should preserve packing
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=False)
        assert_equal(dt1.itemsize, 11)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 10]}, align=False)
        assert_equal(dt2.itemsize, 11)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 10)}, align=False)
        assert_equal(dt3.itemsize, 11)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Array of subtype should preserve alignment
        dt1 = np.dtype([('a', '|i1'),
                        ('b', [('f0', '<i2'),
                               ('f1', '<f4')], 2)], align=True)
        assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
                                 ('b', [('f0', '<i2'), ('', '|V2'),
                                        ('f1', '<f4')], (2,))])
    def test_union_struct(self):
        # Should be able to create union dtypes
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[0, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 4)
        a = np.array([3], dtype='<u4').view(dt)
        a['f1'] = 10
        a['f2'] = 36
        assert_equal(a['f0'], 10 + 36*256*256)
        # Should be able to specify fields out of order
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[4, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 8)
        # field name should not matter: assignment is by position
        dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
                        'formats':['<u4', '<u2', '<u2'],
                        'offsets':[4, 0, 2]}, align=True)
        vals = [(0, 1, 2), (3, -1, 4)]
        vals2 = [(0, 1, 2), (3, -1, 4)]
        a = np.array(vals, dt)
        b = np.array(vals2, dt2)
        assert_equal(a.astype(dt2), b)
        assert_equal(b.astype(dt), a)
        assert_equal(a.view(dt2), b)
        assert_equal(b.view(dt), a)
        # Should not be able to overlap objects with other types
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['O', 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'O'],
                       'offsets':[0, 3]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':[[('a', 'O')], 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', [('a', 'O')]],
                       'offsets':[0, 3]})
        # Out of order should still be ok, however
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i1', 'O'],
                       'offsets':[np.dtype('intp').itemsize, 0]})
    @pytest.mark.parametrize(["obj", "dtype", "expected"],
        [([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
         (3, "(3)f4,", [3, 3, 3]),
         (np.float64(2), "(2)f4,", [2, 2]),
         ([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
         (["1", "2"], "(2)i,", None)])
    def test_subarray_list(self, obj, dtype, expected):
        """Scalars and nested sequences broadcast into subarray dtypes."""
        dtype = np.dtype(dtype)
        res = np.array(obj, dtype=dtype)
        if expected is None:
            # iterate the 1-d list to fill the array
            expected = np.empty(len(obj), dtype=dtype)
            for i in range(len(expected)):
                expected[i] = obj[i]
        assert_array_equal(res, expected)
    def test_comma_datetime(self):
        """Comma-separated specs may mix datetime and integer fields."""
        dt = np.dtype('M8[D],datetime64[Y],i8')
        assert_equal(dt, np.dtype([('f0', 'M8[D]'),
                                   ('f1', 'datetime64[Y]'),
                                   ('f2', 'i8')]))
    def test_from_dictproxy(self):
        # Tests for PR #5920
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
        assert_dtype_equal(dt, np.dtype(dt.fields))
        dt2 = np.dtype((np.void, dt.fields))
        assert_equal(dt2.fields, dt.fields)
    def test_from_dict_with_zero_width_field(self):
        # Regression test for #6430 / #2196
        dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
        dt2 = np.dtype({'names': ['val1', 'val2'],
                        'formats': [(np.float32, (0,)), int]})
        assert_dtype_equal(dt, dt2)
        assert_equal(dt.fields['val1'][0].itemsize, 0)
        assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
    def test_bool_commastring(self):
        d = np.dtype('?,?,?')  # raises?
        assert_equal(len(d.names), 3)
        for n in d.names:
            assert_equal(d.fields[n][0], np.dtype('?'))
    def test_nonint_offsets(self):
        # gh-8059
        def make_dtype(off):
            return np.dtype({'names': ['A'], 'formats': ['i4'],
                             'offsets': [off]})
        assert_raises(TypeError, make_dtype, 'ASD')
        assert_raises(OverflowError, make_dtype, 2**70)
        assert_raises(TypeError, make_dtype, 2.3)
        assert_raises(ValueError, make_dtype, -10)
        # no errors here:
        dt = make_dtype(np.uint32(0))
        np.zeros(1, dtype=dt)[0].item()
    def test_fields_by_index(self):
        """Integer (and integer-like) indexing of a structured dtype."""
        dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
        assert_dtype_equal(dt[0], np.dtype(np.int8))
        assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
        assert_dtype_equal(dt[-1], dt[1])
        assert_dtype_equal(dt[-2], dt[0])
        assert_raises(IndexError, lambda: dt[-3])
        assert_raises(TypeError, operator.getitem, dt, 3.0)
        assert_equal(dt[1], dt[np.int8(1)])
    @pytest.mark.parametrize('align_flag',[False, True])
    def test_multifield_index(self, align_flag):
        # indexing with a list produces subfields
        # the align flag should be preserved
        dt = np.dtype([
            (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
        ], align=align_flag)
        dt_sub = dt[['B', 'col1']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B', 'col1'],
                'formats': ['<f8', '<U20'],
                'offsets': [88, 0],
                'titles': [None, 'title'],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        dt_sub = dt[['B']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B'],
                'formats': ['<f8'],
                'offsets': [88],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        dt_sub = dt[[]]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': [],
                'formats': [],
                'offsets': [],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        assert_raises(TypeError, operator.getitem, dt, ())
        assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
        assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
        assert_raises(KeyError, operator.getitem, dt, ['fake'])
        assert_raises(KeyError, operator.getitem, dt, ['title'])
        assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
    def test_partial_dict(self):
        # 'names' is missing
        assert_raises(ValueError, np.dtype,
                {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
    def test_fieldless_views(self):
        """Views and construction with completely field-less dtypes."""
        a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
                               'itemsize':8})
        assert_raises(ValueError, a.view, np.dtype([]))
        d = np.dtype((np.dtype([]), 10))
        assert_equal(d.shape, (10,))
        assert_equal(d.itemsize, 0)
        assert_equal(d.base, np.dtype([]))
        arr = np.fromiter((() for i in range(10)), [])
        assert_equal(arr.dtype, np.dtype([]))
        assert_raises(ValueError, np.frombuffer, b'', dtype=[])
        assert_equal(np.frombuffer(b'', dtype=[], count=2),
                     np.empty(2, dtype=[]))
        assert_raises(ValueError, np.dtype, ([], 'f8'))
        assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
        assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
                     np.ones(2, dtype=bool))
        assert_equal(np.zeros((1, 2), dtype=[]) == a,
                     np.ones((1, 2), dtype=bool))
class TestSubarray:
    """Subarray dtype construction, equality/hashing, and shape handling."""
    def test_single_subarray(self):
        # A bare int shape and the equivalent 1-tuple build the same dtype.
        a = np.dtype((int, (2)))
        b = np.dtype((int, (2,)))
        assert_dtype_equal(a, b)
        assert_equal(type(a.subdtype[1]), tuple)
        assert_equal(type(b.subdtype[1]), tuple)
    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 3)))
        assert_dtype_equal(a, b)
    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (3, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (1, 2, 3)))
        b = np.dtype((int, (1, 2)))
        assert_dtype_not_equal(a, b)
    def test_shape_equal(self):
        """Test some data types that are equal"""
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
        # FutureWarning during deprecation period; after it is passed this
        # should instead check that "(1)f8" == "1f8" == ("f8", 1).
        with pytest.warns(FutureWarning):
            assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
        assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
        assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
        d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
        assert_dtype_equal(np.dtype(d), np.dtype(d))
    def test_shape_simple(self):
        """Test some simple cases that shouldn't be equal"""
        assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
        assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
        assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
    def test_shape_monster(self):
        """Test some more complicated cases that shouldn't be equal"""
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
    def test_shape_sequence(self):
        # Any sequence of integers should work as shape, but the result
        # should be a tuple (immutable) of base type integers.
        a = np.array([1, 2, 3], dtype=np.int16)
        l = [1, 2, 3]
        # Array gets converted
        dt = np.dtype([('a', 'f4', a)])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        # List gets converted
        dt = np.dtype([('a', 'f4', l)])
        assert_(isinstance(dt['a'].shape, tuple))
        # Objects that only implement __index__/__int__ also work as shape.
        class IntLike:
            def __index__(self):
                return 3
            def __int__(self):
                # (a PyNumber_Check fails without __int__)
                return 3
        dt = np.dtype([('a', 'f4', IntLike())])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        dt = np.dtype([('a', 'f4', (IntLike(),))])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
    def test_shape_matches_ndim(self):
        """The field's ndim must follow directly from the given shape."""
        dt = np.dtype([('a', 'f4', ())])
        assert_equal(dt['a'].shape, ())
        assert_equal(dt['a'].ndim, 0)
        dt = np.dtype([('a', 'f4')])
        assert_equal(dt['a'].shape, ())
        assert_equal(dt['a'].ndim, 0)
        dt = np.dtype([('a', 'f4', 4)])
        assert_equal(dt['a'].shape, (4,))
        assert_equal(dt['a'].ndim, 1)
        dt = np.dtype([('a', 'f4', (1, 2, 3))])
        assert_equal(dt['a'].shape, (1, 2, 3))
        assert_equal(dt['a'].ndim, 3)
    def test_shape_invalid(self):
        # Check that the shape is valid.
        max_int = np.iinfo(np.intc).max
        max_intp = np.iinfo(np.intp).max
        # Too large values (the datatype is part of this)
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier):
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
        # Negative values
        assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
    def test_alignment(self):
        # Check that subarrays are aligned
        t1 = np.dtype('(1,)i4', align=True)
        t2 = np.dtype('2i4', align=True)
        assert_equal(t1.alignment, t2.alignment)
    def test_aligned_empty(self):
        # Mainly regression test for gh-19696: construction failed completely
        dt = np.dtype([], align=True)
        assert dt == np.dtype([])
        dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
        assert dt == np.dtype([])
def iter_struct_object_dtypes():
    """
    Iterates over a few complex dtypes and object pattern which
    fill the array with a given object (defaults to a singleton).
    Yields
    ------
    dtype : dtype
    pattern : tuple
        Structured tuple for use with `np.array`.
    count : int
        Number of objects stored in the dtype.
    singleton : object
        A singleton object. The returned pattern is constructed so that
        all objects inside the datatype are set to the singleton.
    """
    obj = object()
    # Plain object subarray: 2*3 = 6 references to `obj` per element.
    dt = np.dtype([('b', 'O', (2, 3))])
    p = ([[obj] * 3] * 2,)
    yield pytest.param(dt, p, 6, obj, id="<subarray>")
    # Object subarray next to a plain integer field.
    dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
    p = (0, [[obj] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
    # Structured subarray with one object field: still 6 references.
    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
    p = (0, [[(obj, 0)] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
    # Two object fields in the subarray struct: 2*6 = 12 references.
    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
    p = (0, [[(obj, obj)] * 3] * 2)
    yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
    """These tests cover various uses of complicated structured types which
    include objects and thus require reference counting.
    """
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    @pytest.mark.parametrize(["creation_func", "creation_obj"], [
        pytest.param(np.empty, None,
             # None is probably used for too many things
             marks=pytest.mark.skip("unreliable due to python's behaviour")),
        (np.ones, 1),
        (np.zeros, 0)])
    def test_structured_object_create_delete(self, dt, pat, count, singleton,
                                             creation_func, creation_obj):
        """Structured object reference counting in creation and deletion"""
        # The test assumes that 0, 1, and None are singletons.
        # gc.collect first so unrelated garbage cannot skew the baseline.
        gc.collect()
        before = sys.getrefcount(creation_obj)
        arr = creation_func(3, dt)
        now = sys.getrefcount(creation_obj)
        assert now - before == count * 3
        del arr
        now = sys.getrefcount(creation_obj)
        assert now == before
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    def test_structured_object_item_setting(self, dt, pat, count, singleton):
        """Structured object reference counting for simple item setting"""
        one = 1
        gc.collect()
        before = sys.getrefcount(singleton)
        arr = np.array([pat] * 3, dt)
        assert sys.getrefcount(singleton) - before == count * 3
        # Fill with `1` and check that it was replaced correctly:
        before2 = sys.getrefcount(one)
        arr[...] = one
        after2 = sys.getrefcount(one)
        assert after2 - before2 == count * 3
        del arr
        gc.collect()
        assert sys.getrefcount(one) == before2
        assert sys.getrefcount(singleton) == before
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    @pytest.mark.parametrize(
        ['shape', 'index', 'items_changed'],
        [((3,), ([0, 2],), 2),
         ((3, 2), ([0, 2], slice(None)), 4),
         ((3, 2), ([0, 2], [1]), 2),
         ((3,), ([True, False, True]), 2)])
    def test_structured_object_indexing(self, shape, index, items_changed,
                                        dt, pat, count, singleton):
        """Structured object reference counting for advanced indexing."""
        # Use two small negative values (should be singletons, but less likely
        # to run into race-conditions). This failed in some threaded envs
        # When using 0 and 1. If it fails again, should remove all explicit
        # checks, and rely on `pytest-leaks` reference count checker only.
        val0 = -4
        val1 = -5
        arr = np.full(shape, val0, dt)
        gc.collect()
        before_val0 = sys.getrefcount(val0)
        before_val1 = sys.getrefcount(val1)
        # Test item getting:
        part = arr[index]
        after_val0 = sys.getrefcount(val0)
        assert after_val0 - before_val0 == count * items_changed
        del part
        # Test item setting:
        arr[index] = val1
        gc.collect()
        after_val0 = sys.getrefcount(val0)
        after_val1 = sys.getrefcount(val1)
        assert before_val0 - after_val0 == count * items_changed
        assert after_val1 - before_val1 == count * items_changed
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
        """Structured object reference counting for specialized functions.
        The older functions such as take and repeat use different code paths
        then item setting (when writing this).
        """
        indices = [0, 1]
        arr = np.array([pat] * 3, dt)
        gc.collect()
        before = sys.getrefcount(singleton)
        res = arr.take(indices)
        after = sys.getrefcount(singleton)
        assert after - before == count * 2
        new = res.repeat(10)
        gc.collect()
        after_repeat = sys.getrefcount(singleton)
        assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields:
    """Tests subarray fields which contain sparse dtypes so that
    not all memory is used by the dtype work. Such dtype's should
    leave the underlying memory unchanged.
    """
    # Full layout: struct with fields 'aa' (offset 0) and 'ab' (offset 4),
    # stored as a (2, 3) subarray.
    dtype = np.dtype([('a', {'names': ['aa', 'ab'], 'formats': ['f', 'f'],
                             'offsets': [0, 4]}, (2, 3))])
    # Sparse view: same itemsize, but only the 'ab' bytes are covered.
    sparse_dtype = np.dtype([('a', {'names': ['ab'], 'formats': ['f'],
                                    'offsets': [4]}, (2, 3))])
    def test_sparse_field_assignment(self):
        """Full assignment through the sparse view must leave the bytes
        the sparse dtype does not cover untouched."""
        full = np.zeros(3, self.dtype)
        through_view = full.view(self.sparse_dtype)
        through_view[...] = np.finfo(np.float32).max
        # dtype is reduced when accessing the field, so shape is (3, 2, 3);
        # the uncovered 'aa' field must still be all zeros.
        assert_array_equal(full["a"]["aa"], np.zeros((3, 2, 3)))
    def test_sparse_field_assignment_fancy(self):
        """Fancy-indexed assignment (the copyswap code path) must also
        leave the uncovered bytes unchanged."""
        full = np.zeros(3, self.dtype)
        through_view = full.view(self.sparse_dtype)
        through_view[[0, 1, 2]] = np.finfo(np.float32).max
        assert_array_equal(full["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType:
    """Test deeply nested subtypes."""
    def test1(self):
        # Identically constructed nested dtypes must hash/compare equal.
        simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                            'titles': ['Red pixel', 'Blue pixel']})
        a = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        b = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        assert_dtype_equal(a, b)
        c = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        d = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        assert_dtype_equal(c, d)
    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_list_recursion(self):
        # A self-referencing field-list spec must hit the recursion limit
        # rather than crash.
        l = list()
        l.append(('f', l))
        with pytest.raises(RecursionError):
            np.dtype(l)
    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_tuple_recursion(self):
        # Deeply nested (dtype, shape) tuples must raise RecursionError.
        d = np.int32
        for i in range(100000):
            d = (d, (1,))
        with pytest.raises(RecursionError):
            np.dtype(d)
    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_dict_recursion(self):
        # A dict spec whose format refers back to itself must also raise.
        d = dict(names=['self'], formats=[None], offsets=[0])
        d['formats'][0] = d
        with pytest.raises(RecursionError):
            np.dtype(d)
class TestMetadata:
    """Behaviour of the ``metadata`` argument and attribute of dtypes."""
    def test_no_metadata(self):
        # A plain dtype carries no metadata by default.
        assert_(np.dtype(int).metadata is None)
    def test_metadata_takes_dict(self):
        tagged = np.dtype(int, metadata={'datum': 1})
        assert_(tagged.metadata == {'datum': 1})
    def test_metadata_rejects_nondict(self):
        # Anything other than a dict — including None — is rejected.
        for bad_metadata in ('datum', 1, None):
            assert_raises(TypeError, np.dtype, int, metadata=bad_metadata)
    def test_nested_metadata(self):
        # Metadata on a field's dtype is reachable through the parent.
        outer = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
        assert_(outer['a'].metadata == {'datum': 1})
    def test_base_metadata_copied(self):
        # Metadata on the base dtype propagates to a (void, base) dtype.
        void_dt = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
        assert_(void_dt.metadata == {'datum': 1})
class TestString:
    """str()/repr() round-tripping and formatting of complex dtypes."""
    def test_complex_dtype_str(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(str(dt),
                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])]")
        # If the sticky aligned flag is set to True, it makes the
        # str() function use a dict representation with an 'aligned' flag
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))],
                        (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])],
                      align=True)
        assert_equal(str(dt),
                     "{'names': ['top', 'bottom'],"
                     " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "[('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))]],"
                     " 'offsets': [0, 76800],"
                     " 'itemsize': 80000,"
                     " 'aligned': True}")
        # The legacy print option reproduces the old, tighter spacing.
        with np.printoptions(legacy='1.21'):
            assert_equal(str(dt),
                        "{'names':['top','bottom'], "
                        "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
                        "('rtile', '>f4', (64, 36))], (3,)),"
                        "[('bleft', ('>f4', (8, 64)), (1,)), "
                        "('bright', '>f4', (8, 36))]], "
                        "'offsets':[0,76800], "
                        "'itemsize':80000, "
                        "'aligned':True}")
        # The str() output must evaluate back to an equal dtype.
        assert_equal(np.dtype(eval(str(dt))), dt)
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "[(('Red pixel', 'r'), 'u1'), "
                     "(('Green pixel', 'g'), 'u1'), "
                     "(('Blue pixel', 'b'), 'u1')]")
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names': ['rgba', 'r', 'g', 'b'],"
                     " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
                     " 'offsets': [0, 0, 1, 2],"
                     " 'titles': ['Color', 'Red pixel', "
                     "'Green pixel', 'Blue pixel'],"
                     " 'itemsize': 4}")
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel']})
        assert_equal(str(dt),
                     "{'names': ['r', 'b'],"
                     " 'formats': ['u1', 'u1'],"
                     " 'offsets': [0, 2],"
                     " 'titles': ['Red pixel', 'Blue pixel'],"
                     " 'itemsize': 3}")
        dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
        assert_equal(str(dt),
                    "[('a', '<m8[D]'), ('b', '<M8[us]')]")
    def test_repr_structured(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(repr(dt),
                     "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])])")
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
                      align=True)
        assert_equal(repr(dt),
                    "dtype([(('Red pixel', 'r'), 'u1'), "
                    "(('Green pixel', 'g'), 'u1'), "
                    "(('Blue pixel', 'b'), 'u1')], align=True)")
    def test_repr_structured_not_packed(self):
        # Dtypes with gaps must repr() through the dict form.
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']}, align=True)
        assert_equal(repr(dt),
                    "dtype({'names': ['rgba', 'r', 'g', 'b'],"
                    " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
                    " 'offsets': [0, 0, 1, 2],"
                    " 'titles': ['Color', 'Red pixel', "
                    "'Green pixel', 'Blue pixel'],"
                    " 'itemsize': 4}, align=True)")
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel'],
                       'itemsize': 4})
        assert_equal(repr(dt),
                    "dtype({'names': ['r', 'b'], "
                    "'formats': ['u1', 'u1'], "
                    "'offsets': [0, 2], "
                    "'titles': ['Red pixel', 'Blue pixel'], "
                    "'itemsize': 4})")
    def test_repr_structured_datetime(self):
        dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
        assert_equal(repr(dt),
                    "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
    def test_repr_str_subarray(self):
        dt = np.dtype(('<i2', (1,)))
        assert_equal(repr(dt), "dtype(('<i2', (1,)))")
        assert_equal(str(dt), "('<i2', (1,))")
    def test_base_dtype_with_object_type(self):
        # Issue gh-2798, should not error.
        np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
    def test_empty_string_to_object(self):
        # Pull request #4722
        np.array(["", ""]).astype(object)
    def test_void_subclass_unsized(self):
        dt = np.dtype(np.record)
        assert_equal(repr(dt), "dtype('V')")
        assert_equal(str(dt), '|V0')
        assert_equal(dt.name, 'record')
    def test_void_subclass_sized(self):
        dt = np.dtype((np.record, 2))
        assert_equal(repr(dt), "dtype('V2')")
        assert_equal(str(dt), '|V2')
        assert_equal(dt.name, 'record16')
    def test_void_subclass_fields(self):
        dt = np.dtype((np.record, [('a', '<u2')]))
        assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
        assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
        assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion:
    """Deleting dtype attributes must always raise AttributeError."""
    def test_dtype_non_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        read_only = ("subdtype", "descr", "str", "name", "base", "shape",
                     "isbuiltin", "isnative", "isalignedstruct", "fields",
                     "metadata", "hasobject")
        for attr_name in read_only:
            assert_raises(AttributeError, delattr, dt, attr_name)
    def test_dtype_writable_attributes_deletion(self):
        # 'names' is assignable, but still may not be deleted.
        dt = np.dtype(np.double)
        for attr_name in ("names",):
            assert_raises(AttributeError, delattr, dt, attr_name)
class TestDtypeAttributes:
    """Miscellaneous dtype attribute behaviour."""
    def test_descr_has_trailing_void(self):
        # see gh-6359: round-tripping through .descr must keep the
        # trailing padding, so the itemsize is preserved.
        padded = np.dtype({
            'names': ['A', 'B'],
            'formats': ['f4', 'f4'],
            'offsets': [0, 8],
            'itemsize': 16})
        round_tripped = np.dtype(padded.descr)
        assert_equal(round_tripped.itemsize, 16)
    def test_name_dtype_subclass(self):
        # Ticket #4357: the dtype name reflects the void subclass name.
        class user_def_subcls(np.void):
            pass
        assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
class TestDTypeMakeCanonical:
    """Checks np.result_type's "canonicalization" of dtypes (native byte
    order, packed or aligned struct layout)."""
    def check_canonical(self, dtype, canonical):
        """
        Check most properties relevant to "canonical" versions of a dtype,
        which is mainly native byte order for datatypes supporting this.
        The main work is checking structured dtypes with fields, where we
        reproduce most the actual logic used in the C-code.
        """
        assert type(dtype) is type(canonical)
        # a canonical DType should always have equivalent casting (both ways)
        assert np.can_cast(dtype, canonical, casting="equiv")
        assert np.can_cast(canonical, dtype, casting="equiv")
        # a canonical dtype (and its fields) is always native (checks fields):
        assert canonical.isnative
        # Check that canonical of canonical is the same (no casting):
        assert np.result_type(canonical) == canonical
        if not dtype.names:
            # The flags currently never change for unstructured dtypes
            assert dtype.flags == canonical.flags
            return
        # Must have all the needs API flag set:
        assert dtype.flags & 0b10000
        # Check that the fields are identical (including titles):
        assert dtype.fields.keys() == canonical.fields.keys()
        def aligned_offset(offset, alignment):
            # round up offset:
            return - (-offset // alignment) * alignment
        totalsize = 0
        max_alignment = 1
        for name in dtype.names:
            # each field is also canonical:
            new_field_descr = canonical.fields[name][0]
            self.check_canonical(dtype.fields[name][0], new_field_descr)
            # Must have the "inherited" object related flags:
            expected = 0b11011 & new_field_descr.flags
            assert (canonical.flags & expected) == expected
            if canonical.isalignedstruct:
                totalsize = aligned_offset(totalsize, new_field_descr.alignment)
                max_alignment = max(new_field_descr.alignment, max_alignment)
            assert canonical.fields[name][1] == totalsize
            # if a title exists, they must match (otherwise empty tuple):
            assert dtype.fields[name][2:] == canonical.fields[name][2:]
            totalsize += new_field_descr.itemsize
        if canonical.isalignedstruct:
            totalsize = aligned_offset(totalsize, max_alignment)
        assert canonical.itemsize == totalsize
        assert canonical.alignment == max_alignment
    def test_simple(self):
        dt = np.dtype(">i4")
        assert np.result_type(dt).isnative
        assert np.result_type(dt).num == dt.num
        # dtype with empty space:
        struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
        canonical = np.result_type(struct_dt)
        assert canonical.itemsize == 4+8
        assert canonical.isnative
        # aligned struct dtype with empty space:
        struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
        canonical = np.result_type(struct_dt)
        assert canonical.isalignedstruct
        assert canonical.itemsize == np.dtype("i8").alignment + 8
        assert canonical.isnative
    def test_object_flag_not_inherited(self):
        # The following dtype still indicates "object", because its included
        # in the unaccessible space (maybe this could change at some point):
        arr = np.ones(3, "i,O,i")[["f0", "f2"]]
        assert arr.dtype.hasobject
        canonical_dt = np.result_type(arr.dtype)
        assert not canonical_dt.hasobject
    @pytest.mark.slow
    @hypothesis.given(dtype=hynp.nested_dtypes())
    def test_make_canonical_hypothesis(self, dtype):
        # Property-based check over arbitrary nested dtypes.
        canonical = np.result_type(dtype)
        self.check_canonical(dtype, canonical)
    @pytest.mark.slow
    @hypothesis.given(
        dtype=hypothesis.extra.numpy.array_dtypes(
            subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
            min_size=5, max_size=10, allow_subarrays=True))
    def test_structured(self, dtype):
        # Pick 4 of the fields at random. This will leave empty space in the
        # dtype (since we do not canonicalize it here).
        field_subset = random.sample(dtype.names, k=4)
        dtype_with_empty_space = dtype[field_subset]
        assert dtype_with_empty_space.itemsize == dtype.itemsize
        canonicalized = np.result_type(dtype_with_empty_space)
        self.check_canonical(dtype_with_empty_space, canonicalized)
        # Ensure that we also check aligned struct (check the opposite, in
        # case hypothesis grows support for `align`. Then repeat the test:
        dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
        dtype_with_empty_space = dtype_aligned[field_subset]
        assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
        canonicalized = np.result_type(dtype_with_empty_space)
        self.check_canonical(dtype_with_empty_space, canonicalized)
class TestPickling:
    """Round-trip dtypes (and DType classes) through pickle under every
    protocol and verify the reconstructed objects are equal and usable."""
    def check_pickling(self, dtype):
        """Pickle/unpickle `dtype` for all protocols; check equality, descr,
        metadata, and that the unpickled dtype can build working arrays."""
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            buf = pickle.dumps(dtype, proto)
            # The dtype pickling itself pickles `np.dtype` if it is pickled
            # as a singleton; `dtype` should then be stored in the buffer:
            assert b"_DType_reconstruct" not in buf
            assert b"dtype" in buf
            pickled = pickle.loads(buf)
            assert_equal(pickled, dtype)
            assert_equal(pickled.descr, dtype.descr)
            if dtype.metadata is not None:
                assert_equal(pickled.metadata, dtype.metadata)
            # Check the reconstructed dtype is functional
            x = np.zeros(3, dtype=dtype)
            y = np.zeros(3, dtype=pickled)
            assert_equal(x, y)
            assert_equal(x[0], y[0])
    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
                                   np.compat.unicode, bool])
    def test_builtin(self, t):
        self.check_pickling(np.dtype(t))
    def test_structured(self):
        # Subarray-of-struct dtype:
        dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
        self.check_pickling(dt)
    def test_structured_aligned(self):
        dt = np.dtype('i4, i1', align=True)
        self.check_pickling(dt)
    def test_structured_unaligned(self):
        dt = np.dtype('i4, i1', align=False)
        self.check_pickling(dt)
    def test_structured_padded(self):
        # Explicit offsets/itemsize create padding that must survive pickling:
        dt = np.dtype({
            'names': ['A', 'B'],
            'formats': ['f4', 'f4'],
            'offsets': [0, 8],
            'itemsize': 16})
        self.check_pickling(dt)
    def test_structured_titles(self):
        dt = np.dtype({'names': ['r', 'b'],
                       'formats': ['u1', 'u1'],
                       'titles': ['Red pixel', 'Blue pixel']})
        self.check_pickling(dt)
    @pytest.mark.parametrize('base', ['m8', 'M8'])
    @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
                                      'ms', 'us', 'ns', 'ps', 'fs', 'as'])
    def test_datetime(self, base, unit):
        """Datetime/timedelta dtypes with and without a unit multiplier."""
        dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
        self.check_pickling(dt)
        if unit:
            dt = np.dtype('%s[7%s]' % (base, unit))
            self.check_pickling(dt)
    def test_metadata(self):
        dt = np.dtype(int, metadata={'datum': 1})
        self.check_pickling(dt)
    @pytest.mark.parametrize("DType",
        [type(np.dtype(t)) for t in np.typecodes['All']] +
        [np.dtype(rational), np.dtype])
    def test_pickle_types(self, DType):
        # Check that DTypes (the classes/types) roundtrip when pickling
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
            assert roundtrip_DType is DType
class TestPromotion:
    """Test cases related to more complex DType promotions. Further promotion
    tests are defined in `test_numeric.py`
    """
    @pytest.mark.parametrize(["other", "expected"],
            [(2**16-1, np.complex64),
             (2**32-1, np.complex128),
             (np.float16(2), np.complex64),
             (np.float32(2), np.complex64),
             (np.longdouble(2), np.complex64),
             # Base of the double value to sidestep any rounding issues:
             (np.longdouble(np.nextafter(1.7e308, 0.)), np.complex128),
             # Additionally use "nextafter" so the cast can't round down:
             (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
             # repeat for complex scalars:
             (np.complex64(2), np.complex64),
             (np.clongdouble(2), np.complex64),
             # Base of the double value to sidestep any rounding issues:
             (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.complex128),
             # Additionally use "nextafter" so the cast can't round down:
             (np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
             ])
    def test_complex_other_value_based(self, other, expected):
        """Value-based promotion of scalars against a minimal complex dtype."""
        # This would change if we modify the value based promotion
        min_complex = np.dtype(np.complex64)
        res = np.result_type(other, min_complex)
        assert res == expected
        # Check the same for a simple ufunc call that uses the same logic:
        res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
        assert res == expected
    @pytest.mark.parametrize(["other", "expected"],
                             [(np.bool_, np.complex128),
                              (np.int64, np.complex128),
                              (np.float16, np.complex64),
                              (np.float32, np.complex64),
                              (np.float64, np.complex128),
                              (np.longdouble, np.clongdouble),
                              (np.complex64, np.complex64),
                              (np.complex128, np.complex128),
                              (np.clongdouble, np.clongdouble),
                              ])
    def test_complex_scalar_value_based(self, other, expected):
        """Promotion of abstract dtypes with a Python complex scalar."""
        # This would change if we modify the value based promotion
        complex_scalar = 1j
        res = np.result_type(other, complex_scalar)
        assert res == expected
        # Check the same for a simple ufunc call that uses the same logic:
        res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
        assert res == expected
    def test_complex_pyscalar_promote_rational(self):
        # Rationals do not define promotion with complex; both the class and
        # an instance must raise (with distinct error messages):
        with pytest.raises(TypeError,
                match=r".* do not have a common DType"):
            np.result_type(1j, rational)
        with pytest.raises(TypeError,
                match=r".* no common DType exists for the given inputs"):
            np.result_type(1j, rational(1, 2))
    @pytest.mark.parametrize(["other", "expected"],
                             [(1, rational), (1., np.float64)])
    def test_float_int_pyscalar_promote_rational(self, other, expected):
        # Note that rationals are a bit awkward as they promote with float64
        # or default ints, but not float16 or uint8/int8 (which looks
        # inconsistent here)
        with pytest.raises(TypeError,
                match=r".* do not have a common DType"):
            np.result_type(other, rational)
        assert np.result_type(other, rational(1, 2)) == expected
    @pytest.mark.parametrize(["dtypes", "expected"], [
             # These promotions are not associative/commutative:
             ([np.uint16, np.int16, np.float16], np.float32),
             ([np.uint16, np.int8, np.float16], np.float32),
             ([np.uint8, np.int16, np.float16], np.float32),
             # The following promotions are not ambiguous, but cover code
             # paths of abstract promotion (no particular logic being tested)
             ([1, 1, np.float64], np.float64),
             ([1, 1., np.complex128], np.complex128),
             ([1, 1j, np.float64], np.complex128),
             ([1., 1., np.int64], np.float64),
             ([1., 1j, np.float64], np.complex128),
             ([1j, 1j, np.float64], np.complex128),
             ([1, True, np.bool_], np.int_),
             ])
    def test_permutations_do_not_influence_result(self, dtypes, expected):
        # Tests that most permutations do not influence the result. In the
        # above some uint and int combinations promote to a larger integer
        # type, which would then promote to a larger than necessary float.
        for perm in permutations(dtypes):
            assert np.result_type(*perm) == expected
def test_rational_dtype():
    """User-defined `rational` dtype: cast overflow and dtype detection."""
    # test for bug gh-5719: casting rational to a too-small int must raise
    a = np.array([1111], dtype=rational).astype
    assert_raises(OverflowError, a, 'int8')
    # test that dtype detection finds user-defined types
    x = rational(1)
    assert_equal(np.array([x,x]).dtype, np.dtype(rational))
def test_dtypes_are_true():
    """Regression test for gh-6294: dtype instances must always be truthy."""
    specs = ['f8', 'i8', [('a', 'i8'), ('b', 'f4')]]
    for spec in specs:
        assert bool(np.dtype(spec))
def test_invalid_dtype_string():
    """Regression test for gh-10440: malformed dtype strings raise TypeError."""
    for bad_spec in ('f8,i8,[f8,i8]', u'Fl\xfcgel'):
        assert_raises(TypeError, np.dtype, bad_spec)
def test_keyword_argument():
    """np.dtype must accept its single argument via the `dtype=` keyword
    (see numpy/numpy PR #16574, comment 642660971)."""
    expected = np.dtype(np.float64)
    assert np.dtype(dtype=np.float64) == expected
class TestFromDTypeAttribute:
    """`np.dtype(obj)` may pick up a `.dtype` attribute from `obj`; check the
    normal path, self-referential recursion, and np.void subclasses."""
    def test_simple(self):
        class dt:
            dtype = np.dtype("f8")
        # Both the class and an instance expose the attribute:
        assert np.dtype(dt) == np.float64
        assert np.dtype(dt()) == np.float64
    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_recursion(self):
        """A `.dtype` attribute pointing back at itself must not hang."""
        class dt:
            pass
        dt.dtype = dt
        with pytest.raises(RecursionError):
            np.dtype(dt)
        dt_instance = dt()
        dt_instance.dtype = dt
        with pytest.raises(RecursionError):
            np.dtype(dt_instance)
    def test_void_subtype(self):
        class dt(np.void):
            # This code path is fully untested before, so it is unclear
            # what this should be useful for. Note that if np.void is used
            # numpy will think we are deallocating a base type [1.17, 2019-02].
            dtype = np.dtype("f,f")
        np.dtype(dt)
        np.dtype(dt(1))
    @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
    def test_void_subtype_recursion(self):
        """Self-referential `.dtype` on an np.void subclass must also raise."""
        class vdt(np.void):
            pass
        vdt.dtype = vdt
        with pytest.raises(RecursionError):
            np.dtype(vdt)
        with pytest.raises(RecursionError):
            np.dtype(vdt(1))
class TestDTypeClasses:
    """Properties of the per-type dtype subclasses and their metaclass."""
    @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
    def test_basic_dtypes_subclass_properties(self, dtype):
        # Note: Except for the isinstance and type checks, these attributes
        # are considered currently private and may change.
        dtype = np.dtype(dtype)
        assert isinstance(dtype, np.dtype)
        assert type(dtype) is not np.dtype
        assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]"
        assert type(dtype).__module__ == "numpy"
        assert not type(dtype)._abstract
        # the flexible dtypes and datetime/timedelta have additional parameters
        # which are more than just storage information, these would need to be
        # given when creating a dtype:
        parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
        if dtype.type not in parametric:
            assert not type(dtype)._parametric
            # non-parametric dtype classes are singletons:
            assert type(dtype)() is dtype
        else:
            assert type(dtype)._parametric
            with assert_raises(TypeError):
                type(dtype)()
    def test_dtype_superclass(self):
        """np.dtype itself is an abstract class with a custom metaclass."""
        assert type(np.dtype) is not type
        assert isinstance(np.dtype, type)
        assert type(np.dtype).__name__ == "_DTypeMeta"
        assert type(np.dtype).__module__ == "numpy"
        assert np.dtype._abstract
class TestFromCTypes:
    """Conversion of ctypes types/instances to numpy dtypes: arrays, padded
    and packed structures, unions, bit fields, pointers, and endianness."""
    @staticmethod
    def check(ctype, dtype):
        """Assert both the ctypes type and an instance map to `dtype`."""
        dtype = np.dtype(dtype)
        assert_equal(np.dtype(ctype), dtype)
        assert_equal(np.dtype(ctype()), dtype)
    def test_array(self):
        c8 = ctypes.c_uint8
        self.check( 3 * c8, (np.uint8, (3,)))
        self.check( 1 * c8, (np.uint8, (1,)))
        self.check( 0 * c8, (np.uint8, (0,)))
        self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
        self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
    def test_padded_structure(self):
        # Default ctypes alignment maps to align=True on the numpy side:
        class PaddedStruct(ctypes.Structure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', np.uint8),
            ('b', np.uint16)
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_bit_fields(self):
        # Bit fields have no dtype equivalent and must be rejected:
        class BitfieldStruct(ctypes.Structure):
            _fields_ = [
                ('a', ctypes.c_uint8, 7),
                ('b', ctypes.c_uint8, 1)
            ]
        assert_raises(TypeError, np.dtype, BitfieldStruct)
        assert_raises(TypeError, np.dtype, BitfieldStruct())
    def test_pointer(self):
        p_uint8 = ctypes.POINTER(ctypes.c_uint8)
        assert_raises(TypeError, np.dtype, p_uint8)
    def test_void_pointer(self):
        self.check(ctypes.c_void_p, np.uintp)
    def test_union(self):
        # Union members all share offset 0:
        class Union(ctypes.Union):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
            ]
        expected = np.dtype(dict(
            names=['a', 'b'],
            formats=[np.uint8, np.uint16],
            offsets=[0, 0],
            itemsize=2
        ))
        self.check(Union, expected)
    def test_union_with_struct_packed(self):
        class Struct(ctypes.Structure):
            _pack_ = 1
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
        class Union(ctypes.Union):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected)
    def test_union_packed(self):
        class Struct(ctypes.Structure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        class Union(ctypes.Union):
            _pack_ = 1
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected)
    def test_packed_structure(self):
        # _pack_ = 1 removes all padding:
        class PackedStructure(ctypes.Structure):
            _pack_ = 1
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', np.uint8),
            ('b', np.uint16)
        ])
        self.check(PackedStructure, expected)
    def test_large_packed_structure(self):
        # _pack_ = 2 yields explicit, partially padded offsets:
        class PackedStructure(ctypes.Structure):
            _pack_ = 2
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint8),
                ('d', ctypes.c_uint16),
                ('e', ctypes.c_uint32),
                ('f', ctypes.c_uint32),
                ('g', ctypes.c_uint8)
            ]
        expected = np.dtype(dict(
            formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
            offsets=[0, 2, 4, 6, 8, 12, 16],
            names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
            itemsize=18))
        self.check(PackedStructure, expected)
    def test_big_endian_structure_packed(self):
        class BigEndStruct(ctypes.BigEndianStructure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '>u4')])
        self.check(BigEndStruct, expected)
    def test_little_endian_structure_packed(self):
        class LittleEndStruct(ctypes.LittleEndianStructure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '<u4')])
        self.check(LittleEndStruct, expected)
    def test_little_endian_structure(self):
        class PaddedStruct(ctypes.LittleEndianStructure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', '<B'),
            ('b', '<H')
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_big_endian_structure(self):
        class PaddedStruct(ctypes.BigEndianStructure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', '>B'),
            ('b', '>H')
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_simple_endian_types(self):
        self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
        self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
        # single bytes have no byte order:
        self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
        self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
    # Every ordered pair of type codes, used to exercise 'x,y' parsing:
    all_types = set(np.typecodes['All'])
    all_pairs = permutations(all_types, 2)
    @pytest.mark.parametrize("pair", all_pairs)
    def test_pairs(self, pair):
        """
        Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
        Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
        """
        # gh-5645: check that np.dtype('i,L') can be used
        pair_type = np.dtype('{},{}'.format(*pair))
        expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
        assert_equal(pair_type, expected)
class TestUserDType:
    """Structured dtypes created through the legacy user-dtype C API
    (via the test helper `create_custom_field_dtype`)."""
    @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
    def test_custom_structured_dtype(self):
        class mytype:
            pass
        blueprint = np.dtype([("field", object)])
        dt = create_custom_field_dtype(blueprint, mytype, 0)
        assert dt.type == mytype
        # We cannot (currently) *create* this dtype with `np.dtype` because
        # mytype does not inherit from `np.generic`. This seems like an
        # unnecessary restriction, but one that has been around forever:
        assert np.dtype(mytype) == np.dtype("O")
    def test_custom_structured_dtype_errors(self):
        """Invalid registration modes must raise instead of crashing."""
        class mytype:
            pass
        blueprint = np.dtype([("field", object)])
        with pytest.raises(ValueError):
            # Tests what happens if fields are unset during creation
            # which is currently rejected due to the containing object
            # (see PyArray_RegisterDataType).
            create_custom_field_dtype(blueprint, mytype, 1)
        with pytest.raises(RuntimeError):
            # Tests that a dtype must have its type field set up to np.dtype
            # or in this case a builtin instance.
            create_custom_field_dtype(blueprint, mytype, 2)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
class TestClassGetItem:
    """`np.dtype[...]` subscription (PEP 585 generic aliases, Python 3.9+)."""
    def test_dtype(self) -> None:
        alias = np.dtype[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is np.dtype
    @pytest.mark.parametrize("code", np.typecodes["All"])
    def test_dtype_subclass(self, code: str) -> None:
        """Subscription also works on the concrete per-type dtype classes."""
        cls = type(np.dtype(code))
        alias = cls[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is cls
    @pytest.mark.parametrize("arg_len", range(4))
    def test_subscript_tuple(self, arg_len: int) -> None:
        # Only a 1-tuple is a valid subscript; other arities raise:
        arg_tup = (Any,) * arg_len
        if arg_len == 1:
            assert np.dtype[arg_tup]
        else:
            with pytest.raises(TypeError):
                np.dtype[arg_tup]
    def test_subscript_scalar(self) -> None:
        assert np.dtype[Any]
def test_result_type_integers_and_unitless_timedelta64():
    """Regression test for gh-20077: promoting a Python int with a unitless
    timedelta64 used to segfault inside `np.result_type`."""
    unitless_td = np.timedelta64(4)
    promoted = np.result_type(0, unitless_td)
    assert_dtype_equal(promoted, unitless_td.dtype)
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
def test_class_getitem_38() -> None:
    # On Python < 3.9 generic-alias subscription is unsupported; np.dtype[...]
    # must fail loudly with a clear message rather than misbehave.
    match = "Type subscription requires python >= 3.9"
    with pytest.raises(TypeError, match=match):
        np.dtype[Any]
| numpy/numpy | numpy/core/tests/test_dtype.py | Python | bsd-3-clause | 68,138 |
# $Id$ *** pyformex ***
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
#
"""Barrel Vault
level = 'beginner'
topics = ['geometry']
techniques = ['stepmode','cylindrical']
"""
from __future__ import print_function
clear()
# Model parameters for the barrel vault grid:
m=10 # number of modules in axial direction
n=8 # number of modules in tangential direction
r=10. # barrel radius
a=180. # barrel opening angle
l=30. # barrel length
# Diagonals: build one unit diagonal, mirror it into a full X-cross,
# then tile the cross over the whole m x n module grid.
d = Formex([[[0.,0.,0.],[1.,1.,0.]]],1) # a single diagonal
draw(d,view='front')
d += d.reflect(0,1.) # reflect in x-direction and add to the original
draw(d)
d += d.reflect(1,1.) # reflect in y-direction
draw(d)
da = d.replic(m,2,0) # replicate in x-direction
draw(da)
da = da.replic(n,2,1) # replicate in y-direction
draw(da)
# Longitudinals: horizontal unit bars tiled over the grid
h = Formex('l:1',3) # same as Formex([[[0.,0.,0.],[1.,0.,0.]]],3)
draw(h)
ha = h.replic2(2*m,2*n+1,1,1) # replicate in x- and y-direction
draw(ha)
# End bars
e = Formex('l:2',0) # a unit vertical line
draw(e)
ea = e.replic2(2,2*n,2*m,1) # verticals only at the ends!
draw(ea)
# Choose better viewing angle for 3D
view('iso')
drawAxes()
# Rotate the grid to (y,z) plane and give it an offset from the z-axis
grid = (da+ha+ea).rotate(90,1).translate(0,r)
draw(grid)
# Scale the grid to the requested length and circumference of the barrel
# The current height of the grid is 2*n
# As the angle a is given in degrees, the circumference is
circum = a*Deg*r
scaled_grid = grid.scale([1.,circum/(2*n),l/(2*m)])
draw(scaled_grid)
# Create barrel
# The cylindrical transformation by default expects angles in degrees
barrel = scaled_grid.cylindrical(scale=[1.,(1./r)/Deg,1.])
draw(barrel)
# (Dutch output: "The number of elements is %s (plexitude %s)")
print("Het aantal elementen is %s (plexitude %s)" % (barrel.nelems(),barrel.nplex()))
# (Dutch output: "The size of the coordinate array is %s")
print("De grootte van de coordinatenarray is %s" % str(barrel.shape()))
# Remark: if we did not want to show the scaled grid, the creation
# of the barrel could be simplified by combining the last two transformations:
# barrel = grid.cylindrical(scale=[1.,a/(2*n),l/(2*m)])
# That's all, folks!
| dladd/pyFormex | pyformex/examples/Demos/BarrelVault2.py | Python | gpl-3.0 | 3,104 |
# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Base class for HPE Storage Drivers.
This driver requires 3.1.3 or later firmware on the 3PAR array, using
the 4.x version of the hpe3parclient.
You will need to install the python hpe3parclient.
sudo pip install --upgrade "hpe3parclient>=4.0"
"""
try:
from hpe3parclient import exceptions as hpeexceptions
except ImportError:
hpeexceptions = None
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class HPE3PARDriverBase(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.BaseVD):
"""OpenStack base driver to enable 3PAR storage array.
Version history:
.. code-block:: none
1.0.0 - Initial base driver
1.0.1 - Adds consistency group capability in generic volume groups.
1.0.2 - Adds capability.
1.0.3 - Added Tiramisu feature on 3PAR.
1.0.4 - Fixed Volume migration for "in-use" volume. bug #1744021
"""
VERSION = "1.0.4"
    def __init__(self, *args, **kwargs):
        """Initialize the base driver and register 3PAR/SAN config options."""
        super(HPE3PARDriverBase, self).__init__(*args, **kwargs)
        self._active_backend_id = kwargs.get('active_backend_id', None)
        self.configuration.append_config_values(hpecommon.hpe3par_opts)
        self.configuration.append_config_values(san.san_opts)
        # Storage protocol; left None here (presumably set by the concrete
        # protocol driver — confirm). Read by get_volume_stats/migrate_volume.
        self.protocol = None
    def _init_common(self):
        """Build a fresh HPE3PARCommon helper for the current configuration."""
        return hpecommon.HPE3PARCommon(self.configuration,
                                       self._active_backend_id)
def _login(self, timeout=None):
common = self._init_common()
# If replication is enabled and we cannot login, we do not want to
# raise an exception so a failover can still be executed.
try:
common.do_setup(None, timeout=timeout, stats=self._stats)
common.client_login()
except Exception:
if common._replication_enabled:
LOG.warning("The primary array is not reachable at this "
"time. Since replication is enabled, "
"listing replication targets and failing over "
"a volume can still be performed.")
pass
else:
raise
return common
    def _logout(self, common):
        """Log out of the 3PAR client, tolerating the never-logged-in case."""
        # If replication is enabled and we do not have a client ID, we did not
        # login, but can still failover. There is no need to logout.
        if common.client is None and common._replication_enabled:
            return
        common.client_logout()
    def _check_flags(self, common):
        """Sanity check to ensure we have required options set.

        Delegates to ``common.check_flags`` with the 3PAR/SAN option names.
        """
        required_flags = ['hpe3par_api_url', 'hpe3par_username',
                          'hpe3par_password', 'san_ip', 'san_login',
                          'san_password']
        common.check_flags(self.configuration, required_flags)
    @utils.trace
    def get_volume_stats(self, refresh=False):
        """Return backend stats annotated with protocol, driver version and
        volume_backend_name.

        :param refresh: passed through to the common layer to force a
            refresh of the cached statistics.
        """
        common = self._login()
        try:
            self._stats = common.get_volume_stats(
                refresh,
                self.get_filter_function(),
                self.get_goodness_function())
            self._stats['storage_protocol'] = self.protocol
            self._stats['driver_version'] = self.VERSION
            backend_name = self.configuration.safe_get('volume_backend_name')
            self._stats['volume_backend_name'] = (backend_name or
                                                  self.__class__.__name__)
            return self._stats
        finally:
            self._logout(common)
    def check_for_setup_error(self):
        """Setup errors are already checked for in do_setup so return pass."""
        # Intentional no-op: do_setup() runs _check_flags and the common
        # layer's check_for_setup_error.
        pass
    @utils.trace
    def create_volume(self, volume):
        """Create a volume via the common layer, inside a login/logout pair."""
        common = self._login()
        try:
            return common.create_volume(volume)
        finally:
            self._logout(common)
    @utils.trace
    def create_cloned_volume(self, volume, src_vref):
        """Clone an existing volume (delegated to the common layer)."""
        common = self._login()
        try:
            return common.create_cloned_volume(volume, src_vref)
        finally:
            self._logout(common)
    @utils.trace
    def delete_volume(self, volume):
        """Delete a volume via the common layer, inside a login/logout pair."""
        common = self._login()
        try:
            common.delete_volume(volume)
        finally:
            self._logout(common)
    @utils.trace
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        Delegated to the common layer inside a login/logout pair.
        TODO: support using the size from the user.
        """
        common = self._login()
        try:
            return common.create_volume_from_snapshot(volume, snapshot)
        finally:
            self._logout(common)
    @utils.trace
    def create_snapshot(self, snapshot):
        """Create a snapshot via the common layer."""
        common = self._login()
        try:
            common.create_snapshot(snapshot)
        finally:
            self._logout(common)
    @utils.trace
    def delete_snapshot(self, snapshot):
        """Delete a snapshot via the common layer."""
        common = self._login()
        try:
            common.delete_snapshot(snapshot)
        finally:
            self._logout(common)
    @utils.trace
    def extend_volume(self, volume, new_size):
        """Grow a volume to ``new_size`` via the common layer."""
        common = self._login()
        try:
            common.extend_volume(volume, new_size)
        finally:
            self._logout(common)
    @utils.trace
    def create_group(self, context, group):
        """Create a generic volume group via the common layer."""
        common = self._login()
        try:
            return common.create_group(context, group)
        finally:
            self._logout(common)
    @utils.trace
    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        """Create a group from a group snapshot or from a source group."""
        common = self._login()
        try:
            return common.create_group_from_src(
                context, group, volumes, group_snapshot, snapshots,
                source_group, source_vols)
        finally:
            self._logout(common)
    @utils.trace
    def delete_group(self, context, group, volumes):
        """Delete a group and its volumes via the common layer."""
        common = self._login()
        try:
            return common.delete_group(context, group, volumes)
        finally:
            self._logout(common)
    @utils.trace
    def update_group(self, context, group, add_volumes=None,
                     remove_volumes=None):
        """Add/remove volumes to/from a group via the common layer."""
        common = self._login()
        try:
            return common.update_group(context, group, add_volumes,
                                       remove_volumes)
        finally:
            self._logout(common)
    @utils.trace
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Create a snapshot of a group via the common layer."""
        common = self._login()
        try:
            return common.create_group_snapshot(context, group_snapshot,
                                                snapshots)
        finally:
            self._logout(common)
    @utils.trace
    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Delete a group snapshot via the common layer."""
        common = self._login()
        try:
            return common.delete_group_snapshot(context, group_snapshot,
                                                snapshots)
        finally:
            self._logout(common)
    @utils.trace
    def manage_existing(self, volume, existing_ref):
        """Bring an existing array volume under Cinder management."""
        common = self._login()
        try:
            return common.manage_existing(volume, existing_ref)
        finally:
            self._logout(common)
    @utils.trace
    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Bring an existing array snapshot under Cinder management."""
        common = self._login()
        try:
            return common.manage_existing_snapshot(snapshot, existing_ref)
        finally:
            self._logout(common)
    @utils.trace
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size of a to-be-managed volume (common layer)."""
        common = self._login()
        try:
            return common.manage_existing_get_size(volume, existing_ref)
        finally:
            self._logout(common)
    @utils.trace
    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        """Return the size of a to-be-managed snapshot (common layer)."""
        common = self._login()
        try:
            return common.manage_existing_snapshot_get_size(snapshot,
                                                            existing_ref)
        finally:
            self._logout(common)
    @utils.trace
    def unmanage(self, volume):
        """Remove a volume from Cinder management (volume stays on array)."""
        common = self._login()
        try:
            common.unmanage(volume)
        finally:
            self._logout(common)
    @utils.trace
    def unmanage_snapshot(self, snapshot):
        """Remove a snapshot from Cinder management (stays on array)."""
        common = self._login()
        try:
            common.unmanage_snapshot(snapshot)
        finally:
            self._logout(common)
    @utils.trace
    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type."""
        common = self._login()
        try:
            # Note: context is not forwarded; the common layer does not use it.
            return common.retype(volume, new_type, diff, host)
        finally:
            self._logout(common)
    @utils.trace
    def migrate_volume(self, context, volume, host):
        """Migrate a volume to another host.

        In-use volumes can only migrate between backends using the same
        storage protocol; otherwise ``(False, None)`` is returned to signal
        the migration was not attempted.
        """
        if volume['status'] == 'in-use':
            protocol = host['capabilities']['storage_protocol']
            if protocol != self.protocol:
                LOG.debug("3PAR %(protocol)s driver cannot migrate in-use "
                          "volume to a host with "
                          "storage_protocol=%(storage_protocol)s",
                          {'protocol': self.protocol,
                           'storage_protocol': protocol})
                return False, None
        common = self._login()
        try:
            return common.migrate_volume(volume, host)
        finally:
            self._logout(common)
    @utils.trace
    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status):
        """Update the name of the migrated volume to its new ID."""
        common = self._login()
        try:
            return common.update_migrated_volume(context, volume, new_volume,
                                                 original_volume_status)
        finally:
            self._logout(common)
    @utils.trace
    def get_pool(self, volume):
        """Return the CPG (pool) name for a volume.

        :raises InvalidVolume: if the volume does not exist on the array.
        """
        common = self._login()
        try:
            return common.get_cpg(volume)
        except hpeexceptions.HTTPNotFound:
            reason = (_("Volume %s doesn't exist on array.") % volume)
            LOG.error(reason)
            raise exception.InvalidVolume(reason)
        finally:
            self._logout(common)
    @utils.trace
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert volume to snapshot (delegated to the common layer)."""
        common = self._login()
        try:
            common.revert_to_snapshot(volume, snapshot)
        finally:
            self._logout(common)
    @utils.trace
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Force failover to a secondary replication target."""
        # Short timeout: the primary may be unreachable during a failover
        # (presumably chosen to avoid a long hang — confirm).
        common = self._login(timeout=30)
        try:
            # Update the active_backend_id in the driver and return it.
            active_backend_id, volume_updates, group_update_list = (
                common.failover_host(
                    context, volumes, secondary_id, groups))
            self._active_backend_id = active_backend_id
            return active_backend_id, volume_updates, group_update_list
        finally:
            self._logout(common)
    def enable_replication(self, context, group, volumes):
        """Enable replication for a group.

        Delegated to the common layer inside a login/logout pair.

        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :returns: model_update, None
        """
        common = self._login()
        try:
            return common.enable_replication(context, group, volumes)
        finally:
            self._logout(common)
    def disable_replication(self, context, group, volumes):
        """Disable replication for a group.

        Delegated to the common layer inside a login/logout pair.

        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :returns: model_update, None
        """
        common = self._login()
        try:
            return common.disable_replication(context, group, volumes)
        finally:
            self._logout(common)
    def failover_replication(self, context, group, volumes,
                             secondary_backend_id=None):
        """Failover replication for a group.

        Delegated to the common layer inside a login/logout pair.

        :param context: the context
        :param group: the group object
        :param volumes: the list of volumes
        :param secondary_backend_id: the secondary backend id - default None
        :returns: model_update, vol_model_updates
        """
        common = self._login()
        try:
            return common.failover_replication(
                context, group, volumes, secondary_backend_id)
        finally:
            self._logout(common)
    def do_setup(self, context):
        """One-time driver setup: client setup, flag check, backend check,
        then the subclass hook ``_do_setup``."""
        common = self._init_common()
        common.do_setup(context)
        self._check_flags(common)
        common.check_for_setup_error()
        self._do_setup(common)
    def _do_setup(self, common):
        """Hook called at the end of do_setup(); no-op in the base class."""
        pass
    def create_export(self, context, volume, connector):
        """Intentional no-op in the base driver."""
        pass
    def ensure_export(self, context, volume):
        """Intentional no-op in the base driver."""
        pass
    def remove_export(self, context, volume):
        """Intentional no-op in the base driver."""
        pass
    def terminate_connection(self, volume, connector, **kwargs):
        """No-op here; presumably implemented by protocol drivers — confirm."""
        pass
    def initialize_connection(self, volume, connector):
        """No-op here; presumably implemented by protocol drivers — confirm."""
        pass
@utils.trace
def _init_vendor_properties(self):
"""Create a dictionary of vendor unique properties.
This method creates a dictionary of vendor unique properties
and returns both created dictionary and vendor name.
Returned vendor name is used to check for name of vendor
unique properties.
- Vendor name shouldn't include colon(:) because of the separator
and it is automatically replaced by underscore(_).
ex. abc:d -> abc_d
- Vendor prefix is equal to vendor name.
ex. abcd
- Vendor unique properties must start with vendor prefix + ':'.
ex. abcd:maxIOPS
Each backend driver needs to override this method to expose
its own properties using _set_property() like this:
self._set_property(
properties,
"vendorPrefix:specific_property",
"Title of property",
_("Description of property"),
"type")
: return dictionary of vendor unique properties
: return vendor name
prefix: HPE:3PAR --> HPE_3PAR
"""
properties = {}
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPEUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPEUX',
'11 - WindowsServer']
self._set_property(
properties,
"HPE:3PAR:hpe3par:snap_cpg",
"Snap CPG Extra-specs.",
_("Specifies the Snap CPG for a volume type. It overrides the "
"hpe3par_cpg_snap setting. Defaults to the hpe3par_cpg_snap "
"setting in the cinder.conf file. If hpe3par_cpg_snap is not "
"set, it defaults to the hpe3par_cpg setting."),
"string")
self._set_property(
properties,
"HPE:3PAR:hpe3par:persona",
"Host Persona Extra-specs.",
_("Specifies the host persona property for a volume type. It "
"overrides the hpe3par_cpg_snap setting. Defaults to the "
"hpe3par_cpg_snap setting in the cinder.conf file. "
"If hpe3par_cpg_snap is not set, "
"it defaults to the hpe3par_cpg setting."),
"string",
enum=valid_persona_values,
default="2 - Generic-ALUA")
self._set_property(
properties,
"HPE:3PAR:hpe3par:vvs",
"Virtual Volume Set Extra-specs.",
_("The virtual volume set name that has been set up by the "
"administrator that would have predefined QoS rules "
"associated with it. If you specify extra_specs "
"hpe3par:vvs, the qos_specs minIOPS, maxIOPS, minBWS, "
"and maxBWS settings are ignored."),
"string")
self._set_property(
properties,
"HPE:3PAR:hpe3par:flash_cache",
"Flash cache Extra-specs.",
_("Enables Flash cache setting for a volume type."),
"boolean",
default=False)
self._set_property(
properties,
"HPE:3PAR:hpe3par:provisioning",
"Storage Provisioning Extra-specs.",
_("Specifies the provisioning for a volume type."),
"string",
enum=valid_prov_values,
default="thin")
self._set_property(
properties,
"HPE:3PAR:hpe3par:compression",
"Storage Provisioning Extra-specs.",
_("Enables compression for a volume type. "
"Minimum requirement of 3par OS version is 3.3.1 "
"with SSD drives only. "
"Volume size must have > 16 GB to enable "
"compression on volume. "
"A full provisioned volume cannot be compressed."),
"boolean",
default=False)
self._set_property(
properties,
"HPE:3PAR:replication_enabled",
"Volume Replication Extra-specs.",
_("The valid value is: <is> True "
"If True, the volume is to be replicated, if supported, "
"by the backend driver. If the option is not specified or "
"false, then replication is not enabled. This option is "
"required to enable replication."),
"string",
enum=["<is> True"],
default=False)
self._set_property(
properties,
"HPE:3PAR:replication:mode",
"Replication Mode Extra-specs.",
_("Sets the replication mode for 3par."),
"string",
enum=["sync", "periodic"],
default="periodic")
self._set_property(
properties,
"HPE:3PAR:replication:sync_period",
"Sync Period for Volume Replication Extra-specs.",
_("Sets the time interval for synchronization. "
"Only needed if replication:mode is periodic."),
"integer",
default=900)
self._set_property(
properties,
"HPE:3PAR:replication:retention_count",
"Retention Count for Replication Extra-specs.",
_("Sets the number of snapshots that will be "
"saved on the primary array."),
"integer",
default=5)
self._set_property(
properties,
"HPE:3PAR:replication:remote_retention_count",
"Remote Retention Count for Replication Extra-specs.",
_("Sets the number of snapshots that will be "
"saved on the secondary array."),
"integer",
default=5)
# ###### QoS Settings ###### #
self._set_property(
properties,
"HPE:3PAR:minIOPS",
"Minimum IOPS QoS.",
_("Sets the QoS, I/O issue count minimum goal. "
"If not specified, there is no limit on I/O issue count."),
"integer")
self._set_property(
properties,
"HPE:3PAR:maxIOPS",
"Maximum IOPS QoS.",
_("Sets the QoS, I/O issue count rate limit. "
"If not specified, there is no limit on I/O issue count."),
"integer")
self._set_property(
properties,
"HPE:3PAR:minBWS",
"Minimum Bandwidth QoS.",
_("Sets the QoS, I/O issue bandwidth minimum goal. "
"If not specified, there is no limit on "
"I/O issue bandwidth rate."),
"integer")
self._set_property(
properties,
"HPE:3PAR:maxBWS",
"Maximum Bandwidth QoS.",
_("Sets the QoS, I/O issue bandwidth rate limit. "
"If not specified, there is no limit on I/O issue "
"bandwidth rate."),
"integer")
self._set_property(
properties,
"HPE:3PAR:latency",
"Latency QoS.",
_("Sets the latency goal in milliseconds."),
"integer")
self._set_property(
properties,
"HPE:3PAR:priority",
"Priority QoS.",
_("Sets the priority of the QoS rule over other rules."),
"string",
enum=["low", "normal", "high"],
default="normal")
return properties, 'HPE:3PAR'
| phenoxim/cinder | cinder/volume/drivers/hpe/hpe_3par_base.py | Python | apache-2.0 | 22,301 |
import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    The file is opened in a ``with`` block so the handle is always closed;
    the original implementation leaked the file object to the garbage
    collector.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Distribution metadata for the arpa_linker package.
setup(
    name="arpa_linker",
    version="0.6.0",
    author="Erkki Heino",
    description="Tool for linking rdf datasets to other datasets using ARPA",
    license="MIT",
    keywords="rdf",
    url="http://semanticcomputing.github.io/python-arpa-linker/",
    long_description=read('README.md'),  # rendered as the project page on PyPI
    packages=['arpa_linker'],
    install_requires=[
        'rdflib >= 4.2.0',
        'requests >= 2.7.0'
    ],
)
| SemanticComputing/python-arpa-linker | setup.py | Python | mit | 549 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para Ecuador TV
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
import urllib
import os
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = False
CHANNELNAME = "ecuadortv"
def isGeneric():
    # Channel-protocol flag read by the tvalacarta framework; always generic.
    return True
def mainlist(item):
    """Channel entry point: the main menu is simply the program list."""
    logger.info("tvalacarta.channels.ecuadortv mainlist")
    menu = programas(item)
    return menu
def programas(item):
    """Scrape the EcuadorTV television page and build the program menu."""
    logger.info("tvalacarta.channels.ecuadortv canal")

    item.url = "http://www.ecuadortv.ec/television"

    # Download the listing page (cached)
    data = scrapertools.cache_page(item.url)

    # Each program appears as:
    #   <div class="field-item even"><a href="..."><img ... src="..."> ... <h2>Title</h2>
    patron = ('<div class="field-item even"[^<]+'
              '<a href="([^"]+)"[^<]+'
              '<img typeof="foaf.Image" src="([^"]+)".*?'
              '<h2>([^<]+)</h2>')
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []
    for program_url, program_thumb, program_title in matches:
        clean_title = program_title.strip()
        itemlist.append(Item(
            channel=CHANNELNAME,
            title=clean_title,
            url=urlparse.urljoin(item.url, program_url),
            thumbnail=program_thumb,
            plot="",
            action="episodios",
            show=clean_title,
            folder=True,
        ))

    return itemlist
def episodios(item):
    """Scrape a program page and return its episodes as YouTube play items."""
    logger.info("tvalacarta.channels.ecuadortv episodios")

    # Download the program page (cached)
    data = scrapertools.cache_page(item.url)

    # Each episode thumbnail block carries a data-video-id (YouTube id),
    # a thumbnail <img> and a title <h2> inside a slider caption.
    patron = ('<div class="item-thumbnail"[^<]+'
              '<div class="field[^<]+'
              '<div class="field[^<]+'
              '<div class="field[^<]+'
              '<a href="[^"]+" class="[^"]+" data-video-id="([^"]+)"[^<]+'
              '<span class="img"[^<]+'
              '<img src="([^"]+)"[^<]+'
              '</span[^<]+'
              '<span[^<]+</span[^<]+</a[^<]+</div[^<]+</div[^<]+</div[^<]+</div[^<]+'
              '<div class="slider_caption[^<]+'
              '<div class="field[^<]+'
              '<div class="field[^<]+'
              '<div class="field[^<]+'
              '<h2>([^<]+)</h2')
    matches = re.compile(patron, re.DOTALL).findall(data)

    itemlist = []
    for youtube_id, episode_thumb, episode_title in matches:
        itemlist.append(Item(
            channel=CHANNELNAME,
            title=episode_title,
            url="https://www.youtube.com/watch?v=" + youtube_id,
            thumbnail=episode_thumb,
            plot="",
            action="play",
            server="youtube",
            show=item.show,
            folder=False,
        ))

    return itemlist
# Automatic channel verification: returns True when the channel is healthy.
def test():
    """The channel is OK if the first program of the main menu yields videos."""
    categorias_items = mainlist(Item())
    programas_items = programas(categorias_items[0])
    episodios_items = episodios(programas_items[0])
    return len(episodios_items) > 0
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from .views import LatestView, ByView
urlpatterns = [
    # Most recent history entries.
    url(r'^latest$', LatestView.as_view(), name='history-latest'),
    # Entries filtered by user, by content type, or by a single object id.
    url(r'^by/user/(?P<user_id>\d+)$', ByView.as_view(), name='history-by-user'),
    url(r'^by/ct/(?P<ct_id>\d+)$', ByView.as_view(), name='history-by-ct'),
    url(r'^by/ct/(?P<ct_id>\d+)/id/(?P<id>\d+)$', ByView.as_view(), name='history-by-id'),
]
| futurice/django-history | djangohistory/urls.py | Python | bsd-3-clause | 492 |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_messaging._drivers.zmq_driver import zmq_async
@six.add_metaclass(abc.ABCMeta)
class BaseProxy(object):
    """Base TCP-proxy.

    TCP-proxy redirects messages received by TCP from clients to servers
    over IPC. Consists of TCP-frontend and IPC-backend objects. Runs
    in async executor.
    """

    def __init__(self, conf, context):
        super(BaseProxy, self).__init__()
        self.conf = conf
        self.context = context
        # ``run`` is handed to a native-threading executor; lifecycle is
        # controlled via start()/stop()/wait() below.
        self.executor = zmq_async.get_executor(self.run,
                                               zmq_concurrency='native')

    @abc.abstractmethod
    def run(self):
        """Main execution point of the proxy"""

    def start(self):
        # Kick off execution of run() on the background executor.
        self.executor.execute()

    def stop(self):
        # Request the executor to stop; pair with wait() to block on shutdown.
        self.executor.stop()

    def wait(self):
        # Block until the executor has finished.
        self.executor.wait()
| dukhlov/oslo.messaging | oslo_messaging/_drivers/zmq_driver/broker/zmq_base_proxy.py | Python | apache-2.0 | 1,462 |
from flask import Flask
from flask import render_template
from flask import request
import json
import dbconfig
# In test mode swap in the mock DB helper so no real database is needed.
if dbconfig.test:
    from mockdbhelper import MockDBHelper as DBHelper
else:
    from dbhelper import DBHelper

app = Flask(__name__)
# Single module-level DB helper shared by all request handlers.
DB = DBHelper()
@app.route("/")
def home():
    """Render the map page with every stored crime serialized as JSON."""
    crime_json = json.dumps(DB.get_all_crimes())
    return render_template("home.html", crimes=crime_json)
@app.route("/submitcrime", methods=['POST'])
def submitcrime():
    """Store a crime reported via the submission form, then re-render home.

    Latitude/longitude arrive as untrusted strings; a missing or malformed
    coordinate previously raised (TypeError/ValueError) and produced a 500.
    Reports without valid coordinates are now dropped gracefully.
    """
    category = request.form.get("category")
    date = request.form.get("date")
    description = request.form.get("description")
    try:
        latitude = float(request.form.get("latitude"))
        longitude = float(request.form.get("longitude"))
    except (TypeError, ValueError):
        # Invalid/absent coordinates: skip the insert, just show the map.
        return home()
    DB.add_crime(category, date, latitude, longitude, description)
    return home()
if __name__ == '__main__':
    # debug=True enables the reloader/interactive debugger - development only.
    app.run(debug=True)
| nikitabrazhnik/flask2 | Module 1/Chapter07/crimemap.py | Python | mit | 851 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone import exception
def check_length(property_name, value, min_length=1, max_length=64):
    """Validate that len(value) lies within [min_length, max_length].

    Raises exception.ValidationError with a descriptive message otherwise.
    """
    length = len(value)
    if length < min_length:
        if min_length == 1:
            msg = "%s cannot be empty." % property_name
        else:
            msg = ("%(property_name)s cannot be less than "
                   "%(min_length)s characters.") % {
                       'property_name': property_name,
                       'min_length': min_length}
        raise exception.ValidationError(msg)
    if length > max_length:
        msg = ("%(property_name)s should not be greater than "
               "%(max_length)s characters.") % {
                   'property_name': property_name,
                   'max_length': max_length}
        raise exception.ValidationError(msg)
def check_type(property_name, value, expected_type, display_expected_type):
    """Validate that ``value`` is an instance of ``expected_type``."""
    if isinstance(value, expected_type):
        return
    msg = "%(property_name)s is not a %(display_expected_type)s" % {
        'property_name': property_name,
        'display_expected_type': display_expected_type}
    raise exception.ValidationError(msg)
def tenant_name(name):
    """Validate and normalize a tenant name; returns the stripped value."""
    check_type("Tenant name", name, basestring, "string or unicode")
    stripped = name.strip()
    check_length("Tenant name", stripped)
    return stripped
def user_name(name):
    """Validate and normalize a user name; returns the stripped value."""
    check_type("User name", name, basestring, "string or unicode")
    stripped = name.strip()
    check_length("User name", stripped)
    return stripped
| tylertian/Openstack | openstack F/keystone/keystone/clean.py | Python | apache-2.0 | 1,803 |
# Definition of the version number
import os
from io import open as io_open
__all__ = ["__version__"]
# major, minor, patch, -extra
version_info = 4, 15, 0

# Nice string for the version
__version__ = '.'.join(map(str, version_info))

# auto -extra based on commit hash (if not tagged as release)
scriptdir = os.path.dirname(__file__)
gitdir = os.path.abspath(os.path.join(scriptdir, "..", ".git"))
if os.path.isdir(gitdir):  # pragma: nocover
    extra = None
    # Open config file to check if we are in tqdm project
    with io_open(os.path.join(gitdir, "config"), 'r') as fh_config:
        if 'tqdm' in fh_config.read():
            # Open the HEAD file
            with io_open(os.path.join(gitdir, "HEAD"), 'r') as fh_head:
                extra = fh_head.readline().strip()
            # in a branch => HEAD points to file containing last commit
            if 'ref:' in extra:
                # reference file path
                ref_file = extra[5:]
                branch_name = ref_file.rsplit('/', 1)[-1]
                ref_file_path = os.path.abspath(os.path.join(gitdir, ref_file))
                # check that we are in git folder
                # (by stripping the git folder from the ref file path)
                if os.path.relpath(
                        ref_file_path, gitdir).replace('\\', '/') != ref_file:
                    # out of git folder
                    extra = None
                else:
                    # open the ref file
                    with io_open(ref_file_path, 'r') as fh_branch:
                        commit_hash = fh_branch.readline().strip()
                        extra = commit_hash[:8]
                        if branch_name != "master":
                            extra += '.' + branch_name
            # detached HEAD mode, already have commit hash
            else:
                extra = extra[:8]
    # Append commit hash (and branch) to version string if not tagged
    if extra is not None:
        try:
            with io_open(os.path.join(gitdir, "refs", "tags",
                                      'v' + __version__)) as fdv:
                if fdv.readline().strip()[:8] != extra[:8]:
                    __version__ += '-' + extra
        except Exception as e:
            # NOTE(review): matching on the "No such file" message text is
            # fragile across platforms/locales — a missing tag file is the
            # only error meant to be ignored here.
            if "No such file" not in str(e):
                raise
| HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/tqdm/_version.py | Python | apache-2.0 | 2,318 |
# -*- encoding: utf-8 -*-
from .base import *
# Development/test overrides: throwaway SQLite database, no-op search engine.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'temp.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# simple engine (dev only)
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}

# Alternative Elasticsearch configuration kept for reference:
#HAYSTACK_CONNECTIONS = {
#    'default': {
#        'BATCH_SIZE': 100,
#        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
#        'INDEX_NAME': 'example_search',
#        'TIMEOUT': 60 * 5,
#        'URL': 'http://127.0.0.1:9200/',
#    },
#}
| pkimber/search | example_search/dev_test.py | Python | apache-2.0 | 1,119 |
from numpy import arange, power, genfromtxt
from define_my_consts import sigma, L_CO2, L_CO2_175, s_CO2, mu_CO2, R, beta, ro_CO2, epsilon, Cp_Co2ice
def CO2_phase_diagram(Trange):
    """Return (T, P) along the CO2 sublimation curve for T in Trange.

    Temperatures are in K, pressures in mb; fit is for T < -56.4 C.
    """
    # --- CO2 phase diagram for T<-56.4C ---
    Temp = Trange[0] + arange(Trange[-1] - Trange[0] + 1)
    # Empirical vapour-pressure fit (origin of the constants unknown —
    # original carried a "find out where this is from" note).
    exponent = -1354.210/Temp + 8.69903 + 0.001588*Temp - (4.5107*power(10., -6))*Temp*Temp
    Pres = 0.01316*power(10.0, exponent)
    # Convert atm -> mb
    return Temp, Pres*1.01325*1000.0
def CO2_phase_diagram_one(T):
    """Return the CO2 equilibrium vapour pressure (mb) at temperature T (K)."""
    # --- CO2 phase diagram for T<-56.4C ---
    exponent = -1354.210/T + 8.69903 + 0.001588*T - (4.5107*power(10., -6))*T*T
    # Fit gives atm; convert to mb before returning.
    return 0.01316*power(10.0, exponent)*1.01325*1000.0
def CO2_eq_T(P_eq):
    """Interpolate the CO2 equilibrium temperature (K) for pressure P_eq (Pa)."""
    from numpy import interp
    T, P = CO2_phase_diagram([10, 215])
    # Phase-diagram pressures are in mb, so convert P_eq from Pa first.
    return interp(P_eq/100.0, P, T)
def CO2_eq_T_Konrad(Pressure):
    """CO2 condensation temperature (K) at Pressure (Pa), Konrad's fit."""
    from math import log
    # T = d2 / (d3 - ln(P / d1)) with fitted constants
    # d1 = 100.0, d2 = 3148.0, d3 = 23.102.
    return 3148.0 / (23.102 - log(Pressure / 100.0))
def CO2_sublime_mass(Energy, dT):
    """Mass of CO2 sublimated (kg/m2 per time slot) by Energy (W/m2) over dT (K)."""
    # Energy is spent warming the ice by dT plus the latent heat at 175 K.
    heat_per_mole = s_CO2*dT + L_CO2_175
    return Energy / heat_per_mole * mu_CO2
def CO2_condence_mass(epsilonCO2, Tsurf):
    """CO2 mass condensed (kg/m2 per time slot) by radiative cooling at Tsurf (K)."""
    # Stefan-Boltzmann emission divided by the latent heat of CO2.
    return sigma * epsilonCO2 * Tsurf**4 / L_CO2
def CO2_condence_flux_Konrad(wind_speed, Tsurf, Patm):
    # Turbulent CO2 condensation flux (kg/m2/s): proportional to the gap
    # between atmospheric pressure and the equilibrium vapour pressure at
    # the surface temperature. (Python 2 module: print statement below.)
    Peq = CO2_phase_diagram_one(Tsurf)*100.0
    print 'Eq. vap pressure: ', Peq, ' Tsurf =', Tsurf, ' Patm = ', Patm
    CO2_flux = wind_speed*beta*(mu_CO2/R/Tsurf)*(Patm - Peq)
    return CO2_flux # in kg/m2/s
def Cp_CO2(T):
    # Heat capacity of CO2 ice (J kg-1 K-1); linear fit valid for 73-200 K.
    # NOTE(review): out-of-range temperatures only print a warning — the fit
    # is still evaluated and returned.
    if T<73 or T>200: print 'This Cp is out of its T-range for CO2 ice!'
    Cp = 349.0 + 4.8*T # from Washburn 1948
    return Cp # in J kg-1 K-1
def CO2_flux_cond(Tsurf, P):
    """CO2 mass condensing per unit energy balance between Tsurf and the frost point at P."""
    frost_point = CO2_eq_T(P)
    excess = Tsurf - frost_point
    return excess / L_CO2 * Cp_CO2(frost_point)
| portyankina/little_helpers | CO2_phase_diagram.py | Python | bsd-3-clause | 2,025 |
from frowns import Smiles
# Parse a SMILES string (benzene ring + propyl chain + cyclopropane ring).
mol = Smiles.smilin("c1ccccc1CCC1CC1")

# Walk every ring (cycle) found in the molecule and dump its members.
# (Python 2 script: print statements.)
index = 0
for cycle in mol.cycles:
    print "cycle", index
    print "\t", cycle.atoms
    print "\t", cycle.bonds
    index = index + 1
| tuffery/Frog2 | frowns/docs/examples/example8.py | Python | gpl-3.0 | 205 |
__author__ = 'odrulea'
from lib.devices import get_supported_metrics, get_supported_devices, RABBITMQ_ADDRESS, MOCK_DEVICE_ID
import argparse
import imp
import os
import yaml
import time
import threading
from lib.constants import colors
_SUPPORTED_DEVICES = get_supported_devices()
_SUPPORTED_METRICS = get_supported_metrics()
class AnalysisService(object):
    """
    Subscribes and writes data to a file
    Only supports Pika communication method for now, not pipes
    """
    # NOTE(review): Python 2 only module (print statements, file(), getcwdu()).
    LOGNAME = "[Analysis Service] "

    def __init__(self, device_name, device_id, rabbitmq_address=None, conf_path=None):
        if rabbitmq_address is None:
            raise ValueError(colors.FAIL + self.LOGNAME + "Pika subscriber needs to have a rabbitmq address!" + colors.ENDC)
        # set vars
        self.device_name = device_name
        self.device_id = device_id
        self.rabbitmq_address = rabbitmq_address
        self.conf_path = conf_path
        self.debug = False
        # local relative filepath, used to load config file and to dynamically load classes
        self.location = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__)) )
        # this will hold the config yaml info as an array
        self.conf = None
        # an intenral registry of all module threads running
        self.modules = {}
        # setup more vars
        self.setup()

    def setup(self):
        # get config from yaml file (default = ./conf.yml)
        # config file path is relative to root folder
        settings_file_path = os.path.realpath( os.path.join( os.getcwdu(), self.conf_path) )
        stream = file(settings_file_path, 'r')
        # set local conf property from the yaml config file
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the stream is also never closed.
        self.conf = yaml.load(stream)

    def start(self):
        print colors.SILVER + self.LOGNAME + "Collecting data ... Ctl-C to stop." + colors.ENDC
        # loop through each module and start them
        # passing the settings from conf file to each
        if "modules" in self.conf and len(self.conf["modules"]):
            module_i = 0;
            for module_conf in self.conf["modules"]:
                # start each module
                self.launchModule(module_conf, self.device_name, self.device_id, self.rabbitmq_address, module_i)
                module_i += 1
        else:
            print self.LOGNAME + "No modules defined for analysis"
        # this is here so that child threads can run
        # (daemon threads would die if the main thread exited)
        while True:
            time.sleep(1)

    def launchModule(self, module_conf, device_name, device_id, rabbitmq_address, module_index):
        # Dynamically load, instantiate and start one analysis module in a
        # daemon thread, registering it under its configured id.
        # module classname is required
        if 'class' in module_conf:
            moduleClassName = module_conf['class']
        else:
            raise ValueError(colors.FAIL + self.LOGNAME + "ERROR: class not defined for module: " + str(module_conf) + colors.ENDC)
        # get module parameters for any operation at the service level (optional)
        if 'settings' in module_conf:
            module_settings = module_conf['settings']
            # debug output
            if 'debug' in module_settings and module_settings['debug'] == True:
                # if any of the modules have debug turned on, turn on the service debug too
                self.debug = True
        #print colors.CYAN + \
        #      "\nModule: " + moduleClassName + "\n" \
        #      "Configuration: " + json.dumps(module_conf, indent=2) + "\n" + colors.ENDC
        module_id = None
        if 'id' in module_conf:
            module_id = module_conf['id']
        # dynamically import the module
        module_filepath = os.path.join(self.location + "/modules", moduleClassName+'.py')
        py_mod = imp.load_source(moduleClassName, module_filepath)
        # instantiate the imported module
        moduleInstance = getattr(py_mod, moduleClassName)(device_name=device_name, device_id=device_id,
                                                          rabbitmq_address=rabbitmq_address, module_conf=module_conf,
                                                          global_conf=self.conf, module_index=module_index)
        # all modules should implement start() and stop()
        thread = threading.Thread(target=moduleInstance.start)
        thread.daemon = True
        # assign the thread to internal registry and start it up
        # NOTE(review): modules without an 'id' all map to the None key and
        # overwrite each other in this registry — confirm intended.
        self.modules[module_id] = thread
        self.modules[module_id].start()
        #moduleInstance.stop()
def parse_args():
    """Parse the command line options for the analysis service."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--device_id', required=True,
                        help="A unique ID to identify the device you are sending data from. "
                             "For example: 'octopicorn2015'")
    parser.add_argument('-n', '--device_name', required=True,
                        help="The name of the device your are sending data from. "
                             "Supported devices are: %s" % _SUPPORTED_DEVICES)
    parser.add_argument('-c', '--cloudbrain', default=RABBITMQ_ADDRESS,
                        help="The address of the CloudBrain instance you are sending data to.\n"
                             "Use " + RABBITMQ_ADDRESS + " to send data to our hosted service. \n"
                             "Otherwise use 'localhost' if running CloudBrain locally")
    return parser.parse_args()
def main():
    """CLI entry point: parse the arguments and launch the service."""
    opts = parse_args()
    run(opts.device_name, opts.device_id, opts.cloudbrain)
def run(device_name='muse',
        device_id=MOCK_DEVICE_ID,
        cloudbrain_address=RABBITMQ_ADDRESS
        ):
    """Create an AnalysisService for the given device and start it (blocks)."""
    AnalysisService(device_name=device_name,
                    device_id=device_id,
                    rabbitmq_address=cloudbrain_address).start()
if __name__ == "__main__":
    # Run the service when executed directly (blocks until interrupted).
    main()
| octopicorn/bcikit | Analysis/AnalysisService.py | Python | agpl-3.0 | 5,988 |
"""
URLs for LMS
"""
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
from ratelimitbackend import admin
from django.conf.urls.static import static
import auth_exchange.views
from courseware.views.views import EnrollStaffView
from config_models.views import ConfigurationModelCurrentAPIView
from courseware.views.index import CoursewareIndex
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.views import LogoutView
# Register Django admin models in debug mode or when the admin site feature
# flag is enabled.
if settings.DEBUG or settings.FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
    admin.autodiscover()
# Use urlpatterns formatted as within the Django docs with first parameter "stuck" to the open parenthesis
# NOTE(review): string view references and the tuple form are Django <1.10
# style, matching the patterns()-era APIs used elsewhere in this module.
urlpatterns = (
    '',

    url(r'^$', 'branding.views.index', name="root"),  # Main marketing page, or redirect to courseware
    url(r'^dashboard$', 'student.views.dashboard', name="dashboard"),
    url(r'^login_ajax$', 'student.views.login_user', name="login"),
    url(r'^login_ajax/(?P<error>[^/]*)$', 'student.views.login_user'),
    url(r'^email_confirm/(?P<key>[^/]*)$', 'student.views.confirm_email_change'),
    url(r'^event$', 'track.views.user_track'),
    url(r'^performance$', 'performance.views.performance_log'),
    url(r'^segmentio/event$', 'track.views.segmentio.segmentio_event'),

    # TODO: Is this used anymore? What is STATIC_GRAB?
    url(r'^t/(?P<template>[^/]*)$', 'static_template_view.views.index'),

    url(r'^accounts/manage_user_standing', 'student.views.manage_user_standing',
        name='manage_user_standing'),
    url(r'^accounts/disable_account_ajax$', 'student.views.disable_account_ajax',
        name="disable_account_ajax"),

    url(r'^logout$', LogoutView.as_view(), name='logout'),
    url(r'^create_account$', 'student.views.create_account', name='create_account'),
    url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name="activate"),

    url(r'^password_reset/$', 'student.views.password_reset', name='password_reset'),

    ## Obsolete Django views for password resets
    ## TODO: Replace with Mako-ized views
    url(r'^password_change/$', 'django.contrib.auth.views.password_change',
        name='password_change'),
    url(r'^password_change_done/$', 'django.contrib.auth.views.password_change_done',
        name='password_change_done'),
    url(r'^password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
        'student.views.password_reset_confirm_wrapper',
        name='password_reset_confirm'),
    url(r'^password_reset_complete/$', 'django.contrib.auth.views.password_reset_complete',
        name='password_reset_complete'),
    url(r'^password_reset_done/$', 'django.contrib.auth.views.password_reset_done',
        name='password_reset_done'),

    url(r'^heartbeat$', include('heartbeat.urls')),

    # Note: these are older versions of the User API that will eventually be
    # subsumed by api/user listed below.
    url(r'^user_api/', include('openedx.core.djangoapps.user_api.legacy_urls')),
    url(r'^notifier_api/', include('notifier_api.urls')),

    url(r'^i18n/', include('django.conf.urls.i18n')),

    # Feedback Form endpoint
    url(r'^submit_feedback$', 'util.views.submit_feedback'),

    # Enrollment API RESTful endpoints
    url(r'^api/enrollment/v1/', include('enrollment.urls')),

    # Courseware search endpoints
    url(r'^search/', include('search.urls')),

    # Course content API
    url(r'^api/course_structure/', include('course_structure_api.urls', namespace='course_structure_api')),

    # Course API
    url(r'^api/courses/', include('course_api.urls')),

    # User API endpoints
    url(r'^api/user/', include('openedx.core.djangoapps.user_api.urls')),

    # Bookmarks API endpoints
    url(r'^api/bookmarks/', include('openedx.core.djangoapps.bookmarks.urls')),

    # Profile Images API endpoints
    url(r'^api/profile_images/', include('openedx.core.djangoapps.profile_images.urls')),

    # Video Abstraction Layer used to allow video teams to manage video assets
    # independently of courseware. https://github.com/edx/edx-val
    url(r'^api/val/v0/', include('edxval.urls')),

    url(r'^api/commerce/', include('commerce.api.urls', namespace='commerce_api')),
    url(r'^api/credit/', include('openedx.core.djangoapps.credit.urls', app_name="credit", namespace='credit')),
    url(r'^rss_proxy/', include('rss_proxy.urls', namespace='rss_proxy')),
    url(r'^api/organizations/', include('organizations.urls', namespace='organizations')),

    # Update session view
    url(r'^lang_pref/session_language', 'lang_pref.views.update_session_language', name='session_language'),

    # Multiple course modes and identity verification
    # TODO Namespace these!
    url(r'^course_modes/', include('course_modes.urls')),
    url(r'^verify_student/', include('verify_student.urls')),

    # URLs for API access management
    url(r'^api-admin/', include('openedx.core.djangoapps.api_admin.urls', namespace='api_admin')),
)
urlpatterns += (
    url(r'^dashboard/', include('learner_dashboard.urls')),
)

# Feature-flagged URL groups: login/registration, mobile API, badges.
if settings.FEATURES["ENABLE_COMBINED_LOGIN_REGISTRATION"]:
    # Backwards compatibility with old URL structure, but serve the new views
    urlpatterns += (
        url(r'^login$', 'student_account.views.login_and_registration_form',
            {'initial_mode': 'login'}, name="signin_user"),
        url(r'^register$', 'student_account.views.login_and_registration_form',
            {'initial_mode': 'register'}, name="register_user"),
    )
else:
    # Serve the old views
    urlpatterns += (
        url(r'^login$', 'student.views.signin_user', name="signin_user"),
        url(r'^register$', 'student.views.register_user', name="register_user"),
    )

if settings.FEATURES["ENABLE_MOBILE_REST_API"]:
    urlpatterns += (
        url(r'^api/mobile/v0.5/', include('mobile_api.urls')),
    )

if settings.FEATURES["ENABLE_OPENBADGES"]:
    urlpatterns += (
        url(r'^api/badges/v1/', include('badges.api.urls', app_name="badges", namespace="badges_api")),
    )
# NOTE(review): presumably consumed by Django's javascript i18n catalog
# view — confirm where js_info_dict is passed.
js_info_dict = {
    'domain': 'djangojs',
    # We need to explicitly include external Django apps that are not in LOCALE_PATHS.
    'packages': ('openassessment',),
}

# sysadmin dashboard, to see what courses are loaded, to delete & load courses
if settings.FEATURES["ENABLE_SYSADMIN_DASHBOARD"]:
    urlpatterns += (
        url(r'^sysadmin/', include('dashboard.sysadmin_urls')),
    )

urlpatterns += (
    url(r'^support/', include('support.urls', app_name="support", namespace='support')),
)
# Semi-static views (these need to be rendered and have the login bar, but don't change)
urlpatterns += (
    url(r'^404$', 'static_template_view.views.render',
        {'template': '404.html'}, name="404"),
)

# Favicon: site-configurable path, served via a permanent redirect to static.
favicon_path = configuration_helpers.get_value('favicon_path', settings.FAVICON_PATH) # pylint: disable=invalid-name
urlpatterns += (url(
    r'^favicon\.ico$',
    RedirectView.as_view(url=settings.STATIC_URL + favicon_path, permanent=True)
),)
# Semi-static views only used by edX, not by themes
if not settings.FEATURES["USE_CUSTOM_THEME"]:
    urlpatterns += (
        url(r'^blog$', 'static_template_view.views.render',
            {'template': 'blog.html'}, name="blog"),
        url(r'^contact$', 'static_template_view.views.render',
            {'template': 'contact.html'}, name="contact"),
        url(r'^donate$', 'static_template_view.views.render',
            {'template': 'donate.html'}, name="donate"),
        url(r'^faq$', 'static_template_view.views.render',
            {'template': 'faq.html'}, name="faq"),
        url(r'^help$', 'static_template_view.views.render',
            {'template': 'help.html'}, name="help_edx"),
        url(r'^jobs$', 'static_template_view.views.render',
            {'template': 'jobs.html'}, name="jobs"),
        url(r'^news$', 'static_template_view.views.render',
            {'template': 'news.html'}, name="news"),
        url(r'^press$', 'static_template_view.views.render',
            {'template': 'press.html'}, name="press"),
        url(r'^media-kit$', 'static_template_view.views.render',
            {'template': 'media-kit.html'}, name="media-kit"),
        url(r'^copyright$', 'static_template_view.views.render',
            {'template': 'copyright.html'}, name="copyright"),

        # Press releases
        url(r'^press/([_a-zA-Z0-9-]+)$', 'static_template_view.views.render_press_release', name='press_release'),
    )

# Only enable URLs for those marketing links actually enabled in the
# settings. Disable URLs by marking them as None.
for key, value in settings.MKTG_URL_LINK_MAP.items():
    # Skip disabled URLs
    if value is None:
        continue

    # These urls are enabled separately
    if key == "ROOT" or key == "COURSES":
        continue

    # The MKTG_URL_LINK_MAP key specifies the template filename
    template = key.lower()
    if '.' not in template:
        # Append STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION if
        # no file extension was specified in the key
        template = "%s.%s" % (template, settings.STATIC_TEMPLATE_VIEW_DEFAULT_FILE_EXTENSION)

    # To allow theme templates to inherit from default templates,
    # prepend a standard prefix
    if settings.FEATURES["USE_CUSTOM_THEME"]:
        template = "theme-" + template

    # Make the assumption that the URL we want is the lowercased
    # version of the map key
    urlpatterns += (url(r'^%s$' % key.lower(),
                        'static_template_view.views.render',
                        {'template': template}, name=value),)
# Multicourse wiki (Note: wiki urls must be above the courseware ones because of
# the custom tab catch-all)
if settings.WIKI_ENABLED:
from wiki.urls import get_pattern as wiki_pattern
from django_notify.urls import get_pattern as notify_pattern
urlpatterns += (
# First we include views from course_wiki that we use to override the default views.
# They come first in the urlpatterns so they get resolved first
url('^wiki/create-root/$', 'course_wiki.views.root_create', name='root_create'),
url(r'^wiki/', include(wiki_pattern())),
url(r'^notify/', include(notify_pattern())),
# These urls are for viewing the wiki in the context of a course. They should
# never be returned by a reverse() so they come after the other url patterns
url(r'^courses/{}/course_wiki/?$'.format(settings.COURSE_ID_PATTERN),
'course_wiki.views.course_wiki_redirect', name="course_wiki"),
url(r'^courses/{}/wiki/'.format(settings.COURSE_KEY_REGEX), include(wiki_pattern())),
)
COURSE_URLS = patterns(
'',
url(
r'^look_up_registration_code$',
'instructor.views.registration_codes.look_up_registration_code',
name='look_up_registration_code',
),
url(
r'^registration_code_details$',
'instructor.views.registration_codes.registration_code_details',
name='registration_code_details',
),
)
urlpatterns += (
# jump_to URLs for direct access to a location in the course
url(
r'^courses/{}/jump_to/(?P<location>.*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.jump_to',
name='jump_to',
),
url(
r'^courses/{}/jump_to_id/(?P<module_id>.*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.jump_to_id',
name='jump_to_id',
),
# xblock Handler APIs
url(
r'^courses/{course_key}/xblock/{usage_key}/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.handle_xblock_callback',
name='xblock_handler',
),
url(
r'^courses/{course_key}/xblock/{usage_key}/handler_noauth/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.handle_xblock_callback_noauth',
name='xblock_handler_noauth',
),
# xblock View API
# (unpublished) API that returns JSON with the HTML fragment and related resources
# for the xBlock's requested view.
url(
r'^courses/{course_key}/xblock/{usage_key}/view/(?P<view_name>[^/]*)$'.format(
course_key=settings.COURSE_ID_PATTERN,
usage_key=settings.USAGE_ID_PATTERN,
),
'courseware.module_render.xblock_view',
name='xblock_view',
),
# xblock Rendering View URL
# URL to provide an HTML view of an xBlock. The view type (e.g., student_view) is
# passed as a "view" parameter to the URL.
# Note: This is not an API. Compare this with the xblock_view API above.
url(
r'^xblock/{usage_key_string}$'.format(usage_key_string=settings.USAGE_KEY_PATTERN),
'courseware.views.views.render_xblock',
name='render_xblock',
),
# xblock Resource URL
url(
r'xblock/resource/(?P<block_type>[^/]+)/(?P<uri>.*)$',
'openedx.core.djangoapps.common_views.xblock.xblock_resource',
name='xblock_resource_url',
),
url(
r'^courses/{}/xqueue/(?P<userid>[^/]*)/(?P<mod_id>.*?)/(?P<dispatch>[^/]*)$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.module_render.xqueue_callback',
name='xqueue_callback',
),
url(
r'^change_setting$',
'student.views.change_setting',
name='change_setting',
),
# TODO: These views need to be updated before they work
url(r'^calculate$', 'util.views.calculate'),
url(r'^courses/?$', 'branding.views.courses', name="courses"),
url(
r'^change_enrollment$',
'student.views.change_enrollment',
name='change_enrollment',
),
url(
r'^change_email_settings$',
'student.views.change_email_settings',
name='change_email_settings',
),
    # About the course
url(
r'^courses/{}/about$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_about',
name='about_course',
),
url(
r'^courses/{}/enroll_staff$'.format(
settings.COURSE_ID_PATTERN,
),
EnrollStaffView.as_view(),
name='enroll_staff',
),
    # Inside the course
url(
r'^courses/{}/$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_info',
name='course_root',
),
url(
r'^courses/{}/info$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_info',
name='info',
),
# TODO arjun remove when custom tabs in place, see courseware/courses.py
url(
r'^courses/{}/syllabus$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.syllabus',
name='syllabus',
),
# Survey associated with a course
url(
r'^courses/{}/survey$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.course_survey',
name='course_survey',
),
url(
r'^courses/{}/book/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.index',
name='book',
),
url(
r'^courses/{}/book/(?P<book_index>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.index',
name='book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/pdfbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/(?P<page>\d+)$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.pdf_index',
name='pdf_book',
),
url(
r'^courses/{}/htmlbook/(?P<book_index>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.html_index',
name='html_book',
),
url(
r'^courses/{}/htmlbook/(?P<book_index>\d+)/chapter/(?P<chapter>\d+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'staticbook.views.html_index',
name='html_book',
),
url(
r'^courses/{}/courseware/?$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_chapter',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/(?P<section>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_section',
),
url(
r'^courses/{}/courseware/(?P<chapter>[^/]*)/(?P<section>[^/]*)/(?P<position>[^/]*)/?$'.format(
settings.COURSE_ID_PATTERN,
),
CoursewareIndex.as_view(),
name='courseware_position',
),
url(
r'^courses/{}/progress$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.progress',
name='progress',
),
# Takes optional student_id for instructor use--shows profile as that student sees it.
url(
r'^courses/{}/progress/(?P<student_id>[^/]*)/$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.progress',
name='student_progress',
),
# For the instructor
url(
r'^courses/{}/instructor$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.instructor_dashboard.instructor_dashboard_2',
name='instructor_dashboard',
),
url(
r'^courses/{}/set_course_mode_price$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.instructor_dashboard.set_course_mode_price',
name='set_course_mode_price',
),
url(
r'^courses/{}/instructor/api/'.format(
settings.COURSE_ID_PATTERN,
),
include('instructor.views.api_urls')),
url(
r'^courses/{}/remove_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.coupons.remove_coupon',
name='remove_coupon',
),
url(
r'^courses/{}/add_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.coupons.add_coupon',
name='add_coupon',
),
url(
r'^courses/{}/update_coupon$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.coupons.update_coupon',
name='update_coupon',
),
url(
r'^courses/{}/get_coupon_info$'.format(
settings.COURSE_ID_PATTERN,
),
'instructor.views.coupons.get_coupon_info',
name='get_coupon_info',
),
url(
r'^courses/{}/'.format(
settings.COURSE_ID_PATTERN,
),
include(COURSE_URLS)
),
# Cohorts management
url(
r'^courses/{}/cohorts/settings$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.course_cohort_settings_handler',
name='course_cohort_settings',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)?$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.cohort_handler',
name='cohorts',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.users_in_cohort',
name='list_cohort',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)/add$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.add_users_to_cohort',
name='add_to_cohort',
),
url(
r'^courses/{}/cohorts/(?P<cohort_id>[0-9]+)/delete$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.remove_user_from_cohort',
name='remove_from_cohort',
),
url(
r'^courses/{}/cohorts/debug$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.debug_cohort_mgmt',
name='debug_cohort_mgmt',
),
url(
r'^courses/{}/cohorts/topics$'.format(
settings.COURSE_KEY_PATTERN,
),
'openedx.core.djangoapps.course_groups.views.cohort_discussion_topics',
name='cohort_discussion_topics',
),
url(
r'^courses/{}/verified_track_content/settings'.format(
settings.COURSE_KEY_PATTERN,
),
'verified_track_content.views.cohorting_settings',
name='verified_track_cohorting',
),
url(
r'^courses/{}/notes$'.format(
settings.COURSE_ID_PATTERN,
),
'notes.views.notes',
name='notes',
),
url(
r'^courses/{}/notes/'.format(
settings.COURSE_ID_PATTERN,
),
include('notes.urls')
),
# LTI endpoints listing
url(
r'^courses/{}/lti_rest_endpoints/'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.get_course_lti_endpoints',
name='lti_rest_endpoints',
),
# Student account
url(
r'^account/',
include('student_account.urls')
),
# Student profile
url(
r'^u/(?P<username>[\w.@+-]+)$',
'student_profile.views.learner_profile',
name='learner_profile',
),
# Student Notes
url(
r'^courses/{}/edxnotes'.format(
settings.COURSE_ID_PATTERN,
),
include('edxnotes.urls'),
name='edxnotes_endpoints',
),
url(
r'^api/branding/v1/',
include('branding.api_urls')
),
)
if settings.FEATURES["ENABLE_TEAMS"]:
# Teams endpoints
urlpatterns += (
url(
r'^api/team/',
include('lms.djangoapps.teams.api_urls')
),
url(
r'^courses/{}/teams'.format(
settings.COURSE_ID_PATTERN,
),
include('lms.djangoapps.teams.urls'),
name='teams_endpoints',
),
)
# allow course staff to change to student view of courseware
if settings.FEATURES.get('ENABLE_MASQUERADE'):
urlpatterns += (
url(
r'^courses/{}/masquerade$'.format(
settings.COURSE_KEY_PATTERN,
),
'courseware.masquerade.handle_ajax',
name='masquerade_update',
),
)
urlpatterns += (
url(
r'^courses/{}/generate_user_cert'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.generate_user_cert',
name='generate_user_cert',
),
)
# discussion forums live within courseware, so courseware must be enabled first
if settings.FEATURES.get('ENABLE_DISCUSSION_SERVICE'):
urlpatterns += (
url(
r'^api/discussion/',
include('discussion_api.urls')
),
url(
r'^courses/{}/discussion/'.format(
settings.COURSE_ID_PATTERN,
),
include('django_comment_client.urls')
),
url(
r'^notification_prefs/enable/',
'notification_prefs.views.ajax_enable'
),
url(
r'^notification_prefs/disable/',
'notification_prefs.views.ajax_disable'
),
url(
r'^notification_prefs/status/',
'notification_prefs.views.ajax_status'
),
url(
r'^notification_prefs/unsubscribe/(?P<token>[a-zA-Z0-9-_=]+)/',
'notification_prefs.views.set_subscription',
{
'subscribe': False,
},
name='unsubscribe_forum_update',
),
url(
r'^notification_prefs/resubscribe/(?P<token>[a-zA-Z0-9-_=]+)/',
'notification_prefs.views.set_subscription',
{
'subscribe': True,
},
name='resubscribe_forum_update',
),
)
urlpatterns += (
# This MUST be the last view in the courseware--it's a catch-all for custom tabs.
url(
r'^courses/{}/(?P<tab_slug>[^/]+)/$'.format(
settings.COURSE_ID_PATTERN,
),
'courseware.views.views.static_tab',
name='static_tab',
),
)
if settings.FEATURES.get('ENABLE_STUDENT_HISTORY_VIEW'):
urlpatterns += (
url(
r'^courses/{}/submission_history/(?P<student_username>[^/]*)/(?P<location>.*?)$'.format(
settings.COURSE_ID_PATTERN
),
'courseware.views.views.submission_history',
name='submission_history',
),
)
if settings.FEATURES.get('CLASS_DASHBOARD'):
urlpatterns += (
url(r'^class_dashboard/', include('class_dashboard.urls')),
)
if settings.DEBUG or settings.FEATURES.get('ENABLE_DJANGO_ADMIN_SITE'):
    # Django admin site (enabled in DEBUG or when ENABLE_DJANGO_ADMIN_SITE is set)
urlpatterns += (url(r'^admin/', include(admin.site.urls)),)
if settings.FEATURES.get('AUTH_USE_OPENID'):
urlpatterns += (
url(r'^openid/login/$', 'django_openid_auth.views.login_begin', name='openid-login'),
url(r'^openid/complete/$', 'external_auth.views.openid_login_complete', name='openid-complete'),
url(r'^openid/logo.gif$', 'django_openid_auth.views.logo', name='openid-logo'),
)
if settings.FEATURES.get('AUTH_USE_SHIB'):
urlpatterns += (
url(r'^shib-login/$', 'external_auth.views.shib_login', name='shib-login'),
)
if settings.FEATURES.get('AUTH_USE_CAS'):
urlpatterns += (
url(r'^cas-auth/login/$', 'external_auth.views.cas_login', name="cas-login"),
url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
)
if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD'):
urlpatterns += (
url(r'^course_specific_login/{}/$'.format(settings.COURSE_ID_PATTERN),
'external_auth.views.course_specific_login', name='course-specific-login'),
url(r'^course_specific_register/{}/$'.format(settings.COURSE_ID_PATTERN),
'external_auth.views.course_specific_register', name='course-specific-register'),
)
# Shopping cart
urlpatterns += (
url(r'^shoppingcart/', include('shoppingcart.urls')),
url(r'^commerce/', include('commerce.urls', namespace='commerce')),
)
# Embargo
if settings.FEATURES.get('EMBARGO'):
urlpatterns += (
url(r'^embargo/', include('embargo.urls')),
)
# Survey Djangoapp
urlpatterns += (
url(r'^survey/', include('survey.urls')),
)
if settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'):
urlpatterns += (
url(r'^openid/provider/login/$', 'external_auth.views.provider_login', name='openid-provider-login'),
url(
r'^openid/provider/login/(?:.+)$',
'external_auth.views.provider_identity',
name='openid-provider-login-identity'
),
url(r'^openid/provider/identity/$', 'external_auth.views.provider_identity', name='openid-provider-identity'),
url(r'^openid/provider/xrds/$', 'external_auth.views.provider_xrds', name='openid-provider-xrds')
)
if settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
urlpatterns += (
# These URLs dispatch to django-oauth-toolkit or django-oauth2-provider as appropriate.
# Developers should use these routes, to maintain compatibility for existing client code
url(r'^oauth2/', include('lms.djangoapps.oauth_dispatch.urls')),
# These URLs contain the django-oauth2-provider default behavior. It exists to provide
# URLs for django-oauth2-provider to call using reverse() with the oauth2 namespace, and
# also to maintain support for views that have not yet been wrapped in dispatch views.
url(r'^oauth2/', include('edx_oauth2_provider.urls', namespace='oauth2')),
# The /_o/ prefix exists to provide a target for code in django-oauth-toolkit that
# uses reverse() with the 'oauth2_provider' namespace. Developers should not access these
# views directly, but should rather use the wrapped views at /oauth2/
url(r'^_o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
)
if settings.FEATURES.get('ENABLE_LMS_MIGRATION'):
urlpatterns += (
url(r'^migrate/modules$', 'lms_migration.migrate.manage_modulestores'),
url(r'^migrate/reload/(?P<reload_dir>[^/]+)$', 'lms_migration.migrate.manage_modulestores'),
url(
r'^migrate/reload/(?P<reload_dir>[^/]+)/(?P<commit_id>[^/]+)$',
'lms_migration.migrate.manage_modulestores'
),
url(r'^gitreload$', 'lms_migration.migrate.gitreload'),
url(r'^gitreload/(?P<reload_dir>[^/]+)$', 'lms_migration.migrate.gitreload'),
)
if settings.FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
urlpatterns += (
url(r'^event_logs$', 'track.views.view_tracking_log'),
url(r'^event_logs/(?P<args>.+)$', 'track.views.view_tracking_log'),
)
if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
urlpatterns += (
url(r'^status/', include('service_status.urls')),
)
if settings.FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):
urlpatterns += (
url(
r'^instructor_task_status/$',
'instructor_task.views.instructor_task_status',
name='instructor_task_status'
),
)
if settings.FEATURES.get('RUN_AS_ANALYTICS_SERVER_ENABLED'):
urlpatterns += (
url(r'^edinsights_service/', include('edinsights.core.urls')),
)
if settings.FEATURES.get('ENABLE_DEBUG_RUN_PYTHON'):
urlpatterns += (
url(r'^debug/run_python$', 'debug.views.run_python'),
)
urlpatterns += (
url(r'^debug/show_parameters$', 'debug.views.show_parameters'),
)
# enable automatic login
if settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING'):
urlpatterns += (
url(r'^auto_auth$', 'student.views.auto_auth'),
)
# Third-party auth.
if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH'):
urlpatterns += (
url(r'', include('third_party_auth.urls')),
url(r'api/third_party_auth/', include('third_party_auth.api.urls')),
# NOTE: The following login_oauth_token endpoint is DEPRECATED.
# Please use the exchange_access_token endpoint instead.
url(r'^login_oauth_token/(?P<backend>[^/]+)/$', 'student.views.login_oauth_token'),
)
# OAuth token exchange
if settings.FEATURES.get('ENABLE_OAUTH2_PROVIDER'):
urlpatterns += (
url(
r'^oauth2/login/$',
auth_exchange.views.LoginWithAccessTokenView.as_view(),
name="login_with_access_token"
),
)
urlpatterns += (
url(r'^labster_license/', include('labster_course_license.webhook_urls')),
url(r'^labster/api/users/', include('labster_course_license.user_urls')),
)
urlpatterns += (
url(r'^vouchers/', include('labster_vouchers.urls')),
url(r'^labster/api/', include('lms.djangoapps.labster_enroll.urls')),
)
# Certificates
urlpatterns += (
url(r'^certificates/', include('certificates.urls', app_name="certificates", namespace="certificates")),
# Backwards compatibility with XQueue, which uses URLs that are not prefixed with /certificates/
url(r'^update_certificate$', 'certificates.views.update_certificate'),
url(r'^update_example_certificate$', 'certificates.views.update_example_certificate'),
url(r'^request_certificate$', 'certificates.views.request_certificate'),
# REST APIs
url(r'^api/certificates/',
include('lms.djangoapps.certificates.apis.urls', namespace='certificates_api')),
)
urlpatterns += (
url(r'^courses/{}/'.format(settings.COURSE_ID_PATTERN), include('labster_course_license.urls')),
)
# XDomain proxy
urlpatterns += (
url(r'^xdomain_proxy.html$', 'cors_csrf.views.xdomain_proxy', name='xdomain_proxy'),
)
# Custom courses on edX (CCX) URLs
if settings.FEATURES["CUSTOM_COURSES_EDX"]:
urlpatterns += (
url(r'^courses/{}/'.format(settings.COURSE_ID_PATTERN),
include('ccx.urls')),
url(r'^api/ccx/', include('lms.djangoapps.ccx.api.urls', namespace='ccx_api')),
)
# Access to courseware as an LTI provider
if settings.FEATURES.get("ENABLE_LTI_PROVIDER"):
urlpatterns += (
url(r'^lti_provider/', include('lti_provider.urls')),
)
urlpatterns += (
url(r'config/self_paced', ConfigurationModelCurrentAPIView.as_view(model=SelfPacedConfiguration)),
url(r'config/programs', ConfigurationModelCurrentAPIView.as_view(model=ProgramsApiConfig)),
)
urlpatterns = patterns(*urlpatterns)
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(
settings.PROFILE_IMAGE_BACKEND['options']['base_url'],
document_root=settings.PROFILE_IMAGE_BACKEND['options']['location']
)
urlpatterns += url(r'^template/(?P<template>.+)$', 'openedx.core.djangoapps.debug.views.show_reference_template'),
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += (
url(r'^__debug__/', include(debug_toolbar.urls)),
)
# Custom error pages
handler404 = 'static_template_view.views.render_404'
handler500 = 'static_template_view.views.render_500'
# display error page templates, for testing purposes
urlpatterns += (
url(r'^404$', handler404),
url(r'^500$', handler500),
)
# include into our URL patterns the HTTP REST API that comes with edx-proctoring.
urlpatterns += (
url(r'^api/', include('edx_proctoring.urls')),
)
if settings.FEATURES.get('ENABLE_FINANCIAL_ASSISTANCE_FORM'):
urlpatterns += (
url(
r'^financial-assistance/$',
'courseware.views.views.financial_assistance',
name='financial_assistance'
),
url(
r'^financial-assistance/apply/$',
'courseware.views.views.financial_assistance_form',
name='financial_assistance_form'
),
url(
r'^financial-assistance/submit/$',
'courseware.views.views.financial_assistance_request',
name='submit_financial_assistance_request'
)
)
| Livit/Livit.Learn.EdX | lms/urls.py | Python | agpl-3.0 | 35,570 |
#!/usr/bin/env python3
#
# Sphinx build configuration for the grmpy documentation, originally
# created by sphinx-quickstart on Fri Aug 18 13:05:32 2017.
#
# Sphinx execfile()s this module with the documentation directory as the
# current working directory, so relative paths below are resolved there.
# Only values that matter to this project are spelled out; everything else
# keeps the Sphinx defaults.

import os
import sys

# On Read the Docs the todo lists are suppressed; in local builds they show.
on_rtd = os.environ.get("READTHEDOCS") == "True"

# Make the package importable for autodoc: the project root lives one
# directory above the documentation root.
sys.path.insert(0, os.path.abspath(".."))

# -- General configuration -------------------------------------------------

# Sphinx extension modules: builtin "sphinx.ext.*" plus sphinxcontrib.bibtex.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinxcontrib.bibtex",
    "sphinx.ext.imgconverter",
]

# Bibliography file consumed by sphinxcontrib.bibtex.
bibtex_bibfiles = ["source/refs.bib"]

# Template directories, relative to this file.
templates_path = ["_templates"]

# Source file suffix and the master toctree document.
source_suffix = ".rst"
master_doc = "index"

# Project metadata.
# NOTE(review): Sphinx itself reads a config value named ``copyright``; the
# trailing underscore avoids shadowing the builtin, but as a result only
# ``epub_copyright`` below actually receives this value — confirm intended.
project = "grmpy"
copyright_ = "2018, grmpy-dev team"
author = "grmpy-dev team"

# The short X.Y version and the full release string (kept identical here).
version = "1.0"
release = "1.0"

# Language for autogenerated content (None selects the default, English).
language = None

# Patterns ignored when looking for source files; also applies to
# html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# Pygments (syntax highlighting) style.
pygments_style = "sphinx"

# Emit `todo`/`todoList` output everywhere except on Read the Docs.
todo_include_todos = not on_rtd

# -- Options for HTML output -----------------------------------------------

html_theme = "sphinx_rtd_theme"

# Custom static files (copied after the builtin ones, so a local
# "default.css" would override the theme's).
html_static_path = []

# -- Options for HTMLHelp output -------------------------------------------

# Output file base name for the HTML help builder.
htmlhelp_basename = "grmpydoc"

# -- Options for LaTeX output ----------------------------------------------

latex_elements = {
    # Font size; paper size, preamble and figure placement keep defaults
    # except where noted.
    "pointsize": "12pt",
    # LaTeX figure (float) alignment.
    "figure_align": "htbp",
}

# Grouping of the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, "grmpy.tex", "grmpy Documentation", "Development Team", "manual")
]

# -- Options for manual page output ----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "grmpy", "grmpy Documentation", [author], 1)]

# -- Options for Texinfo output --------------------------------------------

# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    (
        master_doc,
        "grmpy",
        "grmpy Documentation",
        author,
        "grmpy",
        "One line description of project.",
        "Miscellaneous",
    )
]

# -- Options for Epub output -----------------------------------------------

# Bibliographic Dublin Core metadata.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright_

# Files that should not be packed into the epub archive.
epub_exclude_files = ["search.html"]
| grmToolbox/grmpy | docs/conf.py | Python | mit | 5,902 |
"""
https://github.com/renskiy/fabricio/blob/master/examples/service/kubernetes
"""
import fabricio
from fabric import api as fab
from fabricio import tasks, kubernetes
from fabricio.misc import AvailableVagrantHosts
from six.moves import filter
# Hosts are discovered dynamically from the running Vagrant machines and
# addressed through their private ('eth1') network interface.
hosts = AvailableVagrantHosts(guest_network_interface='eth1')
# Fabric task collection (deploy, prepare, etc.) for a service managed by
# Kubernetes through a declarative configuration file.
service = tasks.DockerTasks(
    service=kubernetes.Configuration(
        name='my-service',
        options={
            # `kubectl apply` options
            'filename': 'configuration.yml',
        },
    ),
    hosts=hosts,
    # Optional task switches, disabled by default; uncomment to expose them:
    # rollback_command=True, # show `rollback` command in the list
    # migrate_commands=True, # show `migrate` and `migrate-back` commands in the list
    # backup_commands=True, # show `backup` and `restore` commands in the list
    # pull_command=True, # show `pull` command in the list
    # update_command=True, # show `update` command in the list
    # revert_command=True, # show `revert` command in the list
    # destroy_command=True, # show `destroy` command in the list
)
@fab.task(name='k8s-init')
@fab.serial
def k8s_init():
    """
    create Kubernetes cluster

    The first host to execute becomes the master: it runs `kubeadm init`,
    captures the `kubeadm join ...` command from the last line of the
    output, and stores it on the `init` function object. Every following
    host then joins the cluster by running that stored command. `@fab.serial`
    ensures hosts run one at a time, so the master finishes before any
    node attempts to join.
    """
    def init():
        if not init.join_command:
            # Master path: run `kubeadm init`, drop empty lines; kubeadm
            # prints the join command on the last line of its output.
            initialization = list(filter(None, fabricio.run(
                'kubeadm init '
                '--apiserver-advertise-address {0} '
                '--pod-network-cidr 10.244.0.0/16'
                ''.format(fab.env.host),
                sudo=True,
                quiet=False,
            ).splitlines()))
            init.join_command = initialization[-1].strip()
            # master setup: make kubectl usable for the vagrant user
            fabricio.run('mkdir -p $HOME/.kube')
            fabricio.run('cp /etc/kubernetes/admin.conf /home/vagrant/.kube/config', sudo=True)
            fabricio.run('chown vagrant /home/vagrant/.kube/config', sudo=True)
            # install Kubernetes network plugin (RBAC rules + canal CNI)
            fabricio.run(
                'kubectl apply --filename /vagrant/kube-rbac.yml '
                '&& kubectl apply --filename /vagrant/kube-canal.yml --validate=false',
                quiet=False,
            )
        else:
            # Node path: join the cluster created by the first host.
            fabricio.run(init.join_command, quiet=False, sudo=True)
    # No join command captured yet: first executed host takes the master role.
    init.join_command = None
    with fab.settings(hosts=hosts):
        fab.execute(init)
@fab.task(name='k8s-reset')
def k8s_reset():
    """
    reset Kubernetes cluster

    Runs `kubeadm reset --force` on every available host, tearing the
    cluster down completely.
    """
    def _reset_single_host():
        fabricio.run('kubeadm reset --force', sudo=True, quiet=False)

    with fab.settings(hosts=hosts):
        fab.execute(_reset_single_host)
| renskiy/fabricio | examples/service/kubernetes/fabfile.py | Python | mit | 2,512 |
import sys
def error(m):
    """Emit *m* (stringified) on standard error, terminated by a newline."""
    sys.stderr.write('%s\n' % (str(m),))
| vim-scripts/Threesome | autoload/threesomelib/util/io.py | Python | mit | 63 |
from math import factorial
def fsum(n):
    """Return the sum of the factorials of the decimal digits of *n*."""
    digit_factorials = [factorial(digit) for digit in range(10)]
    return sum(digit_factorials[int(ch)] for ch in str(n))
# Project Euler 34: sum every number equal to the sum of the factorials of
# its digits (3 is the smallest candidate; the bound 45000 comfortably
# exceeds both solutions, 145 and 40585).
print(sum(n for n in range(3, 45000) if n == fsum(n)))
| jokkebk/euler | p34.py | Python | mit | 143 |
import os
from subprocess import Popen, PIPE
from selenium import webdriver
import time
def abspath(*p):
    """Join path segments *p* and return the absolute, normalized result.

    Replaces the original ``abspath = lambda *p: ...`` assignment: PEP 8
    (E731) prefers ``def`` for named callables, which also gives the
    function a proper name in tracebacks.
    """
    return os.path.abspath(os.path.join(*p))


# Directory containing this script; used as the default output location.
ROOT = abspath(os.path.dirname(__file__))
def execute_command(command):
    """Run *command* in a shell and raise ``Exception`` on failure.

    The original implementation read stdout via ``.stdout.read()`` without
    ever waiting on the child (leaving zombies) and ignored both the exit
    status and stderr, so a failing ImageMagick ``convert`` that printed
    nothing to stdout passed silently. This version waits for the process,
    fails on a non-zero exit status (reporting stderr), and keeps the
    legacy contract that any non-whitespace stdout is treated as an error
    (``convert`` is silent on success).
    """
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes and reaps the child process.
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise Exception(stderr or stdout)
    if len(stdout) > 0 and not stdout.isspace():
        raise Exception(stdout)
def do_screen_capturing(url, screen_path, width, height):
    """Render *url* in headless PhantomJS and save a PNG to *screen_path*.

    If both *width* and *height* are truthy, the virtual browser window is
    resized before the page is loaded.
    """
    print "Capturing screen.."
    driver = webdriver.PhantomJS()
    # PhantomJS writes its service log (ghostdriver.log) to the current
    # directory; to store it elsewhere, initialize the driver as e.g.
    # driver = webdriver.PhantomJS(service_log_path='/var/log/phantomjs/ghostdriver.log')
    driver.set_script_timeout(30)
    if width and height:
        driver.set_window_size(width, height)
    driver.get(url)
    # Fixed grace period for asynchronous page resources to finish loading
    # before the capture. NOTE(review): assumes 10s suffices — confirm.
    time.sleep(10)
    driver.save_screenshot(screen_path)
def do_crop(params):
    """Crop the captured screen with ImageMagick's ``convert``.

    *params* must provide: 'screen_path' (input), 'crop_path' (output),
    and the crop geometry keys 'width', 'height', 'xoffset', 'yoffset'.
    Raises via execute_command() if ``convert`` reports an error.
    """
    print "Croping captured image.."
    command = [
        'convert',
        params['screen_path'],
        # ImageMagick geometry string: <width>x<height>+<xoffset>+<yoffset>
        '-crop', '%sx%s+%s+%s' % (params['width'], params['height'], params['xoffset'], params['yoffset']),
        params['crop_path']
    ]
    execute_command(' '.join(command))
def do_thumbnail(params):
    """Downscale the cropped capture into a thumbnail via ``convert``.

    Uses the Lanczos filter for higher-quality downsampling. *params* must
    provide: 'crop_path' (input), 'thumbnail_path' (output), 'width' and
    'height' (target size).
    """
    print "Generating thumbnail from croped captured image.."
    command = [
        'convert',
        params['crop_path'],
        '-filter', 'Lanczos',
        '-thumbnail', '%sx%s' % (params['width'], params['height']),
        params['thumbnail_path']
    ]
    execute_command(' '.join(command))
def get_screen_shot(**kwargs):
    """Capture a URL and optionally crop/thumbnail the result.

    Returns (screen_path, crop_path, thumbnail_path); when crop or
    thumbnail is disabled the corresponding path aliases screen_path.
    NOTE: Python 2 source (`raise Exception, msg` syntax below).
    """
    url = kwargs['url']
    width = int(kwargs.get('width', 1024)) # screen width to capture
    height = int(kwargs.get('height', 768)) # screen height to capture
    filename = kwargs.get('filename', 'screen.png') # file name e.g. screen.png
    path = kwargs.get('path', ROOT) # directory path to store screen
    crop = kwargs.get('crop', False) # crop the captured screen
    crop_width = int(kwargs.get('crop_width', width)) # the width of crop screen
    crop_height = int(kwargs.get('crop_height', height)) # the height of crop screen
    crop_x_offset = int(kwargs.get('crop_x_offset', height)) # x offset of crop box; NOTE(review): default of *height* looks like a copy/paste slip -- confirm
    crop_y_offset = int(kwargs.get('crop_y_offset', height)) # y offset of crop box; NOTE(review): default of *height* looks intentional only for y -- confirm
    crop_replace = kwargs.get('crop_replace', False) # does crop image replace original screen capture?
    thumbnail = kwargs.get('thumbnail', False) # generate thumbnail from screen, requires crop=True
    thumbnail_width = int(kwargs.get('thumbnail_width', width)) # the width of thumbnail
    thumbnail_height = int(kwargs.get('thumbnail_height', height)) # the height of thumbnail
    thumbnail_replace = kwargs.get('thumbnail_replace', False) # does thumbnail image replace crop image?
    screen_path = abspath(path, filename)
    crop_path = thumbnail_path = screen_path
    if thumbnail and not crop:
        raise Exception, 'Thumnail generation requires crop image, set crop=True'
    do_screen_capturing(url, screen_path, width, height)
    if crop:
        # Unless replacing in place, the crop gets a 'crop_' prefix.
        if not crop_replace:
            crop_path = abspath(path, 'crop_'+filename)
        params = {
            'width': crop_width, 'height': crop_height,
            'xoffset': crop_x_offset, 'yoffset': crop_y_offset,
            'crop_path': crop_path, 'screen_path': screen_path}
        do_crop(params)
        if thumbnail:
            # Thumbnail is derived from the crop, not the full capture.
            if not thumbnail_replace:
                thumbnail_path = abspath(path, 'thumbnail_'+filename)
            params = {
                'width': thumbnail_width, 'height': thumbnail_height,
                'thumbnail_path': thumbnail_path, 'crop_path': crop_path}
            do_thumbnail(params)
    return screen_path, crop_path, thumbnail_path
def get_screenshot_for_url(url):
    """Capture *url* with the site defaults and return the cropped image path."""
    capture_options = dict(
        url=url, filename='sof.png',
        crop=True, crop_replace=False,
        crop_width=950, crop_height=650,
        crop_x_offset=0, crop_y_offset=80,
        thumbnail=True, thumbnail_replace=False,
        thumbnail_width=200, thumbnail_height=150,
    )
    _screen, crop_path, _thumbnail = get_screen_shot(**capture_options)
    return crop_path
# Manual smoke test: capture one page and print where the crop landed.
# NOTE: Python 2 source (print statement at the end).
if __name__ == '__main__':
    '''
    Requirements:
     Install NodeJS
     Using Node's package manager install phantomjs: npm -g install phantomjs
     install selenium (in your virtualenv, if you are using that)
     install imageMagick
     add phantomjs to system path (on windows)
    '''
    url = 'http://brocascoconut.com/gmaps'
    screen_path, crop_path, thumbnail_path = get_screen_shot(
        url=url, filename='sof.png',
        crop=True, crop_replace=False,
        crop_width=950, crop_height=650,
        crop_x_offset=0, crop_y_offset=80,
        thumbnail=True, thumbnail_replace=False,
        thumbnail_width=200, thumbnail_height=150,
    )
    print 'crop_path: {}'.format(crop_path)
# -*- coding: utf-8 -*-
#
# boing/net/tcp.py -
#
# Authors: Nicolas Roussel (nicolas.roussel@inria.fr)
# Paolo Olivo (paolo.olivo@inria.fr)
#
# Copyright © INRIA
#
# See the file LICENSE for information on usage and redistribution of
# this file, and for a DISCLAIMER OF ALL WARRANTIES.
import logging
import socket as _socket
from PyQt4.QtNetwork import QAbstractSocket, QHostAddress, QTcpSocket, QTcpServer
import boing.net.ip as ip
from boing.utils.url import URL
class TcpSocket(QTcpSocket):
    """QTcpSocket subclass adding address/URL helpers and safe send/receive."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger("TcpSocket.%d"%id(self))
        self.error.connect(self.__error)
    def __error(self, error):
        # Remote-side disconnects are expected; any other socket error is fatal.
        if error!=QAbstractSocket.RemoteHostClosedError:
            raise RuntimeError(self.errorString())
    def connect(self, host, port, family=None):
        """Raises Exception if host cannot be resolved."""
        host, port = ip.resolve(host, port,
                                family if family is not None else 0,
                                _socket.SOCK_STREAM)[:2]
        self.connectToHost(host, port)
        return self
    def family(self):
        """Return the local protocol family (ip.PF_INET/PF_INET6) or None."""
        addr = self.localAddress()
        if addr.protocol()==QAbstractSocket.IPv4Protocol:
            family = ip.PF_INET
        elif addr.protocol()==QAbstractSocket.IPv6Protocol:
            family = ip.PF_INET6
        else: family = None
        return family
    def name(self):
        """Return the server socket’s address (host, port)."""
        return ip.addrToString(self.localAddress()), self.localPort()
    def peerName(self):
        """Return the remote endpoint's address (host, port)."""
        return ip.addrToString(self.peerAddress()), self.peerPort()
    def peerUrl(self):
        """Return the remote endpoint as a tcp:// URL (IPv6 host bracketed)."""
        return URL("tcp://%s:%d"%self.peerName()) if self.family()==ip.PF_INET \
            else URL("tcp://[%s]:%d"%self.peerName())
    def read(self):
        """Alias for receive()."""
        return self.receive()
    def receive(self):
        """Return all currently buffered bytes (b'' when nothing pending)."""
        size = self.bytesAvailable()
        if size>0: return self.readData(size)
        else: return bytes()
    def receiveFrom(self):
        """Return (data, peer); peer is None when nothing was pending."""
        size = self.bytesAvailable()
        if size>0: return self.readData(size), self.peerName()
        else: return bytes(), None
    def setOption(self, option):
        """Apply a named socket option; only "nodelay" is recognized."""
        if option=="nodelay":
            self.setSocketOption(QAbstractSocket.LowDelayOption, 1)
    def send(self, data):
        """Write *data* when connected; otherwise log a warning and return 0."""
        if self.state()==QAbstractSocket.ConnectedState:
            return self.write(data)
        else:
            self.logger.warning("send method invoked on disconnected socket.")
            return 0
    def url(self):
        """Return the local endpoint as a tcp:// URL (IPv6 host bracketed)."""
        return URL("tcp://%s:%d"%self.name()) if self.family()==ip.PF_INET \
            else URL("tcp://[%s]:%d"%self.name())
# -------------------------------------------------------------------------
def TcpConnection(url, family=None):
    """Create a TcpSocket connected to the host/port encoded in *url*.

    Raises Exception if host cannot be resolved, ValueError when the URL
    lacks a host or a port.
    """
    if not isinstance(url, URL):
        url = URL(url)
    if not url.site.host:
        raise ValueError("Target host is mandatory: %s" % url)
    if url.site.port == 0:
        raise ValueError("Target port is mandatory: %s" % url)
    socket = TcpSocket()
    return socket.connect(url.site.host, url.site.port, family)
# -------------------------------------------------------------------
class TcpServer(QTcpServer):
    """QTcpServer that wraps incoming connections in factory-made sockets."""
    def __init__(self, host=None, port=0, family=None,
                 maxconnections=30, factory=TcpSocket, options=tuple(),
                 *args, **kwargs):
        """Raises Exception if TCP socket cannot be bound at specified
        host and port."""
        super().__init__(*args, **kwargs)
        self.__factory = factory
        self.__options = options if options is not None else tuple()
        self.setMaxPendingConnections(maxconnections)
        if not host:
            # No host given: bind the wildcard address for the family.
            if family==ip.PF_INET6: host = QHostAddress.AnyIPv6
            else: host = QHostAddress.Any
        if not QHostAddress(host) in (QHostAddress.Any,
                                      QHostAddress.AnyIPv6):
            # Concrete host: resolve it before listening.
            host, port = ip.resolve(host, port,
                                    family if family is not None else 0,
                                    _socket.SOCK_STREAM)[:2]
        if not self.listen(QHostAddress(host), int(port)):
            raise Exception(self.errorString())
    def incomingConnection(self, descriptor):
        # Build a socket via the factory, apply options, queue it as pending.
        connection = self.__factory(self)
        for option in self.__options: connection.setOption(option)
        connection.setSocketDescriptor(descriptor)
        self.addPendingConnection(connection)
    def family(self):
        """Return the bound protocol family (ip.PF_INET/PF_INET6) or None."""
        addr = self.serverAddress()
        if addr.protocol()==QAbstractSocket.IPv4Protocol:
            family = ip.PF_INET
        elif addr.protocol()==QAbstractSocket.IPv6Protocol:
            family = ip.PF_INET6
        else: family = None
        return family
    def name(self):
        """Return the server socket’s address (host, port)."""
        return ip.addrToString(self.serverAddress()), self.serverPort()
    def url(self):
        """Return the socket's URL, i.e. tcp://<host>:<port>."""
        return URL("tcp://%s:%d"%self.name()) if self.family()==ip.PF_INET \
            else URL("tcp://[%s]:%d"%self.name())
# -------------------------------------------------------------------
class EchoSocket(TcpSocket):
    """TCP socket that logs its traffic and echoes back everything received."""

    def __init__(self, parent=None):
        TcpSocket.__init__(self, parent)
        self.logger = logging.getLogger("EchoSocket.%d" % id(self))
        self.readyRead.connect(self.__handleData)
        self.disconnected.connect(self.__handleDisconnect)

    def setSocketDescriptor(self, descriptor):
        # Log the peer as soon as the descriptor yields a live connection.
        TcpSocket.setSocketDescriptor(self, descriptor)
        if self.state() == QAbstractSocket.ConnectedState:
            self.logger.debug("New client: %s" % str(self.peerName()))

    def __handleDisconnect(self):
        self.logger.debug("Lost client: %s" % str(self.peerName()))

    def __handleData(self):
        data, peer = self.receiveFrom()
        self.logger.debug("%s: %s" % (peer, data))
        self.send(data)
def EchoServer(host=None, port=0, family=None, maxconnections=30):
    """Return a TcpServer whose connections are handled by EchoSocket."""
    return TcpServer(host, port, family, maxconnections, factory=EchoSocket)
| olivopaolo/boing | boing/net/tcp.py | Python | gpl-2.0 | 6,209 |
"""
Programmer : EOF
E-mail : jasonleaster@163.com
File : svm.py
Date : 2015.12.13
You know ... It's hard time but it's not too bad to say give up.
"""
import numpy
class SVM:
    """Support Vector Machine trained with a simplified SMO procedure.

    *Mat* is a (dimension x num_samples) matrix of column samples; *Tag*
    holds the labels -- the KKT checks compare ``Tag[i] * G(i)`` against 1,
    so labels are expected to be +1/-1.
    NOTE: Python 2 source (print statements in optimal()/train()).
    """
    def __init__(self, Mat, Tag, C = 2, MAXITER = 200):
        self._Mat = numpy.array(Mat)
        self._Tag = numpy.array(Tag).flatten()
        self.SampleDem = self._Mat.shape[0]
        self.SampleNum = self._Mat.shape[1]
        # Castigation (penalty) factor C of the soft-margin formulation
        self.C = C
        # Each sample point have a lagrange factor
        self.alpha = numpy.array([0.0 for i in range(self.SampleNum)])
        # The expected weight vector which we want the machine to learn
        self.W = numpy.array([0.0 for i in range(self.SampleDem)])
        # intercept
        self.b = 0.0
        # Difference between the expected output and output of current machine
        self.E = numpy.array([0.0 for i in range(self.SampleNum)])
        self.Kernel = self.Linear_Kernel
        # Bool value for sample point is a Supported Vector or not
        self.SupVec = [False for i in range(self.SampleNum)]
        # Points which are selected in current time.
        self.P1 = None
        self.P2 = None
        #Max times for training SVM
        self.MAXITER = MAXITER
    """
    Linear Kernel which will compute the
    inner product of point @i and @j. K(i,j)
    """
    def Linear_Kernel(self, i, j):
        summer = 0.0
        for d in range(self.SampleDem):
            summer += self._Mat[d][i] * self._Mat[d][j]
        return summer
    """
    Current output for sample point @i
    """
    def G(self, i):
        summer = 0.0
        for j in range(self.SampleNum):
            summer += self.alpha[j] * self._Tag[j] * self.Kernel(i,j)
        summer += self.b
        return summer
    """
    update the cost for prediction when x-i(Mat[:, i]) as input.
    where @i is not the index of current selected point(P1, P2).
    """
    def updateE(self, i):
        self.E[i] = self.G(i) - self._Tag[i]
    """
    @findFirstVar() function will help us to find the first Variable
    which's alpha value wanted to be updated. We return the index of
    that point as @P1
    """
    def findFirstVar(self):
        firstPointIndex = None
        b_KKTcond_Points = []
        # Refresh E for every point except the pair selected last round.
        for i in range(self.SampleNum):
            if i == self.P1 or i == self.P2:
                continue
            self.updateE(i)
        # First look for KKT violators among the margin (0 < alpha < C) points.
        for i in range(self.SampleNum):
            if 0 < self.alpha[i] and self.alpha[i] < self.C:
                if self.G(i) * self._Tag[i] != 1:
                    b_KKTcond_Points.append(i)
        # if there is not point on the boundary break the KKT-condition
        if len(b_KKTcond_Points) == 0:
            for i in range(self.SampleNum):
                if self.alpha[i] == 0 and self._Tag[i] * self.G(i) < 1:
                    b_KKTcond_Points.append(i)
                elif self.alpha[i] == self.C and self._Tag[i] * self.G(i) > 1:
                    b_KKTcond_Points.append(i)
        # Pick the violator with the largest |E| (worst prediction error).
        maxE = 0.0
        for i in b_KKTcond_Points:
            if abs(maxE) < abs(self.E[i]):
                firstPointIndex = i
                maxE = self.E[i]
        return firstPointIndex
    """
    Find the second variable which's alpha value want to be updated
    """
    def findSecondVar(self, firstPointIndex):
        # Choose the point maximizing |E1 - E2| to get the biggest step.
        secondPointIndex = None
        val = 0  # NOTE(review): unused local
        if self.E[firstPointIndex] < 0:
            maxVal = self.E[firstPointIndex]
            for i in range(self.SampleNum):
                if self.E[i] > maxVal:
                    maxVal = self.E[i]
                    secondPointIndex = i
        else:
            minVal = self.E[firstPointIndex]
            for i in range(self.SampleNum):
                if self.E[i] < minVal:
                    minVal = self.E[i]
                    secondPointIndex = i
        return secondPointIndex
    """
    @optimal() function will update the alpha value of the
    two selected points which could be indexed by @P1 and @P2.
    @P1 and @P2 are index of the first selected point
    and the second selected point. You can get the point
    by self._Mat[:, P1] and self._Mat[:, P2]
    @L : lowest boundary of current optimal problem
    @H : highest boundary of current optimal problem
    """
    def optimal(self, P1, P2):
        # Clip window [L, H] for the new alpha[P2] (box constraints).
        if self._Tag[P1] != self._Tag[P2]:
            k = self.alpha[P2] - self.alpha[P1]
            L = max(0.0, k)
            H = min(self.C, self.C + k)
        else:
            k = self.alpha[P2] + self.alpha[P1]
            L = max(0.0, k - self.C)
            H = min(self.C, k)
        K11 = self.Kernel(P1, P1)
        K22 = self.Kernel(P2, P2)
        K12 = self.Kernel(P1, P2)
        # Second derivative of the objective along the constraint line.
        yita = K11 + K22 - 2*K12
        old_alpha_P1 = self.alpha[P1]
        old_alpha_P2 = self.alpha[P2]
        # candidate for new alpha_2 (unconstrained optimum, then clipped)
        new_alpha_unc_P2 = old_alpha_P2 + \
                (self._Tag[P2] * (self.E[P1] - self.E[P2]) /yita)
        if new_alpha_unc_P2 > H:
            new_alpha_P2 = H
        elif new_alpha_unc_P2 < L:
            new_alpha_P2 = L
        else:
            new_alpha_P2 = new_alpha_unc_P2
        new_alpha_P1 = old_alpha_P1 + self._Tag[P1] * self._Tag[P2] * \
                       (old_alpha_P2 - new_alpha_P2)
        # Two intercept candidates derived from each updated point.
        b_P1_new = - self.E[P1]\
                   - self._Tag[P1] * K11 * (new_alpha_P1 - old_alpha_P1) \
                   - self._Tag[P2] * K12 * (new_alpha_P2 - old_alpha_P2) \
                   + self.b
        b_P2_new = - self.E[P2] \
                   - self._Tag[P1] * K12 * (new_alpha_P1 - old_alpha_P1) \
                   - self._Tag[P2] * K22 * (new_alpha_P2 - old_alpha_P2) \
                   + self.b
        """
        Attention!
        If there difference between the old alpha and the new alpha,
        we should choose another P1 or P2. We DON'T need to drop ALL
        two old selected point but anyone also will be ok.
        """
        if new_alpha_P1 == self.alpha[P1] or new_alpha_P2 == self.alpha[P2]:
            old_P1 = P1
            old_P2 = P2
            while P1 == P2 or (P1 == old_P1 and P2 == old_P2):
                P1 = numpy.random.randint(self.SampleNum)
                P2 = numpy.random.randint(self.SampleNum)
            self.P1 = P1
            self.P2 = P2
            # optimal the alpha for selected P1 and P2 recusively.
            self.optimal(P1, P2)
            return
        if 0 < new_alpha_P1 and new_alpha_P1 < self.C and \
           0 < new_alpha_P2 and new_alpha_P2 < self.C:
            if abs(b_P1_new - b_P2_new) > 0.01:
                print "Attention! Maybe ERROR :( b1 == b2"
        if new_alpha_P1 == 0 or new_alpha_P1 == self.C or \
           new_alpha_P2 == 0 or new_alpha_P2 == self.C:
            self.b = (b_P1_new + b_P2_new)/2
        else:
            self.b = b_P1_new
        self.alpha[P1] = new_alpha_P1
        self.alpha[P2] = new_alpha_P2
        # Refresh the support-vector flags from the updated alphas.
        for i in range(self.SampleNum):
            if 0 < self.alpha[i] and self.alpha[i] < self.C:
                self.SupVec[i] = True
            else:
                self.SupVec[i] = False
        """
        update the new cost value E-i for P1 and P2
        """
        summer = 0.0
        for j in range(self.SampleNum):
            if self.SupVec[j] == True:
                summer += self.alpha[j] * self._Tag[j] * self.Kernel(P1, j)
        new_E_P1 = summer + self.b - self._Tag[P1]
        summer = 0.0
        for j in range(self.SampleNum):
            if self.SupVec[j] == True:
                summer += self.alpha[j] * self._Tag[j] * self.Kernel(P2, j)
        new_E_P2 = summer + self.b - self._Tag[P2]
        self.E[P1] = new_E_P1
        self.E[P2] = new_E_P2
    def train(self):
        """Iterate SMO steps until KKT conditions hold or MAXITER is hit,
        then derive the weight vector W and the intercept b."""
        times = 0
        while self.run_or_not():
            times += 1
            print "Training time:", times
            if times == self.MAXITER:
                break
            P1 = self.findFirstVar()
            P2 = self.findSecondVar(P1)
            self.P1 = P1
            self.P2 = P2
            self.optimal(P1, P2)
        """
        Here we finished training.
        We calculate the hyper-plane vector self.W and intercept self.b
        """
        for i in range(self.SampleNum):
            self.W += self.alpha[i] * self._Mat[:, i] * self._Tag[i]
        for j in range(self.SampleNum):
            if 0 < self.alpha[j] and self.alpha[j] < self.C:
                # Any margin support vector determines the intercept.
                summer = 0.0
                for i in range(self.SampleNum):
                    summer += self._Tag[i] * self.alpha[i] * self.Kernel(i, j)
                self.b = self._Tag[j] - summer
                print "Congradulation! Traning finished successfully."
                print self.W, self.b
                break
    def run_or_not(self):
        """Return True while any constraint or KKT condition is violated."""
        # Constraint: sum(alpha_i * y_i) must be zero.
        summer = 0.0
        for i in range(self.SampleNum):
            summer += self.alpha[i] * self._Tag[i]
        if summer != 0:
            return True
        # Constraint: every alpha must stay inside the box [0, C].
        for i in range(self.SampleNum):
            if self.alpha[i] < 0 or self.alpha[i] > self.C:
                return True
        # KKT conditions for each point, by its position in the box.
        for i in range(self.SampleNum):
            if self.alpha[i] == 0:
                if self._Tag[i] * self.G(i) < 1:
                    return True
            elif self.alpha[i] == self.C:
                if self._Tag[i] * self.G(i) > 1:
                    return True
            elif 0 < self.alpha[i] and self.alpha[i] < self.C:
                if self._Tag[i] * self.G(i) != 1:
                    return True
        return False
| jasonleaster/Machine_Learning | SVM/svm.py | Python | gpl-2.0 | 9,610 |
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from optparse import make_option
from django.core.management.base import CommandError
from synnefo.logic import servers
from synnefo.management import common
from snf_django.management.utils import parse_bool
from snf_django.management.commands import RemoveCommand
from synnefo.db import transaction
class Command(RemoveCommand):
    """snf-manage command: delete ports from the DB and detach them from VMs."""
    can_import_settings = True
    args = "<port_id> [<port_id> ...]"
    help = "Remove a port from the Database and from the VMs attached to"
    command_option_list = RemoveCommand.command_option_list + (
        make_option(
            "--wait",
            dest="wait",
            default="True",
            choices=["True", "False"],
            metavar="True|False",
            help="Wait for Ganeti jobs to complete. [Default: True]"),
    )
    @common.convert_api_faults
    def handle(self, *args, **options):
        """Delete each given port, each inside its own DB transaction;
        optionally wait for the Ganeti job of the affected server."""
        if not args:
            raise CommandError("Please provide a port ID")
        force = options['force']
        message = "ports" if len(args) > 1 else "port"
        self.confirm_deletion(force, message, args)
        for port_id in args:
            self.stdout.write("\n")
            try:
                with transaction.atomic():
                    # Lock the row (for_update) so the delete is race-free.
                    port = common.get_resource(
                        "port", port_id, for_update=True)
                    machine = port.machine
                    servers._delete_port(port)
                # Waiting happens outside the transaction on purpose.
                wait = parse_bool(options["wait"])
                if machine is not None:
                    common.wait_server_task(machine, wait,
                                            stdout=self.stdout)
                else:
                    self.stdout.write("Successfully removed port %s\n" % port)
            except CommandError as e:
                # NOTE(review): e.message is Python 2 only; str(e) on Python 3.
                self.stdout.write("Error -- %s\n" % e.message)
| grnet/synnefo | snf-cyclades-app/synnefo/logic/management/commands/port-remove.py | Python | gpl-3.0 | 2,495 |
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from tgext.pluggable import PluggableSession
# Session proxy that tgext.pluggable binds to the host app's SQLAlchemy
# session via init_model() below.
DBSession = PluggableSession()
# Declarative base class for this pluggable app's mapped models.
DeclarativeBase = declarative_base()
def init_model(app_session):
    """Bind the pluggable session to the host application's session and
    import the model definitions so they register on DeclarativeBase."""
    DBSession.configure(app_session)
    # NOTE(review): Python 2 only -- a wildcard import inside a function is a
    # SyntaxError on Python 3, and `models` is an implicit relative import.
    from models import *
| nomed/cashup | cashup/model/__init__.py | Python | mit | 284 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import with_statement
from collections import OrderedDict
import numpy as np
import pyqtgraph as pg
import scipy
import six
from six.moves import range
from six.moves import zip
import acq4.util.functions as fn
import acq4.util.ptime as ptime
from acq4.devices.Camera import Camera, CameraTask
from acq4.util import Qt
from acq4.util.Mutex import Mutex
# Sensor dimensions (pixels) of the simulated camera chip.
WIDTH = 512
HEIGHT = 512
class MockCamera(Camera):
    """Simulated Camera device: renders synthetic frames (background image
    or Mandelbrot fractal, plus noise and flashing mock "cells") so the
    acquisition pipeline can be exercised without hardware."""
    def __init__(self, manager, config, name):
        self.camLock = Mutex(Mutex.Recursive)  # Lock to protect access to camera
        self.ringSize = 100
        self.frameId = 0
        self.noise = np.random.normal(size=10000000, loc=100, scale=10)  # pre-generate noise for use in images
        if "images" in config:
            # Config supplies per-objective background image stacks
            # (MetaArray files with a depth axis in xvals(0)).
            self.bgData = {}
            self.bgInfo = {}
            for obj, filename in config["images"].items():
                file = manager.fileHandle(filename)
                ma = file.read()
                self.bgData[obj] = ma.asarray()
                self.bgInfo[obj] = file.info().deepcopy()
                self.bgInfo[obj]["depths"] = ma.xvals(0)
        else:
            # No images configured: fall back to a pre-rendered fractal.
            self.bgData = mandelbrot(width=WIDTH * 5, maxIter=60).astype(np.float32)
            self.bgInfo = None
        self.background = None
        self.params = OrderedDict(
            [
                ("triggerMode", "Normal"),
                ("exposure", 0.001),
                # ("binning", (1, 1)),
                # ("region", (0, 0, WIDTH, WIDTH)),
                ("binningX", 1),
                ("binningY", 1),
                ("regionX", 0),
                ("regionY", 0),
                ("regionW", WIDTH),
                ("regionH", HEIGHT),
                ("gain", 1.0),
                ("sensorSize", (WIDTH, HEIGHT)),
                ("bitDepth", 16),
            ]
        )
        # Each entry: (allowed values/range, writable, readable, dependencies)
        self.paramRanges = OrderedDict(
            [
                ("triggerMode", (["Normal", "TriggerStart"], True, True, [])),
                ("exposure", ((0.001, 10.0), True, True, [])),
                # ("binning", ([range(1, 10), range(1, 10)], True, True, [])),
                # ("region", ([(0, WIDTH - 1), (0, HEIGHT - 1), (1, WIDTH), (1, HEIGHT)], True, True, [])),
                ("binningX", (list(range(1, 10)), True, True, [])),
                ("binningY", (list(range(1, 10)), True, True, [])),
                ("regionX", ((0, WIDTH - 1), True, True, ["regionW"])),
                ("regionY", ((0, HEIGHT - 1), True, True, ["regionH"])),
                ("regionW", ((1, WIDTH), True, True, ["regionX"])),
                ("regionH", ((1, HEIGHT), True, True, ["regionY"])),
                ("gain", ((0.1, 10.0), True, True, [])),
                ("sensorSize", (None, False, True, [])),
                ("bitDepth", (None, False, True, [])),
            ]
        )
        # Composite parameters exposed as tuples of scalar parameters.
        self.groupParams = {
            "binning": ("binningX", "binningY"),
            "region": ("regionX", "regionY", "regionW", "regionH"),
        }
        sig = np.random.normal(size=(WIDTH, HEIGHT), loc=1.0, scale=0.3)
        sig = scipy.ndimage.gaussian_filter(sig, (3, 3))
        sig[20:40, 20:40] += 1
        sig[sig < 0] = 0
        self.signal = sig
        Camera.__init__(self, manager, config, name)  # superclass will call setupCamera when it is ready.
        self.acqBuffer = None
        self.frameId = 0
        self.lastIndex = None
        self.lastFrameTime = None
        self.stopOk = False
        self.sigGlobalTransformChanged.connect(self.globalTransformChanged)
        # generate list of mock cells
        cells = np.zeros(
            20,
            dtype=[
                ("x", float),
                ("y", float),
                ("size", float),
                ("value", float),
                ("rate", float),
                ("intensity", float),
                ("decayTau", float),
            ],
        )
        cells["x"] = np.random.normal(size=cells.shape, scale=100e-6, loc=-1.5e-3)
        cells["y"] = np.random.normal(size=cells.shape, scale=100e-6, loc=4.4e-3)
        cells["size"] = np.random.normal(size=cells.shape, scale=2e-6, loc=10e-6)
        cells["rate"] = np.random.lognormal(size=cells.shape, mean=0, sigma=1) * 1.0
        cells["intensity"] = np.random.uniform(size=cells.shape, low=1000, high=10000)
        cells["decayTau"] = np.random.uniform(size=cells.shape, low=15e-3, high=500e-3)
        self.cells = cells
    def setupCamera(self):
        # Nothing to initialize: no hardware behind the mock device.
        pass
    def globalTransformChanged(self):
        # Cached background depends on the stage position; invalidate it.
        self.background = None
    def startCamera(self):
        # Frame timing is simulated relative to this timestamp.
        self.lastFrameTime = ptime.time()
    def stopCamera(self):
        pass
    def getNoise(self, shape):
        """Return a (shape[0] x shape[1]) noise patch cut at a random offset
        from the pre-generated noise pool (cheaper than fresh normals)."""
        n = shape[0] * shape[1]
        s = np.random.randint(len(self.noise) - n)
        d = self.noise[s : s + n]
        d.shape = shape
        return np.abs(d)
    def getBackground(self):
        """Return (and cache) the full-sensor background for the current
        global transform: either a slice of the configured image stack,
        interpolated between adjacent depth planes, or a slice of the
        pre-rendered fractal."""
        if self.background is None:
            w, h = self.params["sensorSize"]
            tr = self.globalTransform()
            if isinstance(self.bgData, dict):
                # select data based on objective
                obj = self.getObjective()
                data = self.bgData[obj]
                info = self.bgInfo[obj]
                px = info["pixelSize"]
                pz = info["depths"][1] - info["depths"][0]
                m = Qt.QMatrix4x4()
                pos = info["transform"]["pos"]
                m.scale(1 / px[0], 1 / px[1], 1 / pz)
                m.translate(-pos[0], -pos[1], -info["depths"][0])
                tr2 = m * tr
                origin = tr2.map(pg.Vector(0, 0, 0))
                # print(origin)
                origin = [int(origin.x()), int(origin.y()), origin.z()]
                # slice data
                camRect = Qt.QRect(origin[0], origin[1], w, h)
                dataRect = Qt.QRect(0, 0, data.shape[1], data.shape[2])
                overlap = camRect.intersected(dataRect)
                tl = overlap.topLeft() - camRect.topLeft()
                # Linear blend between the two nearest depth planes.
                z = origin[2]
                z1 = np.floor(z)
                z2 = np.ceil(z)
                s = (z - z1) / (z2 - z1)
                z1 = int(np.clip(z1, 0, data.shape[0] - 1))
                z2 = int(np.clip(z2, 0, data.shape[0] - 1))
                src1 = data[
                    z1,
                    overlap.left() : overlap.left() + overlap.width(),
                    overlap.top() : overlap.top() + overlap.height(),
                ]
                src2 = data[
                    z2,
                    overlap.left() : overlap.left() + overlap.width(),
                    overlap.top() : overlap.top() + overlap.height(),
                ]
                src = src1 * (1 - s) + src2 * s
                # Areas outside the stack get a flat value of 100.
                bg = np.empty((w, h), dtype=data.dtype)
                bg[:] = 100
                bg[tl.x() : tl.x() + overlap.width(), tl.y() : tl.y() + overlap.height()] = src
                self.background = bg
                # vectors = ([1, 0, 0], [0, 1, 0])
                # self.background = pg.affineSlice(data, (w,h), origin, vectors, (1, 2, 0), order=1)
            else:
                tr = pg.SRTTransform(tr)
                m = Qt.QTransform()
                m.scale(3e6, 3e6)
                m.translate(0.0005, 0.0005)
                tr = tr * m
                origin = tr.map(pg.Point(0, 0))
                x = tr.map(pg.Point(1, 0)) - origin
                y = tr.map(pg.Point(0, 1)) - origin
                origin = np.array([origin.x(), origin.y()])
                x = np.array([x.x(), x.y()])
                y = np.array([y.x(), y.y()])
                # slice fractal from pre-rendered data
                vectors = (x, y)
                self.background = pg.affineSlice(self.bgData, (w, h), origin, vectors, (0, 1), order=1)
        return self.background
    def pixelVectors(self):
        """Return the global-coordinate vectors spanned by one sensor pixel
        along x and y under the current global transform."""
        tr = self.globalTransform()
        origin = tr.map(pg.Point(0, 0))
        x = tr.map(pg.Point(1, 0)) - origin
        y = tr.map(pg.Point(0, 1)) - origin
        origin = np.array([origin.x(), origin.y()])
        x = np.array([x.x(), x.y()])
        y = np.array([y.x(), y.y()])
        return x, y
    def newFrames(self):
        """Return a list of all frames acquired since the last call to newFrames."""
        prof = pg.debug.Profiler(disabled=True)
        now = ptime.time()
        dt = now - self.lastFrameTime
        exp = self.getParam("exposure")
        bin = self.getParam("binning")
        # Simulated frame rate: exposure plus a binning-dependent readout time.
        fps = 1.0 / (exp + (40e-3 / (bin[0] * bin[1])))
        nf = int(dt * fps)
        if nf == 0:
            return []
        self.lastFrameTime = now + exp
        prof()
        region = self.getParam("region")
        prof()
        bg = self.getBackground()[region[0] : region[0] + region[2], region[1] : region[1] + region[3]]
        prof()
        # Start with noise
        shape = region[2:]
        data = self.getNoise(shape)
        # data = np.zeros(shape, dtype=float)
        prof()
        # Add specimen
        data += bg * (exp * 10)
        prof()
        # update cells: Poisson spiking with exponential decay of brightness
        spikes = np.random.poisson(min(dt, 0.4) * self.cells["rate"])
        self.cells["value"] *= np.exp(-dt / self.cells["decayTau"])
        self.cells["value"] = np.clip(self.cells["value"] + spikes * 0.2, 0, 1)
        data[data < 0] = 0
        # draw cells
        px = (self.pixelVectors()[0] ** 2).sum() ** 0.5
        # Generate transform that maps grom global coordinates to image coordinates
        cameraTr = pg.SRTTransform3D(self.inverseGlobalTransform())
        # note we use binning=(1,1) here because the image is downsampled later.
        frameTr = self.makeFrameTransform(region, [1, 1]).inverted()[0]
        tr = pg.SRTTransform(frameTr * cameraTr)
        for cell in self.cells:
            w = cell["size"] / px
            pos = pg.Point(cell["x"], cell["y"])
            imgPos = tr.map(pos)
            start = (int(imgPos.x()), int(imgPos.y()))
            stop = (int(start[0] + w), int(start[1] + w))
            val = cell["intensity"] * cell["value"] * self.getParam("exposure")
            data[max(0, start[0]) : max(0, stop[0]), max(0, start[1]) : max(0, stop[1])] += val
        # Binning
        if bin[0] > 1:
            data = fn.downsample(data, bin[0], axis=0)
        if bin[1] > 1:
            data = fn.downsample(data, bin[1], axis=1)
        data = data.astype(np.uint16)
        prof()
        self.frameId += 1
        frames = []
        for i in range(nf):
            frames.append({"data": data, "time": now + (i / fps), "id": self.frameId})
        prof()
        return frames
    def quit(self):
        pass
    def listParams(self, params=None):
        """List properties of specified parameters, or of all parameters if None"""
        if params is None:
            return self.paramRanges
        else:
            if isinstance(params, six.string_types):
                return self.paramRanges[params]
            out = OrderedDict()
            for k in params:
                out[k] = self.paramRanges[k]
            return out
    def setParams(self, params, autoRestart=True, autoCorrect=True):
        """Set multiple parameters; expands grouped parameters (binning,
        region) into their scalar components.
        NOTE(review): autoCorrect is accepted but currently ignored, and
        restart is unconditionally True."""
        dp = []
        ap = {}
        for k in params:
            if k in self.groupParams:
                ap.update(dict(zip(self.groupParams[k], params[k])))
                dp.append(k)
        params.update(ap)
        for k in dp:
            del params[k]
        self.params.update(params)
        newVals = params
        restart = True
        if autoRestart and restart:
            self.restart()
        self.sigParamsChanged.emit(newVals)
        return (newVals, restart)
    def getParams(self, params=None):
        """Return current values for *params* (all parameters when None);
        grouped parameters are reassembled from their scalar components."""
        if params is None:
            params = list(self.listParams().keys())
        vals = OrderedDict()
        for k in params:
            if k in self.groupParams:
                vals[k] = list(self.getParams(self.groupParams[k]).values())
            else:
                vals[k] = self.params[k]
        return vals
    def setParam(self, param, value, autoRestart=True, autoCorrect=True):
        """Single-parameter convenience wrapper around setParams()."""
        return self.setParams({param: value}, autoRestart=autoRestart, autoCorrect=autoCorrect)
    def getParam(self, param):
        """Single-parameter convenience wrapper around getParams()."""
        return self.getParams([param])[param]
    def createTask(self, cmd, parentTask):
        """Return a MockCameraTask for this device (called by the task runner)."""
        with self.lock:
            return MockCameraTask(self, cmd, parentTask)
class MockCameraTask(CameraTask):
    """Generate exposure waveform when recording with mockcamera.
    """

    def __init__(self, dev, cmd, parentTask):
        CameraTask.__init__(self, dev, cmd, parentTask)
        # Register our generator so the mock DAQ "reads" the exposure signal.
        self._DAQCmd["exposure"]["lowLevelConf"] = {"mockFunc": self.makeExpWave}
        self.frameTimes = []

    def makeExpWave(self):
        """Build a 0/1 uint8 waveform marking when the shutter was open,
        sampled at the DAQ's rate, one high region per acquired frame."""
        # Called by DAQGeneric to simulate a read-from-DAQ
        # first look up the DAQ configuration so we know the sample rate / number
        daq = self.dev.listChannels()["exposure"]["device"]
        cmd = self.parentTask().tasks[daq].cmd
        start = self.parentTask().startTime
        sampleRate = cmd["rate"]
        data = np.zeros(cmd["numPts"], dtype=np.uint8)
        # self.frames is presumably populated by the CameraTask base class
        # during acquisition -- TODO confirm.
        for f in self.frames:
            t = f.info()["time"]
            exp = f.info()["exposure"]
            i0 = int((t - start) * sampleRate)
            i1 = i0 + int((exp - 0.1e-3) * sampleRate)
            data[i0:i1] = 1
        return data
def mandelbrot(width=500, height=None, maxIter=20, xRange=(-2.0, 1.0), yRange=(-1.2, 1.2)):
    """Render the Mandelbrot set over the given complex-plane window.

    Returns a (width x height) integer image of escape-iteration indices
    (0 for points that never escape within *maxIter* iterations).
    """
    xmin, xmax = xRange
    ymin, ymax = yRange
    if height is None:
        # Preserve the aspect ratio of the requested window.
        height = int(width * (ymax - ymin) / (xmax - xmin))
    re = np.linspace(xmin, xmax, width).reshape(width, 1)
    im = np.linspace(ymin, ymax, height).reshape(1, height)
    # Cheap analytic pre-filter: points inside the main cardioid or the
    # period-2 bulb never escape, so exclude them from iteration up front.
    shifted = re - 0.25
    imsq = im ** 2
    q = shifted ** 2 + imsq
    mask = q * (q + shifted) > 0.25 * imsq
    mask &= (re + 1) ** 2 + imsq > 1 / 16.0
    mask &= re > -2
    mask &= re < 0.7
    mask &= im > -1.2
    mask &= im < 1.2
    img = np.zeros((width, height), dtype=int)
    xInd, yInd = np.mgrid[0:width, 0:height]
    re = re.reshape(width)[xInd]
    im = im.reshape(height)[yInd]
    c = np.empty((width, height), dtype=np.complex64)
    c.real = re
    c.imag = im
    z = c.copy()
    for i in range(maxIter):
        # Shrink the working set to points that have not escaped yet.
        z = z[mask]
        c = c[mask]
        xInd = xInd[mask]
        yInd = yInd[mask]
        z *= z
        z += c
        mask = np.abs(z) < 2.0
        img[xInd[mask], yInd[mask]] = i % (maxIter - 1)
    return img
| acq4/acq4 | acq4/devices/MockCamera/mock_camera.py | Python | mit | 14,519 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import spack.paths
class UrlListTest(Package):
    """Mock package with url_list."""
    homepage = "http://www.url-list-example.com"
    web_data_path = join_path(spack.paths.test_path, 'data', 'web')
    # Fetch a local fixture tarball; list_url points at an HTML index that
    # Spack crawls (following links up to list_depth levels) to discover
    # additional versions.
    url = 'file://' + web_data_path + '/foo-0.0.0.tar.gz'
    list_url = 'file://' + web_data_path + '/index.html'
    list_depth = 3
    # Dummy versions/checksums exercising plain, pre-release and rc formats.
    version('0.0.0', 'abc000')
    version('1.0.0', 'abc100')
    version('3.0', 'abc30')
    version('4.5', 'abc45')
    version('2.0.0b2', 'abc200b2')
    version('3.0a1', 'abc30a1')
    version('4.5-rc5', 'abc45rc5')
| iulian787/spack | var/spack/repos/builtin.mock/packages/url-list-test/package.py | Python | lgpl-2.1 | 793 |
# encoding: utf-8
if __name__ == '__main__':
    from PyQt5 import Qt, QtWidgets, QtCore
    # Pre-built Enter key-press event; not used in this block -- presumably
    # consumed by MainPane at import/start time, TODO confirm.
    enter_event = Qt.QKeyEvent(Qt.QEvent.KeyPress, Qt.Qt.Key_Enter, Qt.Qt.NoModifier)
    import MainPane
    MainPane.start_app()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.