| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
sbox = [
[1, 1, 1, 0],
[0, 1, 0, 0],
[1, 1, 0, 1],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 1, 1, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 0, 1, 1],
[1, 0, 1, 0],
[0, 1, 1, 0],
[1, 1, 0, 0],
[0, 1, 0, 1],
[1, 0, 0, 1],
[0, 0, 0, 0],
[0, 1, 1, 1],
]
invsbox = [
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 1, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 1, 0, 0],
[1, 0, 1, 0],
[1, 1, 1, 1],
[0, 1, 1, 1],
[1, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 1, 1],
[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 1, 0, 1],
]
h2b = {"0":[0, 0, 0, 0], "1":[0, 0, 0, 1], "2":[0, 0, 1, 0], "3":[0, 0, 1, 1],
"4":[0, 1, 0, 0], "5":[0, 1, 0, 1], "6":[0, 1, 1, 0], "7":[0, 1, 1, 1],
"8":[1, 0, 0, 0], "9":[1, 0, 0, 1], "a":[1, 0, 1, 0], "b":[1, 0, 1, 1],
"c":[1, 1, 0, 0], "d":[1, 1, 0, 1], "e":[1, 1, 1, 0], "f":[1, 1, 1, 1]}
multGF24 = [
['0000000000000000'],
['0123456789abcdef'],
['02468ace3175b9fd'],
['0365cfa9b8de7412'],
['048c37bf62ea51d9'],
['05af72d8eb419c36'],
['06cabd71539fe824'],
['07e9f816da3425cb'],
['083b6e5dc4f7a291'],
['09182b3a4d5c6f7e'],
['0a7de493f5821b6c'],
['0b5ea1f47c29d683'],
['0cb759e2a61df348'],
['0d941c852fb63ea7'],
['0ef1d32c97684ab5'],
['0fd296481ec3875a']]
def c2h(char):
    # '%02x' always yields two lowercase hex digits, even for ordinals < 0x10
    return '%02x' % ord(char)
def stringToBlocks(str):
    """ This function takes a string as input and returns a list of
    blocks, each block being a list of four nibbles (two characters).
    Strings of odd length are padded with '#'.
    sample: stringToBlocks('t') -> [[[0, 1, 1, 1], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 1]]]"""
result = []
if (len(str) % 2):
str = str + '#'
for cpt in range(0, len(str), 2):
result.append([h2b[c2h(str[cpt])[0]], h2b[c2h(str[cpt])[1]], h2b[c2h(str[cpt+1])[0]], h2b[c2h(str[cpt+1])[1]]])
return result
def blocksToBin(blocks):
    """Print the blocks as space-separated binary digits (debug helper)."""
    result = ''
for block in blocks:
result += nibblesToString(block)
print result
def blocksToString(blocks):
tmp = ''
result = ''
for block in blocks:
for nibble in block:
            # '%x' maps 0..15 to a single lowercase hex digit, zero included
            tmp += '%x' % nibbleToInt(nibble)
for i in range(0, len(tmp), 2):
result += chr(int(tmp[i]+tmp[i+1], 16))
return result
def nibblesToBlock(nibbles):
""" This function take a list of 4 nibbles as input
and return a list (block) of 16 binary digit.
"""
result = []
for n in nibbles:
for bit in n:
result.append(bit)
return result
def nibblesToString(nibbles):
"""
This function take a list of 4 nibbles as input
and return a string for printing these nibbles.
"""
result = ''
for n in nibbles:
for bit in n:
result += str(bit)
result += ' '
return result
def nibbleToInt(nibble):
""" This function take a single nibble
as input and return its integer representation.
"""
result = (nibble[0] * 2**3) + (nibble[1] * 2**2) + (nibble[2] * 2**1) + (nibble[3] * 2**0)
return result
def xorNibbles(n1, n2):
""" This function take two nibbles as input,
execute a xor operation and return the result.
"""
result = []
for i in range(4):
result.append(n1[i] ^ n2[i])
return result
def galoisMultNibbles(n1, n2):
""" This function take two nibbles
as input, execute their galois multiplication
and return the result.
For mini aes galois multiplication means n1 * n2 mod(x^4 + x +1);
"""
i1 = nibbleToInt(n1)
i2 = nibbleToInt(n2)
return h2b[multGF24[i1][0][i2]]
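# A table-free sketch of the same operation (our addition; the helper name
# galoisMultDirect is hypothetical): multiply the nibbles as polynomials over
# GF(2) and reduce modulo x^4 + x + 1 (0b10011). It should agree with the
# table-based galoisMultNibbles above on every input pair.
def galoisMultDirect(n1, n2):
    a = nibbleToInt(n1)
    b = nibbleToInt(n2)
    product = 0
    for _ in range(4):
        if b & 1:
            product ^= a
        b >>= 1
        a <<= 1
        if a & 0b10000:
            # a degree-4 term appeared: reduce by the field polynomial
            a ^= 0b10011
    return h2b['%x' % product]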
def keySchedule(key):
""" This function take a key in the form
of a list of four nibbles, execute the mini aes
key schedule and return the result.
"""
rcon1 = [0, 0, 0, 1]
rcon2 = [0, 0, 1, 0]
w0 = key[0]
w1 = key[1]
w2 = key[2]
w3 = key[3]
w4 = xorNibbles(xorNibbles(w0, sbox[nibbleToInt(w3)]), rcon1)
w5 = xorNibbles(w1, w4)
w6 = xorNibbles(w2, w5)
w7 = xorNibbles(w3, w6)
w8 = xorNibbles(xorNibbles(w4, sbox[nibbleToInt(w7)]), rcon2)
w9 = xorNibbles(w5, w8)
w10 = xorNibbles(w6, w9)
w11 = xorNibbles(w7, w10)
return [[w0, w1, w2, w3], [w4, w5, w6, w7], [w8, w9, w10, w11]]
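# Worked example, hand-computed from the tables above, for the test key
# 0xC3F0 used in miniAESvectorTest below (w0..w3 = C, 3, F, 0):
#   w4 = w0 ^ sbox[w3] ^ rcon1 = C ^ E ^ 1 = 3  ->  round key 1 = 3, 0, F, F
#   w8 = w4 ^ sbox[w7] ^ rcon2 = 3 ^ 7 ^ 2 = 6  ->  round key 2 = 6, 6, 9, 6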
def addKey(key, nibbles):
result = []
for i in range(4):
result.append( xorNibbles(nibbles[i], key[i]) )
print "addKey ->\t", nibblesToString(result)
return result
def nibbleSub(nibbles):
result = []
    for n in nibbles:
        result.append(sbox[nibbleToInt(n)])
print "nibbleSub ->\t", nibblesToString(result)
return result
def invNibbleSub(nibbles):
result = []
    for n in nibbles:
        result.append(invsbox[nibbleToInt(n)])
print "invNibbleSub ->\t", nibblesToString(result)
return result
def shiftRow(nibbles):
result = []
result.append(nibbles[0])
result.append(nibbles[3])
result.append(nibbles[2])
result.append(nibbles[1])
print "shiftRow ->\t", nibblesToString(result)
return result
def mixColumn(nibbles):
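    # constant MixColumn matrix over GF(2^4): [[3, 2], [2, 3]], applied to the
    # state columns [n0, n1] and [n2, n3]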
matrix = [[0, 0, 1, 1], [0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 1]]
d0 = xorNibbles(galoisMultNibbles(matrix[0], nibbles[0]), galoisMultNibbles(matrix[2], nibbles[1]))
d1 = xorNibbles(galoisMultNibbles(matrix[1], nibbles[0]), galoisMultNibbles(matrix[3], nibbles[1]))
d2 = xorNibbles(galoisMultNibbles(matrix[0], nibbles[2]), galoisMultNibbles(matrix[2], nibbles[3]))
d3 = xorNibbles(galoisMultNibbles(matrix[1], nibbles[2]), galoisMultNibbles(matrix[3], nibbles[3]))
result = [d0, d1, d2, d3]
print "mixColumn ->\t", nibblesToString(result)
return result
def miniAESencrypt(plain, key):
print "### plain ->\t", nibblesToString(plain)
print "### key ->\t", nibblesToString(key)
roundKeys = keySchedule(key)
cipher = addKey(roundKeys[0], plain)
cipher = nibbleSub(cipher)
cipher = shiftRow(cipher)
cipher = mixColumn(cipher)
cipher = addKey(roundKeys[1], cipher)
cipher = nibbleSub(cipher)
cipher = shiftRow(cipher)
cipher = addKey(roundKeys[2], cipher)
print "### cipher ->\t", nibblesToString(cipher)
return cipher
def miniAESdecrypt(cipher, key):
print "### cipher ->\t", nibblesToString(cipher)
print "### key ->\t", nibblesToString(key)
roundKeys = keySchedule(key)
plain = addKey(roundKeys[2], cipher)
plain = invNibbleSub(plain)
plain = shiftRow(plain)
plain = addKey(roundKeys[1], plain)
plain = mixColumn(plain)
plain = invNibbleSub(plain)
plain = shiftRow(plain)
plain = addKey(roundKeys[0], plain)
print "### plain ->\t", nibblesToString(plain)
return plain
def miniAEStextEncrypt(plain, key):
print plain, key
cipher = []
binplain = stringToBlocks(plain)
binkey = stringToBlocks(key)
for block in binplain:
cipher.append(miniAESencrypt(block, binkey[0]))
return cipher
def miniAEStextDecrypt(cipher, key):
print cipher, key
plain = []
binkey = stringToBlocks(key)
for block in cipher:
plain.append(miniAESdecrypt(block, binkey[0]))
return blocksToString(plain).rstrip('#')
def miniAESvectorTest():
plain = [[1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]]
key = [[1, 1, 0, 0], [0, 0, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]]
cipher = miniAESencrypt(plain, key)
print
plain = miniAESdecrypt(cipher, key)
def miniAEStextTest(key, plain):
cipher = miniAEStextEncrypt(plain, key)
plain = miniAEStextDecrypt(cipher, key)
print plain
if __name__ == "__main__":
miniAESvectorTest()
print
    miniAEStextTest('KE', 'ab')
| archoad/PythonAES | miniaes.py | Python | gpl-3.0 | 7,116 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.eos.eos import eos_provider_spec
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(eos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'eos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout']) if provider['timeout'] else None
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
if connection._play_context.timeout is None:
connection._play_context.timeout = connection.get_option('persistent_command_timeout')
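                # start the persistent connection process; run() returns the
                # path of its local control socket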
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.eapi_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context, which should be
            # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while '(config' in to_text(out, errors='surrogate_then_replace').strip():
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('abort')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def eapi_implementation(provider, play_context):
provider['transport'] = 'eapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
default_port = 443 if provider['use_ssl'] else 80
provider['port'] = int(play_context.port or default_port)
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('authorize') is None:
provider['authorize'] = False
return provider
| mheap/ansible | lib/ansible/plugins/action/eos.py | Python | gpl-3.0 | 5,886 |
from functools import wraps
from flask import request, redirect, session, url_for
from models.documents import User
def login():
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'username' not in session:
return redirect(url_for('base.login', next=request.url))
return f(*args, **kwargs)
return decorated_function
return decorator
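# Usage sketch (hypothetical view; assumes a Flask app with a 'base.login'
# endpoint, as referenced above):
#
#     @app.route('/dashboard')
#     @login()
#     def dashboard():
#         return render_template('dashboard.html')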
def admin():
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
me = session.get('username')
if me:
if me == 'admin':
return f(*args, **kwargs)
else:
return "Only allowed for admin"
else:
return redirect(url_for('base.login', next=request.url))
return decorated_function
    return decorator
| kailashbuki/predator | installed/webserver/views/access/requires.py | Python | mit | 890 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext = Extension("objs", sources=["objs.py"])
setup(ext_modules=[ext], cmdclass={'build_ext': build_ext})
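# Typical invocation (a sketch; compiles objs.py into a C extension in place):
#   python topyd.py build_ext --inplace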
| pedrohforli/InfoRectMaker | Setups/topyd.py | Python | gpl-2.0 | 228 |
"""Utilities for testing"""
import itertools
from gameanalysis import rsgame
def basic_games():
"""Small basic games for testing"""
yield rsgame.empty(1, 2)
yield rsgame.empty(2, 2)
yield rsgame.empty(2, 3)
yield rsgame.empty(3, 2)
yield rsgame.empty(3, 3)
yield rsgame.empty([2, 3], [3, 2])
yield rsgame.empty([1, 1, 1], 2)
def singleton_games():
"""Games that have singleton roles"""
yield rsgame.empty([2, 1], [1, 2])
yield rsgame.empty([1, 2], [2, 1])
for strats in itertools.islice(itertools.product(*[[1, 2]] * 3), 1, None):
yield rsgame.empty(1, strats)
def large_games():
"""Games that test functionality in large spaces"""
yield rsgame.empty([1, 1], [5, 5])
yield rsgame.empty([2, 2], [5, 5])
yield rsgame.empty([5, 5], [2, 2])
yield rsgame.empty([1, 1, 1, 1], 2)
yield rsgame.empty([3, 3, 3], [3, 3, 3])
yield rsgame.empty([2, 3, 4], [4, 3, 2])
yield rsgame.empty(170, 2)
yield rsgame.empty(180, 2)
def edge_games():
"""Small number of edge games"""
yield rsgame.empty(4, 3)
yield rsgame.empty([3, 2], [2, 3])
yield rsgame.empty([2, 2, 2], 2)
| egtaonline/GameAnalysis | test/utils.py | Python | apache-2.0 | 1,170 |
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from parsl.monitoring.web_app.app import app, get_db, close_db
from parsl.monitoring.web_app.utils import dropdown
from parsl.monitoring.web_app.apps import workflow_details, tasks_details
def display_workflow(workflow_name):
sql_conn = get_db()
df_workflows = pd.read_sql_query('SELECT workflow_name, time_began, rundir, run_id FROM workflows WHERE workflow_name=(?)',
sql_conn, params=(workflow_name, ))
close_db()
return html.Div(children=[
html.H2(id='workflow_name', children=df_workflows['workflow_name'][0]),
dropdown(id='run_number_dropdown', dataframe=df_workflows.sort_values(by='time_began', ascending=False), field='rundir'),
dcc.Tabs(id="tabs", value='workflow', children=[
dcc.Tab(label='Workflow', value='workflow'),
dcc.Tab(label='Tasks', value='tasks'),
]),
html.Div(id='tabs-content')
])
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'workflow':
return workflow_details.layout
elif tab == 'tasks':
return tasks_details.layout
| swift-lang/swift-e-lab | parsl/monitoring/web_app/apps/tabs.py | Python | apache-2.0 | 1,307 |
# _ __ _ _ ____________
# | '__| | | |_ /_ /_ /
# | | | |_| |/ / / / / /
# |_| \__,_/___/___/___|
#
__author__ = "Ruslan Zaporojets"
__email__ = "ruzzzua@gmail.com"
__license__ = "MIT"
__version__ = "1.0.0"
# Date: 2016.10.28
import os, sys
from difflib import SequenceMatcher
from Registry import Registry
from RegistryExport import RegistryExport
ADDED_NAME = 'added.reg'
CHANGED_NAME = 'changed.reg'
REMOVED_NAME = 'removed.reg'
def _key_fix_cameyo(path_parts):
# Fix 'Cameyo.Repackage.VERSION\Registry\...'
if len(path_parts) < 3:
return []
result = path_parts[2:]
if result[0] == '%CurrentUser%':
result[0] = 'HKEY_CURRENT_USER'
elif result[0] == 'Machine':
result[0] = 'HKEY_LOCAL_MACHINE'
else:
        raise ValueError('unexpected registry root: %r' % result[0])  # TODO
return result
def _log_error(errorcode, data):
s = RegistryExport.error_to_log(errorcode, data)
if s: print(s)
return RegistryExport.on_error(errorcode, data)
def keys_diff(key1, key2):
assert isinstance(key1, Registry.RegistryKey)
assert isinstance(key2, Registry.RegistryKey)
reg_exp = RegistryExport(_key_fix_cameyo, _log_error)
d1 = reg_exp.key_to_tuples(key1)
d2 = reg_exp.key_to_tuples(key2)
    # one-element list so the nested function can update the enclosing state
    prev_key = ['']
    def to_single_line(reg_tuple):
        if reg_tuple[0]:  # Key tuple
            k = reg_tuple[1].lower()
            prev_key[0] = k
            n = ''
            v = ''
        else:
            k = prev_key[0]
            n = reg_tuple[2].lower()
            v = reg_tuple[3]
        return k + n + v
sm = SequenceMatcher(
None,
a=[to_single_line(c) for c in d1],
b=[to_single_line(c) for c in d2],
)
opcodes = sm.get_opcodes()
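    # get_opcodes() yields 5-tuples describing how to turn a into b, e.g.:
    #   ('equal',   i1, i2, j1, j2)  -> a[i1:i2] == b[j1:j2]
    #   ('replace', i1, i2, j1, j2)  -> a[i1:i2] was changed into b[j1:j2]
    #   ('insert',  i1, i1, j1, j2)  -> b[j1:j2] was added
    #   ('delete',  i1, i2, j1, j1)  -> a[i1:i2] was removed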
changed_list = []
added_list = []
deleted_list = []
for tag, a1, a2, b1, b2 in opcodes:
if tag == 'replace':
changed_list.extend(range(b1, b2))
elif tag == 'insert':
added_list.extend(range(b1, b2))
elif tag == 'delete':
deleted_list.extend(range(a1, a2))
print(changed_list)
print(added_list)
print(deleted_list)
def main(argv=None):
    if argv is None: argv = sys.argv
    argc = len(argv)
    if not (argc == 2 and os.path.isdir(argv[1])):
        sys.exit('Usage: %s cameyo_unpacked_dir'
                 % (os.path.basename(argv[0])))
    root_dir = argv[1]
reg_old = os.path.join(root_dir, 'CHANGES\\VirtReg.Base.dat')
reg_new = os.path.join(root_dir, 'CHANGES\\VirtReg.dat')
reg1 = Registry.Registry(reg_old)
reg2 = Registry.Registry(reg_new)
keys_diff(reg1.root(), reg2.root())
if __name__ == '__main__':
    main(argv=sys.argv)
| Ruzzz/OneFileTools | script/cameyo_regdiff2.py | Python | mit | 2,779 |
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
from unittest import TestCase
from muntjac.data.util.indexed_container import IndexedContainer
from muntjac.data.util.hierarchical_container import HierarchicalContainer
class TestContainerSorting(TestCase):
_ITEM_DATA_MINUS2_NULL = 'Data -2 null'
_ITEM_DATA_MINUS2 = 'Data -2'
_ITEM_DATA_MINUS1 = 'Data -1'
_ITEM_DATA_MINUS1_NULL = 'Data -1 null'
_ITEM_ANOTHER_NULL = 'Another null'
_ITEM_STRING_2 = 'String 2'
_ITEM_STRING_NULL2 = 'String null'
_ITEM_STRING_1 = 'String 1'
_PROPERTY_INTEGER_NULL2 = 'integer-null'
_PROPERTY_INTEGER_NOT_NULL = 'integer-not-null'
_PROPERTY_STRING_NULL = 'string-null'
_PROPERTY_STRING_ID = 'string-not-null'
def setUp(self):
super(TestContainerSorting, self).setUp()
def testEmptyFilteredIndexedContainer(self):
ic = IndexedContainer()
self.addProperties(ic)
self.populate(ic)
ic.addContainerFilter(self._PROPERTY_STRING_ID, 'aasdfasdfasdf',
True, False)
ic.sort([self._PROPERTY_STRING_ID], [True])
def testFilteredIndexedContainer(self):
ic = IndexedContainer()
self.addProperties(ic)
self.populate(ic)
ic.addContainerFilter(self._PROPERTY_STRING_ID, 'a', True, False)
ic.sort([self._PROPERTY_STRING_ID], [True])
self.verifyOrder(ic, [self._ITEM_ANOTHER_NULL, self._ITEM_DATA_MINUS1,
self._ITEM_DATA_MINUS1_NULL, self._ITEM_DATA_MINUS2,
self._ITEM_DATA_MINUS2_NULL])
def testIndexedContainer(self):
ic = IndexedContainer()
self.addProperties(ic)
self.populate(ic)
ic.sort([self._PROPERTY_STRING_ID], [True])
self.verifyOrder(ic, [self._ITEM_ANOTHER_NULL, self._ITEM_DATA_MINUS1,
self._ITEM_DATA_MINUS1_NULL, self._ITEM_DATA_MINUS2,
self._ITEM_DATA_MINUS2_NULL, self._ITEM_STRING_1,
self._ITEM_STRING_2, self._ITEM_STRING_NULL2])
ic.sort([self._PROPERTY_INTEGER_NOT_NULL, self._PROPERTY_INTEGER_NULL2,
self._PROPERTY_STRING_ID], [True, False, True])
self.verifyOrder(ic, [self._ITEM_DATA_MINUS2,
self._ITEM_DATA_MINUS2_NULL, self._ITEM_DATA_MINUS1,
self._ITEM_DATA_MINUS1_NULL, self._ITEM_ANOTHER_NULL,
self._ITEM_STRING_NULL2, self._ITEM_STRING_1,
self._ITEM_STRING_2])
ic.sort([self._PROPERTY_INTEGER_NOT_NULL, self._PROPERTY_INTEGER_NULL2,
self._PROPERTY_STRING_ID], [True, True, True])
self.verifyOrder(ic, [self._ITEM_DATA_MINUS2_NULL,
self._ITEM_DATA_MINUS2, self._ITEM_DATA_MINUS1_NULL,
self._ITEM_DATA_MINUS1, self._ITEM_ANOTHER_NULL,
self._ITEM_STRING_NULL2, self._ITEM_STRING_1,
self._ITEM_STRING_2])
def testHierarchicalContainer(self):
hc = HierarchicalContainer()
self.populateContainer(hc)
hc.sort(['name'], [True])
self.verifyOrder(hc, ['Audi', 'C++', 'Call of Duty', 'Cars',
'English', 'Fallout', 'Finnish', 'Ford', 'Games', 'Java',
'Might and Magic', 'Natural languages', 'PHP',
'Programming languages', 'Python', 'Red Alert', 'Swedish',
'Toyota', 'Volvo'])
self.assertArrays(list(hc.rootItemIds()), [self._nameToId['Cars'],
self._nameToId['Games'], self._nameToId['Natural languages'],
self._nameToId['Programming languages']])
self.assertArrays(list(hc.getChildren(self._nameToId['Games'])),
[self._nameToId['Call of Duty'], self._nameToId['Fallout'],
self._nameToId['Might and Magic'],
self._nameToId['Red Alert']])
@classmethod
def populateContainer(cls, container):
container.addContainerProperty('name', str, None)
cls.addItem(container, 'Games', None)
cls.addItem(container, 'Call of Duty', 'Games')
cls.addItem(container, 'Might and Magic', 'Games')
cls.addItem(container, 'Fallout', 'Games')
cls.addItem(container, 'Red Alert', 'Games')
cls.addItem(container, 'Cars', None)
cls.addItem(container, 'Toyota', 'Cars')
cls.addItem(container, 'Volvo', 'Cars')
cls.addItem(container, 'Audi', 'Cars')
cls.addItem(container, 'Ford', 'Cars')
cls.addItem(container, 'Natural languages', None)
cls.addItem(container, 'Swedish', 'Natural languages')
cls.addItem(container, 'English', 'Natural languages')
cls.addItem(container, 'Finnish', 'Natural languages')
cls.addItem(container, 'Programming languages', None)
cls.addItem(container, 'C++', 'Programming languages')
cls.addItem(container, 'PHP', 'Programming languages')
cls.addItem(container, 'Java', 'Programming languages')
cls.addItem(container, 'Python', 'Programming languages')
_index = 0
_nameToId = dict()
_idToName = dict()
@classmethod
def addItem(cls, *args):
nargs = len(args)
if nargs == 3:
container, string, parent = args
cls._nameToId[string] = cls._index
cls._idToName[cls._index] = string
item = container.addItem(cls._index)
item.getItemProperty('name').setValue(string)
if parent is not None and isinstance(container, HierarchicalContainer):
container.setParent(cls._index, cls._nameToId[parent])
cls._index += 1
elif nargs == 5:
ic, idd, string_null, integer, integer_null = args
i = ic.addItem(idd)
i.getItemProperty(cls._PROPERTY_STRING_ID).setValue(idd)
i.getItemProperty(cls._PROPERTY_STRING_NULL).setValue(string_null)
i.getItemProperty(cls._PROPERTY_INTEGER_NOT_NULL).setValue(integer)
i.getItemProperty(cls._PROPERTY_INTEGER_NULL2).setValue(integer_null)
return i
else:
raise ValueError
def verifyOrder(self, ic, idOrder):
size = len(ic)
actual = [None] * size
for index, o in enumerate(ic.getItemIds()):
if (o.__class__ == int) and (idOrder[index].__class__ == str):
o = self._idToName[o]
actual[index] = o
self.assertArrays(actual, idOrder)
def assertArrays(self, actualObjects, expectedObjects):
self.assertEquals(len(expectedObjects), len(actualObjects),
'Actual contains a different number of values than was expected')
for i in range(len(actualObjects)):
actual = actualObjects[i]
expected = expectedObjects[i]
self.assertEquals(expected, actual, 'Item[%d] does not match' % i)
def populate(self, ic):
self.addItem(ic, self._ITEM_STRING_1, self._ITEM_STRING_1, 1, 1)
self.addItem(ic, self._ITEM_STRING_NULL2, None, 0, None)
self.addItem(ic, self._ITEM_STRING_2, self._ITEM_STRING_2, 2, 2)
self.addItem(ic, self._ITEM_ANOTHER_NULL, None, 0, None)
self.addItem(ic, self._ITEM_DATA_MINUS1, self._ITEM_DATA_MINUS1, -1, -1)
self.addItem(ic, self._ITEM_DATA_MINUS1_NULL, None, -1, None)
self.addItem(ic, self._ITEM_DATA_MINUS2, self._ITEM_DATA_MINUS2, -2, -2)
self.addItem(ic, self._ITEM_DATA_MINUS2_NULL, None, -2, None)
def addProperties(self, ic):
ic.addContainerProperty('id', str, None)
ic.addContainerProperty(self._PROPERTY_STRING_ID, str, '')
ic.addContainerProperty(self._PROPERTY_STRING_NULL, str, None)
ic.addContainerProperty(self._PROPERTY_INTEGER_NULL2, int, None)
ic.addContainerProperty(self._PROPERTY_INTEGER_NOT_NULL, int, 0)
ic.addContainerProperty('comparable-null', int, 0)
class MyObject(object):

    def __init__(self):
        self._data = None

    def __cmp__(self, o):
        # compareTo-style ordering: None sorts first, then by _data
        if o is None:
            return 1
        if o._data is None:
            return 0 if self._data is None else 1
        elif self._data is None:
            return -1
        else:
            return cmp(self._data, o._data)
| rwl/muntjac | muntjac/test/server/data/util/container_sorting_test.py | Python | apache-2.0 | 8,193 |
import frappe
def execute():
if frappe.db.exists("DocType", "Guardian"):
frappe.reload_doc("schools", "doctype", "student")
frappe.reload_doc("schools", "doctype", "student_guardian")
frappe.reload_doc("schools", "doctype", "student_sibling")
if "student" not in frappe.db.get_table_columns("Guardian"):
return
guardian = frappe.get_all("Guardian", fields=["name", "student"])
for d in guardian:
if d.student:
student = frappe.get_doc("Student", d.student)
if student:
student.append("guardians", {"guardian": d.name})
student.save()
| shreyasp/erpnext | erpnext/patches/v7_1/set_student_guardian.py | Python | gpl-3.0 | 572 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 07 22:00:12 2016
@author: ryandrewjones
"""
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
from energyPATHWAYS import util
class TestDfOperation(unittest.TestCase):
# indicies to play with
GEOGRAPHIES = range(1, 10)
ENERGY_TYPES = range(1, 10)
YEARS = range(2000, 2051)
VINTAGES = range(1990, 2051)
HOUSING_TYPES = range(1, 5)
TECHNOLOGIES = range(1, 7)
def setUp(self):
# dataframes to play with (these are totals), note that the column name is always value,
# as it is in the rest of the code
index = pd.MultiIndex.from_product((self.GEOGRAPHIES, self.ENERGY_TYPES, self.YEARS),
names=['census_division', 'energy_type', 'year'])
self.a_total = pd.DataFrame(index=index, columns=['value'])
self.a_total['value'] = np.arange(len(self.a_total))+.5
index = pd.MultiIndex.from_product((self.GEOGRAPHIES, self.ENERGY_TYPES, self.TECHNOLOGIES, self.VINTAGES),
names=['census_division', 'energy_type', 'technology', 'vintage'])
self.b_total = pd.DataFrame(index=index, columns=['value'])
self.b_total['value'] = np.arange(len(self.b_total))+.5
# note order of levels doesn't matter
index = pd.MultiIndex.from_product((self.GEOGRAPHIES, self.TECHNOLOGIES, self.ENERGY_TYPES),
names=['census_division', 'technology', 'energy_type'])
self.c_total = pd.DataFrame(index=index, columns=['value'])
self.c_total['value'] = np.arange(len(self.c_total))+.5
index = pd.MultiIndex.from_product((self.GEOGRAPHIES, self.HOUSING_TYPES),
names=['census_division', 'housing_types'])
self.d_total = pd.DataFrame(index=index, columns=['value'])
self.d_total['value'] = np.arange(len(self.d_total))+.5
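        # b_inten: each vintage's share of its (census_division, energy_type,
        # technology) group total, i.e. an intensity that sums to 1 per group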
self.b_inten = self.b_total.groupby(
level=['census_division', 'energy_type', 'technology']
).transform(lambda x: x / x.sum())
def test_basic_add(self):
# this is straight forward, they both match, and this should be quite fast
df = util.DfOper.add((self.a_total, self.a_total), expandable=(False, False), collapsible=(True, True))
npt.assert_almost_equal(df.sum().sum(), self.a_total.sum().sum()*2)
def test_mult_after_index_reorder(self):
# order of the index doesn't matter
df_before = util.DfOper.mult((self.a_total, self.a_total), expandable=(False, False), collapsible=(True, True))
a_total_new_order = self.a_total.reorder_levels([0, 2, 1])
# note that now that we have reordered, it is not sorted
df_after = util.DfOper.mult((self.a_total, a_total_new_order),
expandable=(False, False), collapsible=(True, True))
npt.assert_almost_equal(df_before.sum().sum(), df_after.sum().sum())
def test_add_many(self):
# we can add many together at once
df = util.DfOper.add((self.a_total,)*5, expandable=True, collapsible=False)
npt.assert_almost_equal(df.sum().sum(), self.a_total.sum().sum()*5)
def test_collapse(self):
# here they are both totals, so all we can do is collapse them, the sum before and after should match
df = util.DfOper.add((self.a_total, self.b_total), expandable=(False, False), collapsible=(True, True))
npt.assert_almost_equal(df.sum().sum(), self.a_total.sum().sum() + self.b_total.sum().sum())
def test_expand(self):
# here b has an extra level that is not in c, because c is an intensity
# we can expand it over the missing level "vintage" in b
c_inten = self.c_total.groupby(level=['census_division', 'energy_type']).transform(lambda x: x / x.sum())
df = util.DfOper.mult((c_inten, self.b_total), expandable=(True, False), collapsible=(False, True))
npt.assert_almost_equal(df.sum().sum(), 73308503.39194776, decimal=6)
def test_cant_expand(self):
# This is the opposite case. Now our intensity dataframe has an extra level, and note that this will raise an error.
with self.assertRaises(ValueError) as context_manager:
util.DfOper.mult((self.c_total, self.b_inten), expandable=(False, True), collapsible=(True, False))
self.assertEqual(context_manager.exception.message,
'DataFrame b has extra levels, DataFrame a cannot expand, and DataFrame b cannot collapse')
def test_divide_totals_to_make_intensity(self):
# here we are dividing two totals to make an intensity.
# In this case, b has the extra level vintage, and it is necessary to collapse it before they can be divided
df = util.DfOper.divi((self.c_total, self.b_total), expandable=(False, False), collapsible=(True, True))
# it happens quite a bit that we go back and forth from totals to intensity when we "clean" the data,
# as this next part shows
# df is now an intensity
df2 = util.DfOper.mult((self.b_total, df), expandable=(False, True), collapsible=(True, False))
npt.assert_almost_equal(df2.sum().sum(), self.c_total.sum().sum())
def test_incompatible_total_and_intensity(self):
# totals are collapsible, intensities are expandable, and this gives us an error because they each have levels
# that the other doesn't have. We are at an impasse, so an error is raised.
with self.assertRaises(ValueError) as context_manager:
util.DfOper.mult((self.c_total, self.b_inten), expandable=(False, True), collapsible=(True, False))
self.assertEqual(context_manager.exception.message,
'DataFrame b has extra levels, DataFrame a cannot expand, and DataFrame b cannot collapse')
def test_ignore_none(self):
# None is just ignored
df = util.DfOper.divi((None, self.c_total, None))
self.assertIs(df, self.c_total)
| energyPATHWAYS/energyPATHWAYS | energyPATHWAYS/tests/test_df_operation.py | Python | mit | 6,085 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import time
import unittest
import numpy as np
import paddle
PRINT_STEP = 20
SEED = 2020
program_translator = paddle.jit.ProgramTranslator()
class SimpleLSTMRNN(paddle.nn.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=paddle.nn.initializer.Uniform(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=paddle.nn.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
cell_array = []
hidden_array = []
for i in range(self._num_layers):
hidden_array.append(init_hidden[i])
cell_array.append(init_cell[i])
res = []
for index in range(self._num_steps):
step_input = input_embedding[:, index, :]
for k in range(self._num_layers):
pre_hidden = hidden_array[k]
pre_cell = cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = paddle.concat(x=[step_input, pre_hidden], axis=1)
gate_input = paddle.matmul(x=nn, y=weight_1)
gate_input = paddle.add(x=gate_input, y=bias)
i, j, f, o = paddle.split(
x=gate_input, num_or_sections=4, axis=-1)
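                # standard LSTM cell update: c_t = sigmoid(f)*c_{t-1} + sigmoid(i)*tanh(j)
                # and h_t = tanh(c_t) * sigmoid(o)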
c = pre_cell * paddle.nn.functional.sigmoid(
f) + paddle.nn.functional.sigmoid(i) * paddle.tanh(j)
m = paddle.tanh(c) * paddle.nn.functional.sigmoid(o)
hidden_array[k] = m
cell_array[k] = c
step_input = m
if self._dropout is not None and self._dropout > 0.0:
step_input = paddle.nn.functional.dropout(
step_input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(step_input)
real_res = paddle.concat(x=res, axis=1)
real_res = paddle.reshape(real_res,
[-1, self._num_steps, self._hidden_size])
last_hidden = paddle.concat(x=hidden_array, axis=1)
last_hidden = paddle.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = paddle.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = paddle.concat(x=cell_array, axis=1)
last_cell = paddle.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = paddle.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
class PtbModel(paddle.nn.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = paddle.fluid.dygraph.nn.Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=paddle.ParamAttr(
name='embedding_para',
initializer=paddle.nn.initializer.Uniform(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=paddle.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=paddle.nn.initializer.Uniform(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=paddle.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=paddle.nn.initializer.Uniform(
low=-self.init_scale, high=self.init_scale))
def build_once(self, input, label, init_hidden, init_cell):
pass
@paddle.jit.to_static
def forward(self, input, label, init_hidden, init_cell):
init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = paddle.nn.functional.dropout(
x_emb,
dropout_prob=self.dropout,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
projection = paddle.matmul(x=rnn_out, y=self.softmax_weight)
projection = paddle.add(x=projection, y=self.softmax_bias)
loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = paddle.reshape(loss, shape=[-1, self.num_steps])
loss = paddle.mean(loss, axis=[0])
loss = paddle.fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
def debug_emb(self):
np.save("emb_grad", self.x_emb.gradient())
def train(place):
num_layers = 1
batch_size = 4
hidden_size = 10
num_steps = 3
init_scale = 0.1
max_epoch = 1
dropout = 0.0
vocab_size = 1000
batch_num = 200
paddle.disable_static(place)
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale,
dropout=dropout)
sgd = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=ptb_model.parameters())
for epoch_id in range(max_epoch):
total_loss = 0.0
iters = 0.0
total_sample = 0
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_hidden = paddle.to_tensor(
data=init_hidden_data, dtype=None, place=None, stop_gradient=True)
init_cell = paddle.to_tensor(
data=init_cell_data, dtype=None, place=None, stop_gradient=True)
for step_id in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, num_steps, 1))
x = paddle.to_tensor(
data=x_data, dtype=None, place=None, stop_gradient=True)
y = paddle.to_tensor(
data=y_data, dtype=None, place=None, stop_gradient=True)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
out_loss = dy_loss.numpy()
dy_loss.backward()
sgd.minimize(dy_loss)
ptb_model.clear_gradients()
total_loss += out_loss
iters += num_steps
total_sample += 1
if step_id % PRINT_STEP == 0:
if step_id == 0:
logging.info("epoch %d | step %d, loss %0.3f" %
(epoch_id, step_id, total_loss / total_sample))
avg_batch_time = time.time()
else:
speed = PRINT_STEP / (time.time() - avg_batch_time)
logging.info(
"epoch %d | step %d, loss %0.3f, speed %.3f steps/s" %
(epoch_id, step_id, total_loss / total_sample, speed))
avg_batch_time = time.time()
ret = out_loss, last_hidden.numpy(), last_cell.numpy()
paddle.enable_static()
return ret
def train_dygraph(place):
program_translator.enable(False)
return train(place)
def train_static(place):
program_translator.enable(True)
return train(place)
class TestPtb(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
else paddle.CPUPlace()
def test_check_result(self):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(
np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
self.assertTrue(
np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(hidden_1,
hidden_2))
self.assertTrue(
np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(cell_1, cell_2))
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm_v2.py | Python | apache-2.0 | 11,601 |
# -*- coding: utf-8 -*-
from openerp import models, fields
class ProductTemplate(models.Model):
_inherit = "product.template"
analyzer_id = fields.Char(string='Analyzer ID' , related='product_variant_ids.product_tmpl_id.analyzer_id')
| ichi23de5/ichi_Repo | code_training/models/product.py | Python | gpl-3.0 | 245 |
import pandas as pd
from atmPy.aerosols.size_distribution import diameter_binning
from atmPy.aerosols.size_distribution import sizedistribution
from atmPy.data_archives.arm._netCDF import ArmDataset
class ArmDatasetSub(ArmDataset):
def __init__(self,*args, **kwargs):
self._data_period = 2700.
self._time_offset = (- self._data_period, 's')
super(ArmDatasetSub,self).__init__(*args, **kwargs)
self._concatable = ['size_distribution']
def _data_quality_control(self):
        if self.data_quality_flag_max is None:
if self.data_quality == 'good':
self.data_quality_flag_max = 0
elif self.data_quality == 'patchy':
self.data_quality_flag_max = 15
elif self.data_quality == 'bad':
self.data_quality_flag_max = 100000
else:
txt = '%s is not an excepted values for data_quality ("good", "patchy", "bad")'%(self.data_quality)
raise ValueError(txt)
def _parse_netCDF(self):
super(ArmDatasetSub,self)._parse_netCDF()
df = pd.DataFrame(self._read_variable('number_concentration')['data'],
index = self.time_stamps)
d = self._read_variable('diameter')['data']
bins, colnames = diameter_binning.bincenters2binsANDnames(d[:]*1000)
self.size_distribution = sizedistribution.SizeDist_TS(df,bins,'dNdlogDp')
self.size_distribution._data_period = self._data_period
def plot_all(self):
self.size_distribution.plot()
def _concat_rules(arm_data_objs):
"""nothing here"""
out = ArmDatasetSub(False)
out._concat(arm_data_objs)
return out
# def _concat_rules(files):
# out = ArmDatasetSub(False)
# data = pd.concat([i.size_distribution.data for i in files])
# out.size_distribution = sizedistribution.SizeDist_TS(data,files[0].size_distribution.bins,'dNdlogDp')
# out.size_distribution._data_period = out._data_period
#     return out
| hagne/atm-py | atmPy/data_archives/arm/file_io/products/_tdmasize.py | Python | mit | 2,013 |
#-*- coding:utf-8 -*-
from builtins import range
from functools import total_ordering
@total_ordering
class moduint(object):
def __init__(self, arg):
self.arg = int(arg) % self.__class__.limit
assert(self.arg >= 0 and self.arg < self.__class__.limit)
def __repr__(self):
return self.__class__.__name__ + '(' + hex(self.arg) + ')'
def __hash__(self):
return hash(self.arg)
@classmethod
def maxcast(cls, c2):
c2 = c2.__class__
if cls.size > c2.size:
return cls
else:
return c2
def __eq__(self, y):
if isinstance(y, moduint):
return self.arg == y.arg
return self.arg == y
def __ne__(self, y):
        # an explicit __ne__ is required on Python 2 (e.g. 2.7.14)
return not self == y
def __lt__(self, y):
if isinstance(y, moduint):
return self.arg < y.arg
return self.arg < y
def __add__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg + y.arg)
else:
return self.__class__(self.arg + y)
def __and__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg & y.arg)
else:
return self.__class__(self.arg & y)
    def __div__(self, y):
        # Python: 8 / -7 == -2 (C-like: -1)
        # int(float) trick cannot be used, due to information loss
        den = int(y)
        num = int(self)
        result_sign = 1 if (den * num) >= 0 else -1
        cls = self.__class__
        if isinstance(y, moduint):
            cls = self.maxcast(y)
        return cls(abs(num) // abs(den) * result_sign)
def __floordiv__(self, y):
return self.__div__(y)
def __int__(self):
return int(self.arg)
def __long__(self):
return int(self.arg)
def __index__(self):
return int(self.arg)
def __invert__(self):
return self.__class__(~self.arg)
def __lshift__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg << y.arg)
else:
return self.__class__(self.arg << y)
def __mod__(self, y):
# See __div__ for implementation choice
cls = self.__class__
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg - y * (self // y))
def __mul__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg * y.arg)
else:
return self.__class__(self.arg * y)
def __neg__(self):
return self.__class__(-self.arg)
def __or__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg | y.arg)
else:
return self.__class__(self.arg | y)
def __radd__(self, y):
return self.__add__(y)
def __rand__(self, y):
return self.__and__(y)
def __rdiv__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(y.arg // self.arg)
else:
return self.__class__(y // self.arg)
def __rfloordiv__(self, y):
return self.__rdiv__(y)
def __rlshift__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(y.arg << self.arg)
else:
return self.__class__(y << self.arg)
def __rmod__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(y.arg % self.arg)
else:
return self.__class__(y % self.arg)
def __rmul__(self, y):
return self.__mul__(y)
def __ror__(self, y):
return self.__or__(y)
def __rrshift__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(y.arg >> self.arg)
else:
return self.__class__(y >> self.arg)
def __rshift__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg >> y.arg)
else:
return self.__class__(self.arg >> y)
def __rsub__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(y.arg - self.arg)
else:
return self.__class__(y - self.arg)
def __rxor__(self, y):
return self.__xor__(y)
def __sub__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg - y.arg)
else:
return self.__class__(self.arg - y)
def __xor__(self, y):
if isinstance(y, moduint):
cls = self.maxcast(y)
return cls(self.arg ^ y.arg)
else:
return self.__class__(self.arg ^ y)
def __hex__(self):
return hex(self.arg)
def __abs__(self):
return abs(self.arg)
def __rpow__(self, v):
return v ** self.arg
def __pow__(self, v):
return self.__class__(self.arg ** v)
class modint(moduint):
def __init__(self, arg):
if isinstance(arg, moduint):
arg = arg.arg
a = arg % self.__class__.limit
if a >= self.__class__.limit // 2:
a -= self.__class__.limit
self.arg = a
assert(
self.arg >= -self.__class__.limit // 2 and
self.arg < self.__class__.limit
)
def is_modint(a):
return isinstance(a, moduint)
def size2mask(size):
return (1 << size) - 1
mod_size2uint = {}
mod_size2int = {}
mod_uint2size = {}
mod_int2size = {}
def define_int(size):
"""Build the 'modint' instance corresponding to size @size"""
global mod_size2int, mod_int2size
name = 'int%d' % size
cls = type(name, (modint,), {"size": size, "limit": 1 << size})
globals()[name] = cls
mod_size2int[size] = cls
mod_int2size[cls] = size
return cls
def define_uint(size):
"""Build the 'moduint' instance corresponding to size @size"""
global mod_size2uint, mod_uint2size
name = 'uint%d' % size
cls = type(name, (moduint,), {"size": size, "limit": 1 << size})
globals()[name] = cls
mod_size2uint[size] = cls
mod_uint2size[cls] = size
return cls
def define_common_int():
"Define common int"
common_int = range(1, 257)
for i in common_int:
define_int(i)
for i in common_int:
define_uint(i)
define_common_int()
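# A short, hand-checked usage sketch of the generated classes (our addition):
if __name__ == '__main__':
    assert uint8(0xff) + 1 == uint8(0)  # arithmetic wraps modulo 2**8
    assert int8(0x80) == -128  # signed types reinterpret the top bit
    assert isinstance(uint8(2) + uint16(3), uint16)  # maxcast widens results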
| mrphrazer/miasm | miasm/expression/modint.py | Python | gpl-2.0 | 6,503 |
from hitchnode.node_service import NpmService
from hitchnode.node_package import NodePackage
from hitchnode.node_service import StaticNodeServer
UNIXPACKAGES = []
| hitchtest/hitchnode | hitchnode/__init__.py | Python | agpl-3.0 | 164 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay functions for rewriting fake quantized ops."""
import numpy as np
import tvm
from tvm import relay
from tvm.ir import TensorAffineType, TupleAffineType
# import to register canonicalization funcs for fq2i
# pylint: disable=unused-import
from tvm.relay.qnn.op import canonicalizations
from tvm.tir import bijective_layout
from ..op import register_fake_quantization_to_integer
def fold_constant(expr):
return relay.transform.FoldConstantExpr(expr, tvm.IRModule())
def get_zeros(scale):
return fold_constant(relay.op.cast(relay.op.zeros_like(scale), "int32"))
def infer_shape(expr):
return relay.transform.InferType()(tvm.IRModule.from_expr(expr))["main"].body.checked_type.shape
def approx_equal(x, y):
x = fold_constant(x)
y = fold_constant(y)
if isinstance(x, relay.Constant) and isinstance(y, relay.Constant):
equal = np.allclose(x.data.asnumpy(), y.data.asnumpy())
else:
equal = tvm.ir.structural_equal(x, y)
return equal
@register_fake_quantization_to_integer("qnn.dequantize")
def dequantize(expr, type_map):
"""Remove dequantize op"""
out = expr.args[0]
t = type_map[expr]
return [out, t]
@register_fake_quantization_to_integer("qnn.quantize")
def quantize(expr, type_map):
"""Turn a quantize op into requantize or remove it"""
out = expr.args[0]
t = type_map[out]
in_scale = fold_constant(t.scale)
in_zero_point = fold_constant(t.zero_point)
if not (
approx_equal(in_scale, expr.args[1])
and approx_equal(in_zero_point, expr.args[2])
and tvm.ir.structural_equal(t.dtype, expr.attrs.out_dtype)
):
out = relay.qnn.op.requantize(
out,
in_scale,
in_zero_point,
expr.args[1],
expr.args[2],
out_dtype=expr.attrs.out_dtype,
axis=t.axis,
)
return [
out,
TensorAffineType(expr.args[1], expr.args[2], expr.attrs.out_dtype, expr.attrs.axis),
]
def register_unary_identity(op_name):
def identity(expr, type_map):
assert len(expr.args) == 1
arg = expr.args[0]
t = type_map[arg]
return [expr, t]
return register_fake_quantization_to_integer(op_name, identity)
register_unary_identity("reshape")
register_unary_identity("squeeze")
register_unary_identity("strided_slice")
register_unary_identity("transpose")
register_unary_identity("expand_dims")
register_unary_identity("nn.max_pool2d")
register_unary_identity("nn.batch_flatten")
register_unary_identity("nn.depth_to_space")
register_unary_identity("max")
register_unary_identity("min")
@register_fake_quantization_to_integer("nn.avg_pool2d")
def avgpool2d(expr, type_map):
"""Rewrite a avgpool op"""
arg = expr.args[0]
t = type_map[arg]
arg = relay.op.cast(arg, "int32")
out = relay.op.nn.avg_pool2d(arg, **expr.attrs)
out = relay.op.cast(out, t.dtype)
return [out, t]
@register_fake_quantization_to_integer("nn.global_avg_pool2d")
def global_avgpool2d(expr, type_map):
"""Rewrite a global_avgpool op"""
arg = expr.args[0]
t = type_map[arg]
arg = relay.op.cast(arg, "int32")
out = relay.op.nn.global_avg_pool2d(arg)
out = relay.op.cast(out, t.dtype)
return [out, t]
@register_fake_quantization_to_integer("broadcast_to")
def broadcast_to(expr, type_map):
"""Rewrite a broadcast_to op"""
arg = expr.args[0]
t = type_map[arg]
shape = expr.attrs.shape
out = relay.op.broadcast_to(arg, shape)
return [out, t]
@register_fake_quantization_to_integer("nn.bias_add")
def bias_add(expr, type_map):
"""Rewrite a bias_add op"""
x, b = expr.args
x_t = type_map[x]
b_t = type_map[b]
in_scale = fold_constant(x_t.scale)
in_zero_point = fold_constant(x_t.zero_point)
if not (
approx_equal(x_t.scale, b_t.scale)
and approx_equal(x_t.zero_point, b_t.zero_point)
and tvm.ir.structural_equal(x_t.dtype, b_t.dtype)
):
b = relay.qnn.op.requantize(
b,
b_t.scale,
b_t.zero_point,
in_scale,
in_zero_point,
out_dtype=x_t.dtype,
axis=0,
)
out = relay.op.nn.bias_add(x, b, **expr.attrs)
return [out, x_t]
@register_fake_quantization_to_integer("nn.conv2d")
def conv2d(expr, type_map):
"""Rewrite a conv2d op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
conv_scale = fold_constant(x_t.scale * w_t.scale)
conv_zp = get_zeros(conv_scale)
out = relay.qnn.op.conv2d(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.conv2d_transpose")
def conv2d_transpose(expr, type_map):
"""Rewrite a conv2d_transpose op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
conv_scale = fold_constant(x_t.scale * w_t.scale)
conv_zp = get_zeros(conv_scale)
out = relay.qnn.op.conv2d_transpose(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.dense")
def dense(expr, type_map):
"""Rewrite a dense op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
dense_scale = fold_constant(x_t.scale * w_t.scale)
dense_zp = get_zeros(dense_scale)
out = relay.qnn.op.dense(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
return [out, TensorAffineType(dense_scale, dense_zp, out.attrs.out_dtype, 1)]
@register_fake_quantization_to_integer("nn.batch_matmul")
def batch_matmul(expr, type_map):
"""Rewrite a batch_matmul op"""
x, y = expr.args
x_t = type_map[x]
y_t = type_map[y]
matmul_scale = fold_constant(x_t.scale * y_t.scale)
matmul_zp = relay.const(0)
out = relay.qnn.op.batch_matmul(x, y, x_t.zero_point, y_t.zero_point, x_t.scale, y_t.scale)
return [out, TensorAffineType(matmul_scale, matmul_zp, out.attrs.out_dtype, x_t.axis)]
@register_fake_quantization_to_integer("concatenate")
def concat(expr, type_map):
"""Rewrite a concat op"""
scales = []
zps = []
tuple_type = type_map[expr.args[0]]
for t in tuple_type.types:
scales.append(t.scale)
zps.append(t.zero_point)
out_type = type_map[expr]
out = relay.qnn.op.concatenate(
expr.args[0],
relay.Tuple(scales),
relay.Tuple(zps),
out_type.scale,
out_type.zero_point,
**expr.attrs,
)
return [out, out_type]
@register_fake_quantization_to_integer("topk")
def topk(expr, type_map):
"""Rewrite a topk op"""
arg = expr.args[0]
t = type_map[arg]
attrs = {**expr.attrs}
assert "ret_type" in attrs and attrs["ret_type"] == "values"
return [expr, t]
@register_fake_quantization_to_integer("split")
def split(expr, type_map):
"""Rewrite a split op"""
arg = expr.args[0]
t = type_map[arg]
attrs = {**expr.attrs}
if isinstance(attrs["indices_or_sections"], tvm.tir.IntImm):
num_split = attrs["indices_or_sections"].value
attrs["indices_or_sections"] = num_split
else:
num_split = len(attrs["indices_or_sections"]) + 1
return [expr, TupleAffineType([t] * num_split)]
@register_fake_quantization_to_integer("clip")
def clip(expr, type_map):
"""Rewrite a clip op"""
arg = expr.args[0]
t = type_map[arg]
amin = expr.attrs.a_min
amax = expr.attrs.a_max
scale = fold_constant(t.scale)
z_p = fold_constant(t.zero_point)
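    # fast path: scalar scale/zero-point lets the clip bounds fold into plain
    # integer constants; otherwise the float bounds are quantized (per channel)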
if (
isinstance(scale, relay.expr.Constant)
and scale.data.numpy().size == 1
and isinstance(z_p, relay.expr.Constant)
and z_p.data.numpy().size == 1
):
scale = scale.data.numpy().item()
z_p = z_p.data.numpy().item()
new_min = int(amin / scale + z_p)
new_max = int(amax / scale + z_p)
out = relay.op.clip(arg, new_min, new_max)
else:
if not isinstance(amin, relay.expr.Constant):
amin = relay.op.const(amin)
if not isinstance(amax, relay.expr.Constant):
amax = relay.op.const(amax)
scale_shape = infer_shape(scale)
if len(scale_shape) > 0 and scale_shape[0] > 1:
b_shape = [1] * len(infer_shape(arg))
b_shape[t.axis] = -1
amin = relay.op.reshape(relay.op.broadcast_to(amin, scale_shape), b_shape)
amax = relay.op.reshape(relay.op.broadcast_to(amax, scale_shape), b_shape)
amin = relay.qnn.op.quantize(amin, scale, z_p, t.axis, t.dtype)
amax = relay.qnn.op.quantize(amax, scale, z_p, t.axis, t.dtype)
out = relay.op.minimum(relay.op.maximum(arg, fold_constant(amin)), fold_constant(amax))
return [out, t]
@register_fake_quantization_to_integer("nn.relu")
def relu(expr, type_map):
"""Rewrite a relu op"""
arg = expr.args[0]
t = type_map[arg]
scale_shape = infer_shape(t.scale)
z_p = t.zero_point
assert len(scale_shape) <= 1
if len(scale_shape) == 1 and scale_shape[0] > 1:
b_shape = [1] * len(infer_shape(arg))
b_shape[t.axis] = -1
z_p = relay.op.reshape(relay.op.broadcast_to(z_p, scale_shape), b_shape)
zero = relay.op.cast(z_p, t.dtype)
return [relay.op.maximum(arg, fold_constant(zero)), t]
@register_fake_quantization_to_integer("nn.pad")
def pad(expr, type_map):
"""Rewite an nn.pad op"""
arg = expr.args[0]
t = type_map[arg]
pad_value = expr.args[1]
## TF2ONNX will sometimes implement the pad_value as a constant without a quantize
## To support that, the pass lets branches that terminate in a constant through
if pad_value in type_map:
        ## if the pad value is calculated from a dequantize op, it should be in the type map
        ## and we need to make sure its affine type matches the arg
pad_t = type_map[pad_value]
if not tvm.ir.structural_equal(t, pad_t):
pad_value = relay.qnn.op.requantize(
pad_value,
pad_t.scale,
pad_t.zero_point,
t.scale,
t.zero_point,
out_dtype=t.dtype,
axis=pad_t.axis,
)
else:
## If the pad-value is a constant, we need to quantize it
assert isinstance(pad_value, relay.expr.Constant)
pad_value = relay.qnn.op.quantize(pad_value, t.scale, t.zero_point)
out = relay.op.nn.pad(arg, pad_value=pad_value, **expr.attrs)
return [out, t]
def get_binary_types(expr, type_map):
"""Get Affine types of a binary op's inputs and unify them"""
    ## Support the case where one input is quantized and the other is a constant float
left = expr.args[0]
right = expr.args[1]
left_t = None
right_t = None
if left in type_map:
left_t = type_map[left]
if right in type_map:
right_t = type_map[right]
out_t = type_map[expr]
if left_t is None and right_t is None:
raise TypeError("neither input is quantized!")
if left_t is None:
assert isinstance(left, relay.expr.Constant)
left = relay.qnn.op.quantize(
left, right_t.scale, right_t.zero_point, out_dtype=right_t.dtype
)
left_t = right_t
out_t = right_t
if right_t is None:
assert isinstance(right, relay.expr.Constant)
right = relay.qnn.op.quantize(
right, left_t.scale, left_t.zero_point, out_dtype=left_t.dtype
)
right_t = left_t
out_t = left_t
# Handle the case of mismatched inputs
    if left_t.dtype != out_t.dtype:
out_t = left_t
return left, right, left_t, right_t, out_t
def register_binary_qnn(op_name, op):
"""Register a Binary Op that converts to QNN"""
def binary(expr, type_map):
left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
out = op(
left,
right,
left_t.scale,
left_t.zero_point,
right_t.scale,
right_t.zero_point,
out_t.scale,
out_t.zero_point,
)
return [out, out_t]
return register_fake_quantization_to_integer(op_name, binary)
# Use lambdas here to avoid a circular import problem
# pylint: disable=unnecessary-lambda
register_binary_qnn("add", lambda *args: relay.qnn.op.add(*args))
register_binary_qnn("multiply", lambda *args: relay.qnn.op.mul(*args))
register_binary_qnn("subtract", lambda *args: relay.qnn.op.subtract(*args))
def register_binary_identity(op_name, op):
"""Register a binary op that works directly on int8"""
def binary(expr, type_map):
left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
if left_t != out_t:
left = relay.qnn.op.requantize(
left,
left_t.scale,
left_t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype=out_t.dtype,
axis=left_t.axis,
)
if right_t != out_t:
right = relay.qnn.op.requantize(
right,
right_t.scale,
right_t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype=out_t.dtype,
axis=right_t.axis,
)
out = op(left, right)
return [out, out_t]
return register_fake_quantization_to_integer(op_name, binary)
register_binary_identity("minimum", relay.op.minimum)
register_binary_identity("maximum", relay.op.maximum)
def register_unary_qnn(op_name, op):
"""Rewrite a unary op"""
def unary(expr, type_map):
arg = expr.args[0]
x_t = type_map[arg]
out_t = type_map[expr]
out = op(
arg,
x_t.scale,
x_t.zero_point,
out_t.scale,
out_t.zero_point,
)
return [out, x_t]
return register_fake_quantization_to_integer(op_name, unary)
register_unary_qnn("sqrt", relay.qnn.op.sqrt)
register_unary_qnn("rsqrt", relay.qnn.op.rsqrt)
register_unary_qnn("exp", relay.qnn.op.exp)
register_unary_qnn("erf", relay.qnn.op.erf)
register_unary_qnn("sigmoid", relay.qnn.op.sigmoid)
register_unary_qnn("tanh", relay.qnn.op.tanh)
| dmlc/tvm | python/tvm/relay/transform/fake_quantization_to_integer.py | Python | apache-2.0 | 15,868 |
import pylab
import numpy
import ardustat_library_simple as ard
import time
import sys
from glob import glob
import os
def get_latest():
data_files = glob("*.dat")
high_time = 0
recent_file = "foo"
for d in data_files:
if os.path.getmtime(d) > high_time:
high_time = os.path.getmtime(d)
recent_file = d
return recent_file
try:
file_name = sys.argv[1]
except Exception, err:
file_name = get_latest()
print "defaulting to most recent file:", file_name
data = open(file_name).read()
data = data.split("\n")
times = []
potential = []
current = []
cycles = []
this_cycle = 0
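# each parseable line is assumed to look like "time,potential,current,cycle"
# (inferred from the parsing below; lines that fail to parse are skipped)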
for d in data:
try:
parts = d.split(",")
times.append(parts[0])
potential.append(parts[1])
current.append(parts[2])
cycle = int(parts[3])
if cycle != this_cycle:
this_cycle = cycle
cycles.append({'times':times,'potential':potential,'current':current})
times = []
potential = []
current = []
except Exception, err:
		foo = err # swallow parse errors from header/blank/trailing lines
cycles.append({'times':times,'potential':potential,'current':current})
counter = 1
for c in cycles:
pylab.plot(c['potential'],c['current'],label='Cycle '+str(counter))
pylab.legend(loc="best")
pylab.ylabel("Current (A)")
pylab.xlabel("Potential (V)")
counter += 1
pylab.savefig("out-cv.png") | kjiang8/Ardustat | Deprecated_Unsupported/Python_Client/plot_cv.py | Python | bsd-2-clause | 1,246 |
"""engine.SCons.Tool.icc
Tool-specific initialization for the OS/2 icc compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/icc.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
from . import cc
def generate(env):
"""Add Builders and construction variables for the OS/2 to an Environment."""
cc.generate(env)
env['CC'] = 'icc'
env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'
env['CXXCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo$TARGET'
env['CPPDEFPREFIX'] = '/D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '/I'
env['INCSUFFIX'] = ''
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cc'
def exists(env):
return env.Detect('icc')
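# Typical use (illustrative): request the tool when constructing an
# Environment rather than importing this module directly, e.g.:
#   env = Environment(tools=['icc'])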
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mapycz/mapnik | scons/scons-local-3.0.1/SCons/Tool/icc.py | Python | lgpl-2.1 | 2,190 |
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ''))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
'bower',
'bower.tests.test_app',
]
SECRET_KEY = 'foobar'
TEST_RUNNER = 'discover_runner.DiscoverRunner'
| kitsunde/jack-bower | bower/tests/test_settings.py | Python | mit | 783 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from q2_types.feature_table import (FeatureTable, Frequency, RelativeFrequency,
PresenceAbsence, BIOMV210DirFmt)
from qiime2.plugin.testing import TestPluginBase
class TestTypes(TestPluginBase):
package = 'q2_types.feature_table.tests'
def test_feature_table_semantic_type_registration(self):
self.assertRegisteredSemanticType(FeatureTable)
def test_frequency_semantic_type_registration(self):
self.assertRegisteredSemanticType(Frequency)
def test_relative_frequency_semantic_type_registration(self):
self.assertRegisteredSemanticType(RelativeFrequency)
def test_presence_absence_semantic_type_registration(self):
self.assertRegisteredSemanticType(PresenceAbsence)
def test_feature_table_semantic_type_to_v210_format_registration(self):
self.assertSemanticTypeRegisteredToFormat(
FeatureTable[Frequency | RelativeFrequency | PresenceAbsence],
BIOMV210DirFmt)
if __name__ == "__main__":
unittest.main()
| jairideout/q2-types | q2_types/feature_table/tests/test_type.py | Python | bsd-3-clause | 1,406 |
import sys
from ReadFile import ReadAdjacentList
from Degree import Degree
def MaximalNonBranchingPaths(graph):
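    """Return all maximal non-branching paths (and isolated cycles) in graph.

    graph is an adjacency dict mapping each node to a list of its successors;
    a node is 1-in-1-out when it has exactly one incoming and one outgoing edge.
    """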
paths = []
degree = Degree(graph)
    visited = [] # nodes already placed on a path or cycle
for v in graph:
if degree[v][0] != 1 or degree[v][1] != 1: # if v is not a 1-in-1-out node
visited.append(v)
if degree[v][1] > 0:
for w in graph[v]:
path = [v, w]
while degree[w][0] == 1 and degree[w][1] == 1:
visited.append(w)
w = graph[w][0]
path.append(w)
paths.append(path)
for v in graph:
if degree[v][0] == 1 and degree[v][1] == 1:
if v not in visited:
visited.append(v)
w = graph[v][0]
cycle = [v]
while degree[w][0] == 1 and degree[w][1] == 1:
visited.append(w)
cycle.append(w)
if v == w:
break
w = graph[w][0]
paths.append(cycle)
return paths
if __name__ == "__main__":
file = sys.argv[1]
with open(file) as f:
graph = ReadAdjacentList(f)
paths = MaximalNonBranchingPaths(graph)
for path in paths:
print ' -> '.join(path)
| Shenmolu/rosalind | MaximalNonBranchingPaths.py | Python | gpl-3.0 | 1,395 |
import add_code_to_python_process
print add_code_to_python_process.run_python_code(3736, "print(20)", connect_debugger_tracing=False) | dannyperry571/theapprentice | script.module.pydevd/lib/pydevd_attach_to_process/_check.py | Python | gpl-2.0 | 133 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from datetime import datetime, timedelta
from mediadrop.model import Group
from mediadrop.lib.test.db_testcase import DBTestCase
from mediadrop.lib.test.pythonic_testcase import *
class GroupExampleTest(DBTestCase):
def test_can_create_example_group(self):
group = Group.example()
assert_not_none(group.group_id)
assert_equals(u'baz_users', group.group_name)
assert_equals(u'Baz Users', group.display_name)
assert_almost_equals(datetime.now(), group.created,
max_delta=timedelta(seconds=1))
def test_can_override_example_data(self):
group = Group.example(name=u'bar', display_name=u'Bar Foo')
assert_equals(u'Bar Foo', group.display_name)
assert_equals(u'bar', group.group_name)
import unittest
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GroupExampleTest))
return suite
| jobsafran/mediadrop | mediadrop/model/tests/group_example_test.py | Python | gpl-3.0 | 1,298 |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'android/arch/lifecycle'
_MODULE_NAME = 'livedata'
_FILE_EXT = 'aar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
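    # e.g. '1.1.1.cr1' -> '1.1.1': the '.cr1' suffix is the local patch marker
    # appended by do_latest(), not part of the upstream Maven version.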
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
| chromium/chromium | third_party/android_deps/libs/android_arch_lifecycle_livedata/3pp/fetch.py | Python | bsd-3-clause | 2,496 |
import logging
from django.db import migrations
from django.apps import apps
class CreateView(migrations.CreateModel):
def database_forwards(self, app_label, schema_editor, from_state, to_state):
fake_model = to_state.apps.get_model(app_label, self.name)
if not self.allow_migrate_model(
schema_editor.connection.alias, fake_model):
return
model = self._get_model(fake_model, app_label, to_state)
self._drop_view(fake_model, model, schema_editor)
if hasattr(model, 'view'):
self._create_standard_view(model, schema_editor)
elif hasattr(model, 'get_view_str'):
self._create_view_from_raw_sql(model.get_view_str(), schema_editor)
else:
raise Exception('{} has neither view nor get_view_str'.format(
model))
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        fake_model = from_state.apps.get_model(app_label, self.name)
        model = self._get_model(fake_model, app_label, to_state)
self._drop_view(fake_model, model, schema_editor)
    def _get_model(self, fake_model, app_label, state):
models = apps.get_app_config(app_label).models_module
if hasattr(models, self.name):
return getattr(models, self.name)
# TODO: identify model more reliably and support more than 1 level
for submodule in models.__dict__.values():
if hasattr(submodule, self.name):
return getattr(submodule, self.name)
logging.warning('Using fake model, this may fail with inherited views')
return fake_model
def _drop_view(self, fake_model, model, schema_editor):
if hasattr(model, 'drop_view_sql'):
sql_template = model.drop_view_sql
else:
sql_template = 'DROP VIEW IF EXISTS %(table)s'
args = {
'table': schema_editor.quote_name(fake_model._meta.db_table),
}
sql = sql_template % args
schema_editor.execute(sql, None)
def _create_standard_view(self, model, schema_editor):
sql_template = 'CREATE VIEW %(table)s AS %(definition)s'
qs = str(model.view())
args = {
'table': schema_editor.quote_name(model._meta.db_table),
'definition': qs,
}
sql = sql_template % args
self._create_view_from_raw_sql(sql, schema_editor)
def _create_view_from_raw_sql(self, sql, schema_editor):
schema_editor.execute(sql, None)
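# Illustrative usage in a migration (a sketch; assumes a MyView model class in
# the app's models module that defines either `view()` or `get_view_str()`):
#
#     class Migration(migrations.Migration):
#         operations = [
#             CreateView(name='MyView', fields=[]),
#         ]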
| manuelnaranjo/django-database-view | dbview/helpers.py | Python | mit | 2,533 |
import glob
from datetime import datetime
import re
from operator import itemgetter
from netCDF4 import Dataset
import numpy
import click
from pyproj import Proj
import rasterio
from rasterio.crs import CRS
from rasterio.windows import get_data_window, union
from trefoil.cli import cli
from trefoil.netcdf.variable import SpatialCoordinateVariables, DateVariable
from trefoil.netcdf.crs import set_crs
from trefoil.netcdf.utilities import get_pack_atts, get_fill_value
from trefoil.geometry.bbox import BBox
DATE_REGEX = re.compile('%[yYmd]') # TODO: add all appropriate strftime directives
@cli.command(short_help='Convert rasters to NetCDF')
@click.argument('files')
@click.argument('output', type=click.Path())
@click.argument('variable', type=click.STRING)
@click.option('--dtype', type=click.Choice(['float32', 'float64', 'int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32']), default=None, help='Data type of output variable. Will be inferred from input raster if not provided.')
@click.option('--src-crs', default=None, type=click.STRING, help='Source coordinate reference system (limited to EPSG codes, e.g., EPSG:4326). Will be read from file if not provided.')
@click.option('--x', 'x_name', type=click.STRING, help='Name of x dimension and variable (default: lon or x)')
@click.option('--y', 'y_name', type=click.STRING, help='Name of y dimension and variable (default: lat or y)')
@click.option('--z', 'z_name', type=click.STRING, default='time', help='Name of z dimension and variable', show_default=True)
@click.option('--datetime-pattern', type=click.STRING, help='strftime-style pattern to parse date and time from the filename')
@click.option('--netcdf3', is_flag=True, default=False, help='Output in NetCDF3 version instead of NetCDF4')
@click.option('--zip', 'compress', is_flag=True, default=False, help='Use zlib compression of data and coordinate variables')
@click.option('--packed', is_flag=True, default=False, help='Pack floating point values into an integer (will lose precision)')
@click.option('--xy-dtype', type=click.Choice(['float32', 'float64']), default='float32', help='Data type of spatial coordinate variables.', show_default=True)
# @click.option('--z-dtype', type=click.Choice(['float32', 'float64', 'int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32']), default=None, help='Data type of z variable. Will be inferred from values if not provided.')
@click.option('--calendar', type=click.STRING, default='standard', help='Calendar to use if z dimension is a date type', show_default=True)
@click.option('--autocrop', is_flag=True, default=False, help='Automatically crop to data bounds (trim NODATA)')
def to_netcdf(
files,
output,
variable,
dtype,
src_crs,
x_name,
y_name,
z_name,
datetime_pattern,
netcdf3,
compress,
packed,
xy_dtype,
# z_dtype,
calendar,
autocrop):
"""
Convert rasters to NetCDF and stack them according to a dimension.
X and Y dimension names will be named according to the source projection (lon, lat if geographic projection, x, y
otherwise) unless specified.
Will overwrite an existing NetCDF file.
Only the first band of the input will be turned into a NetCDF file.
"""
# TODO: add format string template to this to parse out components
filenames = list(glob.glob(files))
if not filenames:
raise click.BadParameter('No files found matching that pattern', param='files', param_hint='FILES')
z_values = []
if datetime_pattern is not None:
datetimes = (datetime.strptime(x, datetime_pattern) for x in filenames)
        # Sort both datetimes and filenames by datetimes
z_values, filenames = [list(x) for x in zip(*sorted(zip(datetimes, filenames), key=itemgetter(0)))]
items = tuple(enumerate(filenames))
has_z = len(filenames) > 1
if has_z and not z_name:
raise click.BadParameter('Required when > 1 input file', param='--z', param_hint='--z')
if src_crs:
src_crs = CRS.from_string(src_crs)
template_ds = rasterio.open(filenames[0])
src_crs = template_ds.crs or src_crs
if not src_crs:
raise click.BadParameter('Required when no CRS information available in source files', param='--src-crs',
param_hint='--src-crs')
prj = Proj(**src_crs.to_dict())
bounds = template_ds.bounds
width = template_ds.width
height = template_ds.height
window = None
src_dtype = numpy.dtype(template_ds.dtypes[0])
dtype = numpy.dtype(dtype) if dtype else src_dtype
if dtype == src_dtype:
fill_value = template_ds.nodata
if src_dtype.kind in ('u', 'i'):
# nodata always comes from rasterio as floating point
fill_value = int(fill_value)
else:
fill_value = get_fill_value(dtype)
x_name = x_name or ('lon' if src_crs.is_geographic else 'x')
y_name = y_name or ('lat' if src_crs.is_geographic else 'y')
var_kwargs = {
'fill_value': fill_value
}
format = 'NETCDF3_CLASSIC' if netcdf3 else 'NETCDF4'
with Dataset(output, 'w', format=format) as out:
if packed or autocrop:
mins = []
maxs = []
windows = []
click.echo('Inspecting input datasets...')
with click.progressbar(items) as iter:
for index, filename in iter:
with rasterio.open(filename) as src:
data = src.read(1, masked=True)
if packed:
mins.append(data.min())
maxs.append(data.max())
if autocrop:
data_window = get_data_window(data)
if data_window != ((0, height), (0, width)):
windows.append(data_window)
if packed:
min_value = min(mins)
max_value = max(maxs)
scale, offset = get_pack_atts(dtype, min_value, max_value)
if autocrop and windows:
window = union(windows)
bounds = template_ds.window_bounds(window)
height = window[0][1] - window[0][0]
width = window[1][1] - window[1][0]
coords = SpatialCoordinateVariables.from_bbox(BBox(bounds, prj), width, height, xy_dtype)
coords.add_to_dataset(out, x_name, y_name, zlib=compress)
var_dimensions = [y_name, x_name]
shape = list(coords.shape)
if has_z:
shape.insert(0, len(filenames))
out.createDimension(z_name, shape[0])
var_dimensions.insert(0, z_name)
if z_values:
dates = DateVariable(numpy.array(z_values),
units_start_date=z_values[0], calendar=calendar)
dates.add_to_dataset(out, z_name)
click.echo('Creating {0}:{1} with shape {2}'.format(output, variable, shape))
out_var = out.createVariable(variable, dtype, dimensions=var_dimensions,
zlib=compress, **var_kwargs)
set_crs(out, variable, prj, set_proj4_att=True)
if packed:
out_var.setncattr('scale_factor', scale)
out_var.setncattr('add_offset', offset)
click.echo('Copying data from input files...')
with click.progressbar(items) as iter:
for index, filename in iter:
with rasterio.open(filename) as src:
data = src.read(1, masked=True, window=window)
if has_z:
out_var[index, :] = data
else:
out_var[:] = data
out.sync()
| consbio/clover | trefoil/cli/convert.py | Python | bsd-3-clause | 7,787 |
_is_init = 0
def init():
global list_cameras, Camera, colorspace, _is_init
import os,sys
use_opencv = False
use_vidcapture = False
use__camera = True
if sys.platform == 'win32':
use_vidcapture = True
elif "linux" in sys.platform:
use__camera = True
else:
use_opencv = True
# see if we have any user specified defaults in environments.
camera_env = os.environ.get("PYGAME_CAMERA", "")
if camera_env == "opencv":
use_opencv = True
if camera_env == "vidcapture":
use_vidcapture = True
# select the camera module to import here.
# the _camera module has some code which can be reused by other modules.
# it will also be the default one.
import _camera
colorspace = _camera.colorspace
if use__camera:
list_cameras = _camera.list_cameras
Camera = _camera.Camera
if use_opencv:
try:
import _camera_opencv_highgui
except:
_camera_opencv_highgui = None
if _camera_opencv_highgui:
_camera_opencv_highgui.init()
list_cameras = _camera_opencv_highgui.list_cameras
Camera = _camera_opencv_highgui.Camera
if use_vidcapture:
try:
import _camera_vidcapture
except:
_camera_vidcapture = None
if _camera_vidcapture:
_camera_vidcapture.init()
list_cameras = _camera_vidcapture.list_cameras
Camera = _camera_vidcapture.Camera
_is_init = 1
pass
def quit():
global _is_init
_is_init = 0
pass
def _check_init():
global _is_init
if not _is_init:
raise ValueError("Need to call camera.init() before using.")
def list_cameras():
"""
"""
_check_init()
raise NotImplementedError()
class Camera:
    def __init__(self, device=0, size=(320, 200), mode="RGB"):
"""
"""
_check_init()
raise NotImplementedError()
def set_resolution(self, width, height):
"""Sets the capture resolution. (without dialog)
"""
pass
def start(self):
"""
"""
def stop(self):
"""
"""
def get_buffer(self):
"""
"""
def set_controls(self, **kwargs):
"""
"""
def get_image(self, dest_surf = None):
"""
"""
def get_surface(self, dest_surf = None):
"""
"""
if __name__ == "__main__":
# try and use this camera stuff with the pygame camera example.
import pygame.examples.camera
#pygame.camera.Camera = Camera
#pygame.camera.list_cameras = list_cameras
pygame.examples.camera.main()
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/pygame/camera.py | Python | gpl-2.0 | 2,738 |
import re
import pytest
from user_sync.certgen import *
from user_sync.error import AssertionException
@pytest.fixture()
def random_subject():
return get_subject_fields(randomize=True)
@pytest.fixture()
def key():
return create_key()
def test_get_subject_fields(random_subject):
assert len(random_subject['countryName']) == 2
test_keys = set()
for key in random_subject:
assert key not in test_keys
assert re.search('^[a-zA-Z0-9=]{2,}', key)
test_keys.add(key)
def test_create_key(key):
assert key.key_size == 2048
assert key._backend.name == 'openssl'
def test_create_cert(random_subject, key):
cert = create_cert(random_subject, key)
cert_dict = {i.oid._name: i.value for i in cert.subject}
for k, v in six.iteritems(cert_dict):
assert random_subject[k] == v
random_subject['countryName'] = 'usa'
with pytest.raises(AssertionException):
create_cert(random_subject, key)
def test_write_key_to_file(test_resources, key):
write_key_to_file(test_resources['priv_key'], key)
with open(test_resources['priv_key'], 'r') as f:
data = f.read()
opening = '-----BEGIN RSA PRIVATE KEY-----'
ending = '-----END RSA PRIVATE KEY-----'
assert opening in data and ending in data
def test_write_cert_to_file(test_resources, random_subject, key):
public_cert = test_resources['certificate']
cert = create_cert(random_subject, key)
write_cert_to_file(public_cert, cert)
with open(public_cert, 'r') as f:
data = f.read()
opening = '-----BEGIN CERTIFICATE-----'
ending = '-----END CERTIFICATE-----'
assert opening in data and ending in data
| adobe-apiplatform/user-sync.py | tests/test_certgen.py | Python | mit | 1,689 |
# -*- coding: cp1252 -*-
# This file is part of pyTSEB for estimating the resistances to momentum and heat transport
# Copyright 2016 Hector Nieto and contributors listed in the README.md file.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Apr 6 2015
@author: Hector Nieto (hnieto@ias.csic.es)
Modified on Jan 27 2016
@author: Hector Nieto (hnieto@ias.csic.es)
DESCRIPTION
===========
This module includes functions for calculating the resistances for
heat and momentum transport for both One- and Two-Source Energy Balance models.
Additional functions needed in are imported from the following packages
* :doc:`meteoUtils` for the estimation of meteorological variables.
* :doc:`MOsimilarity` for the estimation of the Monin-Obukhov length and stability functions.
PACKAGE CONTENTS
================
Resistances
-----------
* :func:`calc_R_A` Aerodynamic resistance.
* :func:`calc_R_S_Choudhury` [Choudhury1988]_ soil resistance.
* :func:`calc_R_S_McNaughton` [McNaughton1995]_ soil resistance.
* :func:`calc_R_S_Kustas` [Kustas1999]_ soil resistance.
* :func:`calc_R_x_Choudhury` [Choudhury1988]_ canopy boundary layer resistance.
* :func:`calc_R_x_McNaughton` [McNaughton1995]_ canopy boundary layer resistance.
* :func:`calc_R_x_Norman` [Norman1995]_ canopy boundary layer resistance.
Stomatal conductance
--------------------
* :func:`calc_stomatal_conductance_TSEB` TSEB stomatal conductance.
* :func:`calc_stomatal_conductance_OSEB` OSEB stomatal conductance.
* :func:`calc_coef_m2mmol` Conversion factor for stomatal conductance from m s-1 to mmol m-2 s-1.
Estimation of roughness
-----------------------
* :func:`calc_d_0` Zero-plane displacement height.
* :func:`calc_roughness` Roughness for different land cover types.
* :func:`calc_z_0M` Aerodynamic roughness length.
* :func:`raupach` Roughness and displacement height factors for discontinuous canopies.
"""
from math import pi
import numpy as np
from .MO_similarity import calc_Psi_H
from .meteo_utils import calc_rho, calc_c_p, calc_psicr, calc_lambda, calc_vapor_pressure
#==============================================================================
# List of constants used in TSEB model and sub-routines
#==============================================================================
# Land Cover Classes
CROP = 11
GRASS = 2
SHRUB = 5
CONIFER = 4
BROADLEAVED = 3
# Leaf stomata distribution
AMPHISTOMATOUS = 2
HYPOSTOMATOUS = 1
# von Karman's constant
k = 0.4
# acceleration of gravity (m s-2)
gravity = 9.8
# Universal gas constant (kPa m3 mol-1 K-1)
R_u = 0.0083144
CM_a = 0.01 # Choudhury and Monteith 1988 leaf drag coefficient
KN_b = 0.012 # Value proposed in Kustas et al 1999
KN_c = 0.0025 # Coefficient from Norman et al. 1995
KN_C_dash = 90.0 # value proposed in Norman et al. 1995
def calc_d_0(h_C):
''' Zero-plane displacement height
Calculates the zero-plane displacement height based on a
fixed ratio of canopy height.
Parameters
----------
h_C : float
canopy height (m).
Returns
-------
d_0 : float
zero-plane displacement height (m).'''
d_0 = h_C * 0.65
return np.asarray(d_0)
def calc_roughness(LAI, h_C, w_C=1, landcover=11):
''' Surface roughness and zero displacement height for different vegetated surfaces.
Calculates the roughness using different approaches depending we are dealing with
crops or grasses (fixed ratio of canopy height) or shrubs and forests,depending of LAI
and canopy shape, after [Schaudt2000]_
Parameters
----------
LAI : float
Leaf (Plant) Area Index.
h_C : float
Canopy height (m)
w_C : float, optional
Canopy height to width ratio.
landcover : int, optional
landcover type, use 11 for crops, 2 for grass, 5 for shrubs,
4 for conifer forests and 3 for broadleaved forests.
Returns
-------
z_0M : float
        aerodynamic roughness length for momentum transport (m).
d : float
Zero-plane displacement height (m).
References
----------
.. [Schaudt2000] K.J Schaudt, R.E Dickinson, An approach to deriving roughness length
and zero-plane displacement height from satellite data, prototyped with BOREAS data,
Agricultural and Forest Meteorology, Volume 104, Issue 2, 8 August 2000, Pages 143-155,
http://dx.doi.org/10.1016/S0168-1923(00)00153-2.
'''
# Convert input scalars to numpy arrays
LAI, h_C, w_C, landcover = map(np.asarray, (LAI, h_C, w_C, landcover))
# Initialize fractional cover and horizontal area index
fc, lambda_ = [np.zeros(LAI.shape) for i in range(2)]
# Needleleaf canopies
fc[landcover == CONIFER] = 1. - np.exp(-0.5 * LAI[landcover == CONIFER])
lambda_[landcover == CONIFER] = (2. / pi) * fc[landcover == CONIFER] * \
w_C[landcover == CONIFER]
# Broadleaved canopies
fc[landcover == BROADLEAVED] = 1. - np.exp(-LAI[landcover == BROADLEAVED])
lambda_[landcover == BROADLEAVED] = fc[landcover == BROADLEAVED] * \
w_C[landcover == BROADLEAVED]
# Shrublands
fc[landcover == SHRUB] = 1. - np.exp(-0.5 * LAI[landcover == SHRUB])
lambda_[landcover == SHRUB] = fc[landcover == SHRUB] * \
w_C[landcover == SHRUB]
# Calculation of the Raupach (1994) formulae
z0M_factor, d_factor = raupach(lambda_)
# Calculation of correction factors from Lindroth
fz = np.asarray(0.3299 * LAI**1.5 + 2.1713)
fd = np.asarray(1. - 0.3991 * np.exp(-0.1779 * LAI))
# LAI <= 0
fz[LAI <= 0] = 1.0
fd[LAI <= 0] = 1.0
# LAI >= 0.8775:
fz[LAI >= 0.8775] = 1.6771 * np.exp(-0.1717 * LAI[LAI >= 0.8775]) + 1.
fd[LAI >= 0.8775] = 1. - 0.3991 * np.exp(-0.1779 * LAI[LAI >= 0.8775])
# Application of the correction factors to roughness and displacement
# height
z0M_factor = np.asarray(z0M_factor * fz)
d_factor = np.asarray(d_factor * fd)
# For crops and grass we use a fixed ratio of canopy height
z0M_factor[np.logical_or(landcover == CROP, landcover == GRASS)] = 1. / 8.
d_factor[np.logical_or(landcover == CROP, landcover == GRASS)] = 0.65
    # Calculation of roughness length
z_0M = z0M_factor * h_C
# Calculation of zero plane displacement height
d = d_factor * h_C
return np.asarray(z_0M), np.asarray(d)
def calc_R_A(z_T, ustar, L, d_0, z_0H, useRi=False):
''' Estimates the aerodynamic resistance to heat transport based on the
MO similarity theory.
Parameters
----------
z_T : float
air temperature measurement height (m).
ustar : float
friction velocity (m s-1).
L : float
Monin Obukhov Length or Richardson number for stability (see useRi variable).
d_0 : float
zero-plane displacement height (m).
    z_0H : float
        aerodynamic roughness length for heat transport (m).
useRi : bool, optional
        boolean variable to use Richardson number instead of the MO length.
Returns
-------
R_A : float
        aerodynamic resistance to heat transport in the surface layer (s m-1).
References
----------
.. [Norman1995] J.M. Norman, W.P. Kustas, K.S. Humes, Source approach for estimating
soil and vegetation energy fluxes in observations of directional radiometric
surface temperature, Agricultural and Forest Meteorology, Volume 77, Issues 3-4,
Pages 263-293, http://dx.doi.org/10.1016/0168-1923(95)02265-Y.
'''
# Convert input scalars to numpy arrays
z_T, ustar, L, d_0, z_0H = map(np.asarray, (z_T, ustar, L, d_0, z_0H))
R_A_log = np.asarray(np.log((z_T - d_0) / z_0H))
if (useRi):
# use the approximation Ri ~ (z-d_0)./L from end of section 2.2 from
# Norman et. al., 2000 (DTD paper)
Psi_H = calc_Psi_H(L)
Psi_H0 = calc_Psi_H(L / (z_T - d_0) * z_0H)
else:
# if L -> infinity, z./L-> 0 and there is neutral atmospheric stability
# other atmospheric conditions
L[L == 0] = 1e-36
Psi_H = calc_Psi_H((z_T - d_0) / L)
Psi_H0 = calc_Psi_H(z_0H / L)
#i = np.logical_and(z_star>0, z_T<=z_star)
#Psi_H_star[i] = MO.calc_Psi_H_star(z_T[i], L[i], d_0[i], z_0H[i], z_star[i])
R_A = np.asarray(np.ones(ustar.shape) * float('inf'))
R_A[ustar != 0] = (R_A_log[ustar != 0] - Psi_H[ustar != 0] +
Psi_H0[ustar != 0]) / \
(ustar[ustar != 0] * k)
return np.asarray(R_A)
def calc_R_S_Choudhury(u_star, h_C, z_0M, d_0, zm, z0_soil=0.01, alpha_k=2.0):
''' Aerodynamic resistance at the soil boundary layer.
Estimates the aerodynamic resistance at the soil boundary layer based on the
K-Theory model of [Choudhury1988]_.
Parameters
----------
u_star : float
friction velocity (m s-1).
h_C : float
canopy height (m).
z_0M : float
aerodynamic roughness length for momentum trasport (m).
d_0 : float
zero-plane displacement height (m).
zm : float
        height of wind speed measurement (m).
z0_soil : float, optional
roughness length of the soil layer, use z0_soil=0.01.
alpha_k : float, optional
Heat diffusion coefficient, default=2.
Returns
-------
R_S : float
Aerodynamic resistance at the soil boundary layer (s m-1).
References
----------
.. [Choudhury1988] Choudhury, B. J., & Monteith, J. L. (1988). A four-layer model
for the heat budget of homogeneous land surfaces.
Royal Meteorological Society, Quarterly Journal, 114(480), 373-398.
http://dx/doi.org/10.1002/qj.49711448006.
'''
# Soil resistance eqs. 24 & 25 [Choudhury1988]_
K_h = k * u_star * (h_C - d_0)
R_S = (h_C * np.exp(alpha_k) / (alpha_k * K_h)) * \
(np.exp(-alpha_k * z0_soil / h_C) - np.exp(-alpha_k * (d_0 + z_0M) / h_C))
return np.asarray(R_S)
def calc_R_S_McNaughton(u_friction):
''' Aerodynamic resistance at the soil boundary layer.
Estimates the aerodynamic resistance at the soil boundary layer based on the
Lagrangian model of [McNaughton1995]_.
Parameters
----------
u_friction : float
friction velocity (m s-1).
Returns
-------
R_S : float
Aerodynamic resistance at the soil boundary layer (s m-1)
References
----------
.. [McNaughton1995] McNaughton, K. G., & Van den Hurk, B. J. J. M. (1995).
A 'Lagrangian' revision of the resistors in the two-layer model for calculating
the energy budget of a plant canopy. Boundary-Layer Meteorology, 74(3), 261-288.
http://dx/doi.org/10.1007/BF00712121.
'''
R_S = 10.0 / u_friction
return np.asarray(R_S)
def calc_R_S_Kustas(u_S, deltaT):
''' Aerodynamic resistance at the soil boundary layer.
Estimates the aerodynamic resistance at the soil boundary layer based on the
original equations in TSEB [Kustas1999]_.
Parameters
----------
u_S : float
wind speed at the soil boundary layer (m s-1).
deltaT : float
Surface to air temperature gradient (K).
Returns
-------
R_S : float
Aerodynamic resistance at the soil boundary layer (s m-1).
References
----------
.. [Kustas1999] William P Kustas, John M Norman, Evaluation of soil and vegetation heat
flux predictions using a simple two-source model with radiometric temperatures for
partial canopy cover, Agricultural and Forest Meteorology, Volume 94, Issue 1,
Pages 13-29, http://dx.doi.org/10.1016/S0168-1923(99)00005-2.
'''
# Convert input scalars to numpy arrays
u_S, deltaT = map(np.asarray, (u_S, deltaT))
deltaT = np.asarray(np.maximum(deltaT, 0.0))
R_S = 1.0 / (KN_c * deltaT**(1.0 / 3.0) + KN_b * u_S)
return np.asarray(R_S)
def calc_R_x_Choudhury(U_C, F, leaf_width, alpha_prime=3.0):
''' Estimates aerodynamic resistance at the canopy boundary layer.
Estimates the aerodynamic resistance at the canopy boundary layer based on the
K-Theory model of [Choudhury1988]_.
Parameters
----------
    U_C : float
wind speed at the canopy interface (m s-1).
F : float
local Leaf Area Index.
leaf_width : float
        effective leaf width size (m).
alpha_prime : float, optional
        Wind extinction coefficient, default=3.
Returns
-------
R_x : float
Aerodynamic resistance at the canopy boundary layer (s m-1).
References
----------
.. [Choudhury1988] Choudhury, B. J., & Monteith, J. L. (1988). A four-layer model
for the heat budget of homogeneous land surfaces.
Royal Meteorological Society, Quarterly Journal, 114(480), 373-398.
http://dx/doi.org/10.1002/qj.49711448006.
'''
# Eqs. 29 & 30 [Choudhury1988]_
R_x = 1.0 / (F * (2.0 * CM_a / alpha_prime) * np.sqrt(U_C / \
leaf_width) * (1.0 - np.exp(-alpha_prime / 2.0)))
# R_x=(alpha_u*(sqrt(leaf_width/U_C)))/(2.0*alpha_0*LAI*(1.-exp(-alpha_u/2.0)))
return np.asarray(R_x)
def calc_R_x_McNaughton(F, leaf_width, u_star):
''' Estimates aerodynamic resistance at the canopy boundary layer.
Estimates the aerodynamic resistance at the canopy boundary layer based on the
Lagrangian model of [McNaughton1995]_.
Parameters
----------
F : float
local Leaf Area Index.
leaf_width : float
        effective leaf width size (m).
    u_star : float
        friction velocity (m s-1).
Returns
-------
R_x : float
Aerodynamic resistance at the canopy boundary layer (s m-1).
References
----------
.. [McNaughton1995] McNaughton, K. G., & Van den Hurk, B. J. J. M. (1995).
A 'Lagrangian' revision of the resistors in the two-layer model for calculating
the energy budget of a plant canopy. Boundary-Layer Meteorology, 74(3), 261-288.
http://dx/doi.org/10.1007/BF00712121.
'''
C_dash = 130.0
C_dash_F = C_dash / F
    # Eq. 30 in [McNaughton1995]
R_x = C_dash_F * (leaf_width * u_star)**0.5 + 0.36 / u_star
return np.asarray(R_x)
def calc_R_x_Norman(LAI, leaf_width, u_d_zm):
''' Estimates aerodynamic resistance at the canopy boundary layer.
    Estimates the aerodynamic resistance at the canopy boundary layer based on the
original equations in TSEB [Norman1995]_.
Parameters
----------
    LAI : float
        local Leaf Area Index.
leaf_width : float
        effective leaf width size (m).
u_d_zm : float
        wind speed at the height of the momentum source-sink.
Returns
-------
R_x : float
Aerodynamic resistance at the canopy boundary layer (s m-1).
References
----------
.. [Norman1995] J.M. Norman, W.P. Kustas, K.S. Humes, Source approach for estimating
soil and vegetation energy fluxes in observations of directional radiometric
surface temperature, Agricultural and Forest Meteorology, Volume 77, Issues 3-4,
Pages 263-293, http://dx.doi.org/10.1016/0168-1923(95)02265-Y.
'''
    # C' / F term from [Norman1995]_
C_dash_F = KN_C_dash / LAI
R_x = C_dash_F * (leaf_width / u_d_zm)**0.5
return np.asarray(R_x)
def calc_stomatal_conductance_TSEB(
LE_C,
LE,
R_A,
R_x,
e_a,
T_A,
T_C,
F,
p=1013.0,
leaf_type=1,
f_g=1,
f_dry=1):
    ''' TSEB stomatal conductance
    Estimates the effective stomatal conductance by inverting the
    resistance-based canopy latent heat flux from a two-source perspective
Parameters
----------
LE_C : float
Canopy latent heat flux (W m-2).
LE : float
Surface (bulk) latent heat flux (W m-2).
R_A : float
Aerodynamic resistance to heat transport (s m-1).
R_x : float
Bulk aerodynamic resistance to heat transport at the canopy boundary layer (s m-1).
e_a : float
Water vapour pressure at the reference height (mb).
T_A : float
Air temperature at the reference height (K).
T_C : float
Canopy (leaf) temperature (K).
F : float
local Leaf Area Index.
p : float, optional
Atmospheric pressure (mb) use 1013.0 as default.
leaf_type : int, optional
type of leaf regarding stomata distribution.
1=HYPOSTOMATOUS stomata in the lower surface of the leaf (default).
2=AMPHISTOMATOUS, stomata in both surfaces of the leaf.
f_g : float, optional
Fraction of green leaves.
f_dry : float, optional
Fraction of dry (non-wet) leaves.
Returns
-------
G_s : float
effective leaf stomata conductance (m s-1).
References
----------
.. [Anderson2000] M.C. Anderson, J.M. Norman, T.P. Meyers, G.R. Diak, An analytical
model for estimating canopy transpiration and carbon assimilation fluxes based on
canopy light-use efficiency, Agricultural and Forest Meteorology, Volume 101,
Issue 4, 12 April 2000, Pages 265-289, ISSN 0168-1923,
http://dx.doi.org/10.1016/S0168-1923(99)00170-7.'''
# Convert input scalars to numpy arrays
LE_C, LE, R_A, R_x, e_a, T_A, T_C, F, p, leaf_type, f_g, f_dry = map(
np.asarray, (LE_C, LE, R_A, R_x, e_a, T_A, T_C, F, p, leaf_type, f_g, f_dry))
G_s = np.zeros(np.shape(LE_C))
# Invert the bulk SW to obtain eb (vapor pressure at the canopy interface)
rho = calc_rho(p, e_a, T_A)
Cp = calc_c_p(p, e_a)
Lambda = calc_lambda(T_A)
psicr = calc_psicr(p, Lambda)
e_ac = e_a + LE * R_A * psicr / (rho * Cp)
# Calculate the saturation vapour pressure in the leaf in mb
e_star = calc_vapor_pressure(T_C)
    # Calculate the boundary layer canopy resistance to water vapour (Anderson et al. 2000)
# Invert the SW LE_S equation to calculate the bulk stomatal resistance
R_c = np.asarray((rho * Cp * (e_star - e_ac) / (LE_C * psicr)) - R_x)
K_c = np.asarray(f_dry * f_g * leaf_type)
# Get the mean stomatal resistance (here LAI comes in as stomatal resistances
# are in parallel: 1/Rc=sum(1/R_st)=LAI/Rst
    # and the mean leaf conductance is the reciprocal of R_st (m s-1)
G_s[R_c > 0] = 1.0 / R_c[R_c > 0] * K_c[R_c > 0] * F[R_c > 0]
return np.asarray(G_s)
def calc_stomatal_conductance_OSEB(
LE,
R_A,
e_a,
T_A,
T_C,
F,
p=1013.0,
leaf_type=1,
f_g=1,
f_dry=1):
    ''' OSEB stomatal conductance
    Estimates the effective stomatal conductance by inverting the
    resistance-based canopy latent heat flux from a one-source perspective
Parameters
----------
LE : float
Surface (bulk) latent heat flux (W m-2).
R_A : float
Aerodynamic resistance to heat transport (s m-1).
e_a : float
Water vapour pressure at the reference height (mb).
T_A : float
Air temperature at the reference height (K).
T_C : float
Canopy (leaf) temperature (K).
F : float
local Leaf Area Index.
p : float, optional
Atmospheric pressure (mb) use 1013.0 as default.
leaf_type : int, optional
type of leaf regarding stomata distribution.
1=HYPOSTOMATOUS stomata in the lower surface of the leaf (default).
2=AMPHISTOMATOUS, stomata in both surfaces of the leaf.
f_g : float, optional
Fraction of green leaves.
f_dry : float, optional
Fraction of dry (non-wet) leaves.
Returns
-------
G_s : float
effective leaf stomata conductance (m s-1).
References
----------
.. [Berni1999] J.A.J. Berni, P.J. Zarco-Tejada, G. Sepulcre-Canto, E. Fereres,
F. Villalobos, Mapping canopy conductance and CWSI in olive orchards using high
resolution thermal remote sensing imagery, Remote Sensing of Environment,
Volume 113, Issue 11, 16 November 2009, Pages 2380-2388,
http://dx.doi.org/10.1016/j.rse.2009.06.018.
'''
# Convert input scalars to numpy array
LE, R_A, e_a, T_A, T_C, F, p, leaf_type, f_g, f_dry = map(
np.asarray, (LE, R_A, e_a, T_A, T_C, F, p, leaf_type, f_g, f_dry))
G_s = np.zeros(np.shape(LE))
# Invert the bulk SW to obtain eb (vapor pressure at the canopy interface)
rho = calc_rho(p, e_a, T_A)
Cp = calc_c_p(p, e_a)
Lambda = calc_lambda(T_A)
psicr = calc_psicr(p, Lambda)
# Calculate the saturation vapour pressure in the leaf in mb
e_star = calc_vapor_pressure(T_C)
    # Calculate the boundary layer canopy resistance to water vapour (Anderson et al. 2000)
# Invert the SW LE_S equation to calculate the bulk stomatal resistance
R_c = np.asarray((rho * Cp * (e_star - e_a) / (LE * psicr)) - R_A)
K_c = np.asarray(f_dry * f_g * leaf_type)
# Get the mean stomatal resistance (here LAI comes in as stomatal resistances
# are in parallel: 1/Rc=sum(1/R_st)=LAI/Rst
    # and the mean leaf conductance is the reciprocal of R_st (m s-1)
G_s[R_c > 0] = 1.0 / R_c[R_c > 0] * K_c[R_c > 0] * F[R_c > 0]
return np.asarray(G_s)
def calc_coef_m2mmol(T_C, p=101.325):
'''Calculates the conversion factor from stomatal conductance from m s-1
to mmol m-2 s-1.
Parameters
----------
T_C : float
Leaf temperature (K).
p : float, optional
        Atmospheric pressure (kPa), default = 101.325 kPa.
Returns
-------
K_gs : float
Conversion factor from m s-1 to mmol m-2 s-1.
References
----------
[Kimball2015] Kimball, B. A., White, J. W., Ottman, M. J., Wall, G. W., Bernacchi, C. J.,
Morgan, J., & Smith, D. P. (2015). Predicting canopy temperatures and infrared heater energy
requirements for warming field plots. Agronomy Journal, 107(1), 129-141
http://dx.doi.org/10.2134/agronj14.0109.
'''
K_gs = p / (R_u * T_C) # to mol m-2 s-1
K_gs = K_gs * 1e3 # to mmol m-2 s-1
return np.asarray(K_gs)
def calc_z_0H(z_0M, kB=0):
    '''Estimate the aerodynamic roughness length for heat transport.
Parameters
----------
z_0M : float
aerodynamic roughness length for momentum transport (m).
kB : float
kB parameter, default = 0.
Returns
-------
z_0H : float
        aerodynamic roughness length for heat transport (m).
References
----------
.. [Norman1995] J.M. Norman, W.P. Kustas, K.S. Humes, Source approach for estimating
soil and vegetation energy fluxes in observations of directional radiometric
surface temperature, Agricultural and Forest Meteorology, Volume 77, Issues 3-4,
Pages 263-293, http://dx.doi.org/10.1016/0168-1923(95)02265-Y.
'''
import numpy as np
z_0H = z_0M / np.exp(kB)
return np.asarray(z_0H)
def calc_z_0M(h_C):
    ''' Aerodynamic roughness length.
    Estimates the aerodynamic roughness length for momentum transport
as a ratio of canopy height.
Parameters
----------
h_C : float
Canopy height (m).
Returns
-------
z_0M : float
aerodynamic roughness length for momentum transport (m).'''
z_0M = h_C * 0.125
return np.asarray(z_0M)
def raupach(lambda_):
'''Roughness and displacement height factors for discontinuous canopies
    Estimated from the frontal canopy leaf area, based on the Raupach (1994) model,
after [Schaudt2000]_
Parameters
----------
lambda_ : float
        roughness density or frontal area index.
Returns
-------
z0M_factor : float
height ratio of roughness length for momentum transport
d_factor : float
height ratio of zero-plane displacement height
References
----------
.. [Schaudt2000] K.J Schaudt, R.E Dickinson, An approach to deriving roughness length
and zero-plane displacement height from satellite data, prototyped with BOREAS data,
Agricultural and Forest Meteorology, Volume 104, Issue 2, 8 August 2000, Pages 143-155,
http://dx.doi.org/10.1016/S0168-1923(00)00153-2.
'''
# Convert input scalar to numpy array
lambda_ = np.asarray(lambda_)
z0M_factor = np.zeros(lambda_.shape)
d_factor = np.asarray(np.zeros(lambda_.shape) + 0.65)
# Calculation of the Raupach (1994) formulae
# if lambda_ > 0.152:
i = lambda_ > 0.152
z0M_factor[i] = (0.0537 / (lambda_[i]**0.510)) * \
(1. - np.exp(-10.9 * lambda_[i]**0.874)) + 0.00368
# else:
z0M_factor[~i] = 5.86 * \
np.exp(-10.9 * lambda_[~i]**1.12) * lambda_[~i]**1.33 + 0.000860
# if lambda_ > 0:
i = lambda_ > 0
d_factor[i] = 1. - \
(1. - np.exp(-np.sqrt(15.0 * lambda_[i]))) / np.sqrt(15.0 * lambda_[i])
return np.asarray(z0M_factor), np.asarray(d_factor)
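# Minimal usage sketch (illustrative values only, not from any reference):
# import numpy as np
# z_0M, d_0 = calc_roughness(LAI=np.array([2.0]), h_C=np.array([1.0]),
#                            w_C=np.array([1.0]), landcover=np.array([CROP]))
# # crops use fixed height ratios, so z_0M = h_C / 8 and d_0 = 0.65 * h_C here
# R_A = calc_R_A(z_T=np.array([2.0]), ustar=np.array([0.35]),
#                L=np.array([1e6]), d_0=d_0, z_0H=calc_z_0H(z_0M, kB=2.0))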
| bucricket/projectMAS | pydisalexi/resistances.py | Python | bsd-3-clause | 25,740 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
r"""
This module allows you to easily push your data through a determined set of tasks
and stop/continue execution if necessary.
.. sidebar:: Holding Pen
Holding Pen (:py:mod:`.views.holdingpen`) is a web interface displaying
all the data objects that ran through a workflow.
Here you can interact with the workflows and data directly via a GUI.
=================
Create a workflow
=================
Create a workflow for your data using functions as individual tasks.
.. code-block:: python
from invenio_workflows.tasks.sample_tasks import (
add_data,
halt_if_higher_than_20,
)
class myworkflow(object):
\"\"\"Add 20 to integer and halt if higher.\"\"\"
workflow = [add_data(20),
halt_if_higher_than_20]
Save it as a new file in your module located under `workflows/` with the same
name as the class. For example at `yourmodule/workflows/myworkflow.py`.
The `workflow` attribute should be a list of functions
(or list of lists of tasks) as per the conditions of the
underlying `workflows-module`_.
.. sidebar:: Naming things
:subtitle: A valid workflow must:
(a) Have matching class name and file-name or (b) map the class
name using ``__all__ = ["myname"]`` notation.
The workflows registry will pick up any files under `workflows/`.
=============
Create a task
=============
The functions in the workflow are called tasks. Each task must *at least*
take two arguments:
.. code-block:: python
def halt_if_higher_than_20(obj, eng):
\"\"\"Check if current data is more than than 20.\"\"\"
if obj.data > 20:
eng.halt("Data higher than 20.")
`obj` (:py:class:`.models.BibWorkflowObject`)
*is the current object being worked on*
`obj` adds extra functionality by wrapping around your data and
provide utilities to interface with the Holding Pen interface.
`eng` (:py:class:`.engine.BibWorkflowEngine`)
*is the current instance of the workflow engine*
`eng` give you access to manipulating the workflow execution itself and
to retrieve all the objects being processed.
Other parameters may be passed as `*args` or `**kwargs`.
Pass additional arguments
=========================
To allow arguments to be passed to the task from the workflow definition,
simply wrap your task in a closure:
.. code-block:: python
def add_data(data_param):
\"\"\"Add data_param to the obj.data.\"\"\"
def _add_data(obj, eng):
data = data_param
obj.data += data
return _add_data
It can then be called from the workflow definition as `add_data(20)`,
returning the inner function.
==============
Run a workflow
==============
Finally, there are mainly two use-cases for running your workflow:
* run only a **single data object**, or
* run **multiple data objects** through a workflow.
The former use the :py:class:`.models.BibWorkflowObject` model API, and
the latter use the :py:mod:`.api`.
Run a single data object
========================
.. note:: This method is recommended if you only have one data
item you want to run through the workflow.
.. code-block:: python
from invenio_workflows.models import BibWorkflowObject
myobj = BibWorkflowObject.create_object()
myobj.set_data(10)
eng = myobj.start_workflow("myworkflow")
Once the workflow completes it will return the engine instance that ran it.
To get the data, simply call the `get_data()` function of
:py:class:`.models.BibWorkflowObject`
.. code-block:: python
myobj.get_data() # outputs: 30
Run multiple data objects
=========================
.. note:: This method is recommended if you need to run several objects through a workflow.
To do this simply import the workflows API function `start()` and provide
a list of objects:
.. code-block:: python
from invenio_workflows.api import start
eng = start(workflow_name="myworkflow", data=[5, 10])
*Here we are passing simple data objects in the form of integers.*
As usual, the `start()` function returns the `eng` instance that ran the
workflow. You can query this object to retrieve the data you sent in:
.. code-block:: python
len(eng.objects) # outputs: 4
Why 4 objects when we only shipped 2 objects? Well, we take initial snapshots
(copy of BibWorkflowObject) of the original data. In the example above,
we get 4 objects back as each object passed has a snapshot created.
.. sidebar:: Object versions and YOU
The data you pass to the workflows API is wrapped in a BibWorkflowObject.
This object has a `version` property which tells you the state of the object.
For example, if the object is currently *halted* in the middle of a
workflow, or if it is an *initial* object.
*initial* objects are basically snapshots of the BibWorkflowObject just
before the workflow started. These are created to allow for objects to
be easily restarted in the workflow with the initial data intact.
You can also query the engine instance to only give you the objects which are in
a certain state.
.. code-block:: python
len(eng.initial_objects) # outputs: 2
len(eng.halted_objects) # outputs: 2
len(eng.completed_objects) # outputs: 0
len(eng.running_objects) # outputs: 0
len(eng.waiting_objects) # outputs: 0
len(eng.error_objects) # outputs: 0
(`eng.completed_objects` is empty because both objects passed are halted.)
This output is actually representative of snapshots of the objects, not the
objects themselves. The _default_ snapshotting behaviour is also evident here:
There is one snapshot taken in the beginning of the execution and one
when the object reaches one of the other states. A snapshot can only be in a
single state.
No object will ever be in the `running` state under usual operation.
Moreover, to retrieve the data from the first object, you can use
`get_data()` as with single objects:
.. code-block:: python
res = halted_objects[0].get_data()
print res
# outputs: 25
Run workflows asynchronously
============================
So far we have been running our workflows in the current process. However,
for long running processes we do not want to wait for the workflow to finish
before continuing the processing.
Luckily, there is API to do this:
`BibWorkflowObject.start_workflow(delayed=True)`
as when running single objects, you can pass the delayed parameter to
enable asynchronous execution.
`api.start_delayed()`
The API provide this function `start_delayed()` to run a workflow
asynchronously.
To use this functionality you need to make sure you are running a task queue
such as `Celery`_ that will run the workflow in a separate process.
.. note:: The delayed API returns a :py:class:`.worker_result.AsynchronousResultWrapper`
instead of a :py:class:`.engine.BibWorkflowEngine` instance.
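A minimal sketch (assuming `start_delayed()` takes the same arguments as
`start()` shown earlier):

.. code-block:: python

    from invenio_workflows.api import start_delayed

    # returns an AsynchronousResultWrapper instead of the engine itself
    async_result = start_delayed(workflow_name="myworkflow", data=[5, 10])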
In order to communicate with such a task queue we make use of *worker plugins*.
Workers
=======
A worker is a plugin (or bridge) from the Invenio workflows module to some
distributed task queue. By default, we have provided workers for `Celery`_ and
`RQ`_.
These plugins are used by the :py:mod:`.worker_engine` to launch workflows
asynchronously in a task queue.
*We recommend using Celery as the default asynchronous worker.*
Working with extra data
=======================
If you need to add some extra data to the :py:class:`.models.BibWorkflowObject` that is
not suitable to add to the ``obj.data`` attribute, you can make use of the
``obj.extra_data`` attribute.
The ``extra_data`` attribute is basically a normal dictionary that you can fill. However,
it contains some additional information by default.
.. code-block:: python
{
"_tasks_results": {},
"owner": {},
"_task_counter": {},
"_error_msg": None,
"_last_task_name": "",
"latest_object": -1,
"_action": None,
"redis_search": {},
"source": "",
"_task_history: [],
}
This information is used by the :py:class:`.models.BibWorkflowObject` to store
data related to the workflow execution as well as any extra data added by tasks.
It also stores information that is integrated with Holding Pen - the graphical interface
for all the data objects.
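For example, adding your own entry is an ordinary dictionary operation (the
key name here is arbitrary):
.. code-block:: python
    obj.extra_data["my_key"] = "my value"
    obj.extra_data["my_key"]  # outputs: 'my value'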
===========
Holding Pen
===========
Holding Pen is the graphical interface over all the data objects that have been run through a workflow.
The name *Holding Pen* originates from a library use case: incoming bibliographical
meta-data records are put on "hold", awaiting a human curator to analyze each record
and decide whether it should be inserted into the repository.
One common usage of this interface is acceptance of user submissions.
We will take this concept of record approval further throughout this guide as we explain the
most common use cases for the Holding Pen.
.. note:: The Holding Pen is accessible under `/admin/holdingpen`
Data object display in Holding Pen
==================================
To properly represent a data object in the Holding Pen, the workflow definition
explained above can be further enriched by adding some static functions to the class
(see the sketch after this list).
* `get_title`: return the "title" of the data object shown in the table display.
E.g. title of meta-data record
* `get_description`: return a short description of the data object shown in the table display.
E.g. identifiers and categories
* `formatter`: used in the object detailed display to render the data in the object for the user.
E.g. the detailed record format of a meta-data record.
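A minimal sketch of such a definition (the method bodies, and the assumption
that the object data is a dictionary with a ``title`` key, are illustrative only):
.. code-block:: python
    class myworkflow(object):
        workflow = []  # the task list, as defined for any workflow
        @staticmethod
        def get_title(bwo):
            return bwo.get_data().get("title", "No title")
        @staticmethod
        def get_description(bwo):
            return "A sample data object"
        @staticmethod
        def formatter(bwo, **kwargs):
            return "<pre>{0}</pre>".format(bwo.get_data())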
Actions in Holding Pen
======================
An action in Holding Pen is a generic term describing an action that can be taken
on a data object.
To use the example of record approval, we basically mean adding GUI buttons to
accept or reject a data object. The action taken (the button pressed) on the data object
is then connected to a custom action back-end that may then decide to
e.g. continue the workflow or simply delete the object.
.. sidebar:: Approval action
In our example of the *approval action* we will make use of front-end assets
(JavaScript/HTML/templates) to display and listen to events on buttons and
    a Python back-end plugin to react to the chosen action.
Adding an action
----------------
By default we have added an approval action which can be used to allow a data object
to continue the workflow or be deleted.
`workflows/actions/approval.py`
Action back-end located in ``workflows/actions/approval.py`` that implements
``render()``, ``render_mini()`` and ``resolve()``. ``resolve()`` handles the
continuation or deletion of the data object using the workflows API.
`templates/workflows/actions/approval_(main|mini|side).html`
jinja templates used to render the action UI. There are different templates
in play here depending on position.
* `mini`: displayed in the main table (for a quick action).
* `side`: displayed on the right side of the object details page.
* `main`: displayed in the middle of the object details page.
`static/workflows/actions/approval.js`
JavaScript file listening to events in the approval UI to call the backend
via ajax calls.
To enable the JavaScript to be loaded via requireJS, you need to override the
actions "init" JavaScript file `static/workflows/actions/init.js` on your overlay
and add any initialization code for the action (e.g. attaching events).
Using an action
---------------
There are two ways of activating an action (a sketch of the first follows the list):
* **When halting a workflow:** :py:meth:`.engine.BibWorkflowEngine.halt` has
a parameter that allows you to set an action that needs to be taken in
the Holding Pen - along with a message to be displayed.
* **Directly using the BibWorkflowObject API:** :py:meth:`.models.BibWorkflowObject.set_action`,
  :py:meth:`.models.BibWorkflowObject.remove_action` and :py:meth:`.models.BibWorkflowObject.get_action`.
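A sketch of the first approach, from inside a workflow task (the action name
and the parameter names are assumptions, not prescribed by the API):
.. code-block:: python
    def approval_task(obj, eng):
        eng.halt("Please approve this record", action="approval")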
Task results in Holding Pen
===========================
If you want to add some special task results to be displayed on the details page
of the data object in Holding Pen, you can use the task results API available
in :py:class:`.models.BibWorkflowObject`.
The API provides functions to manipulate the task results:
:py:meth:`.models.BibWorkflowObject.add_task_result`
Adds a task result to the end of a list associated with a label (name).
:py:meth:`.models.BibWorkflowObject.update_task_results`
Update task result for a specific label (name).
:py:meth:`.models.BibWorkflowObject.get_tasks_results`
    Return all task results as a dictionary of the form ``{ name: [result, ..] }``.
The *task result* is a dictionary given as context to the template
when rendered. The result given here is added to a list of results
for this name.
.. code-block:: python
obj = BibWorkflowObject() # or BibWorkflowObject.query.get(id)
obj.add_task_result("foo", my_result, "path/to/template")
See sample templates under `templates/workflows/results/*.html`.
.. _workflows-module: https://pypi.python.org/pypi/workflow/1.01
.. _Celery: http://www.celeryproject.org/
.. _RQ: http://python-rq.org/
"""
| jmartinm/invenio-workflows | invenio_workflows/__init__.py | Python | gpl-2.0 | 14,050 |
"""
Unit test script for mongodb 2.0 driver.
This script is designed to be run from engage.tests.test_drivers.
"""
# Id for the resource to be tested.
# An instance with this id must be present
# in the install script.
resource_id = "mongodb"
# The install script should be a json string
# containing a list which includes the
# resource instance for the driver being tested.
# It can use the following substitution variables:
# deployment_home, hostname, username
_install_script = """
[
{ "id": "mongodb",
"key": {"name": "mongodb", "version": "2.0"},
"config_port": {
"home": "${deployment_home}/mongodb-2.0",
"log_file": "${deployment_home}/log/mongodb.log",
"port": 27017
},
"input_ports": {
"host": {
"cpu_arch": "x86_64",
"genforma_home": "${deployment_home}",
"hostname": "${hostname}",
"log_directory": "${deployment_home}/log",
"os_type": "linux",
"os_user_name": "${username}",
"private_ip": null,
"sudo_password": "GenForma/${username}/sudo_password"
},
"python": {"home":"${deployment_home}/apps/python"}
},
"output_ports": {
"mongodb": {
"home": "${deployment_home}/mongodb-2.0",
"hostname": "localhost",
"port": 27017
}
},
"inside": {
"id": "master-host",
"key": {"name": "ubuntu-linux", "version": "10.04"},
"port_mapping": {
"host": "host"
}
}
}
]
"""
def get_install_script():
return _install_script
# If the driver needs access to the password database, either for the sudo
# password or for passwords it maintains in the database, define this function.
# It should return a dict containing any required password entries, except for the
# sudo password which is added by the test driver. If you don't need the password
# database just comment out this function or have it return None.
def get_password_data():
return {}
| quaddra/engage | python_pkg/engage/drivers/standard/mongodb__2_4/drivertest.py | Python | apache-2.0 | 1,962 |
from django.core.management.base import NoArgsCommand
from django.db import transaction
import os.path
import askbot
from askbot.search.postgresql import setup_full_text_search
class Command(NoArgsCommand):
@transaction.commit_on_success
def handle_noargs(self, **options):
script_path = os.path.join(
askbot.get_install_directory(),
'search',
'postgresql',
'thread_and_post_models_01162012.plsql'
)
setup_full_text_search(script_path)
| afdelgado/askbot | askbot/management/commands/init_postgresql_full_text_search.py | Python | gpl-3.0 | 597 |
import six
try:
from collections.abc import Iterable
except ImportError: # FIXME: Remove if Python2 support is removed
from collections import Iterable
def make_tuple(value):
""" Converts the value into a tuple if the value is an iterable with the following exceptions:
* a `None` value will return `None`
* a string value will return a tuple with the string as the unique member
"""
if value is None:
return None
if isinstance(value, six.string_types):
return value,
if isinstance(value, Iterable):
return tuple(value)
else:
return value,
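# Illustrative usage: a quick self-check of the rules documented above.
if __name__ == "__main__":
    assert make_tuple(None) is None
    assert make_tuple("abc") == ("abc",)       # a string stays whole
    assert make_tuple([1, 2, 3]) == (1, 2, 3)  # an iterable is converted
    assert make_tuple(42) == (42,)             # a scalar is wrapped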
| conan-io/conan | conans/util/misc.py | Python | mit | 626 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCustomName('a Stormtrooper')
mobileTemplate.setLevel(1)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setSocialGroup("township")
mobileTemplate.setOptionsBitmask(Options.INVULNERABLE)
templates = Vector()
templates.add('object/mobile/shared_stormtrooper.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('staticstorm', mobileTemplate)
return | agry/NGECore2 | scripts/mobiles/generic/static/tatooine/staticstorm.py | Python | lgpl-3.0 | 1,106 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import yandex_kassa.utils
def create_items(apps, schema_editor):
Item = apps.get_model('app', 'Item')
Item.objects.bulk_create([
Item(name='HTC Desire', price=5),
Item(name='iPhone 4', price=10),
Item(name='iPhone 5', price=15),
Item(name='iPhone 6', price=20),
])
class Migration(migrations.Migration):
dependencies = [
('yandex_kassa', '__first__'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32, verbose_name=b'\xd0\x9d\xd0\xb0\xd0\xb8\xd0\xbc\xd0\xb5\xd0\xbd\xd0\xbe\xd0\xb2\xd0\xb0\xd0\xbd\xd0\xb8\xd0\xb5')),
('price', models.PositiveIntegerField(verbose_name=b'\xd0\xa1\xd1\x82\xd0\xbe\xd0\xb8\xd0\xbc\xd0\xbe\xd1\x81\xd1\x82\xd1\x8c')),
],
options={
'verbose_name': '\u0422\u043e\u0432\u0430\u0440',
'verbose_name_plural': '\u0422\u043e\u0432\u0430\u0440\u044b',
},
),
migrations.CreateModel(
name='Order',
fields=[
('uuid', models.CharField(default=yandex_kassa.utils.get_uuid, max_length=64, serialize=False, verbose_name=b'ID \xd0\xb7\xd0\xb0\xd0\xba\xd0\xb0\xd0\xb7\xd0\xb0', primary_key=True)),
('count', models.PositiveIntegerField(default=1, verbose_name=b'\xd0\x9a\xd0\xbe\xd0\xbb-\xd0\xb2\xd0\xbe')),
('amount', models.PositiveIntegerField(verbose_name=b'\xd0\xa1\xd1\x83\xd0\xbc\xd0\xbc\xd0\xb0 \xd0\xb7\xd0\xb0\xd0\xba\xd0\xb0\xd0\xb7\xd0\xb0')),
('item', models.ForeignKey(verbose_name=b'\xd0\xa2\xd0\xbe\xd0\xb2\xd0\xb0\xd1\x80', to='app.Item')),
('payment', models.ForeignKey(verbose_name=b'\xd0\x9f\xd0\xbb\xd0\xb0\xd1\x82\xd0\xb5\xd0\xb6', to='yandex_kassa.Payment')),
],
options={
'verbose_name': '\u0417\u0430\u043a\u0430\u0437',
'verbose_name_plural': '\u0417\u0430\u043a\u0430\u0437\u044b',
},
),
migrations.RunPython(create_items, reverse_code=migrations.RunPython.noop),
]
| VladimirFilonov/django-yandex-kassa | demo/app/migrations/0001_initial.py | Python | mit | 2,393 |
print "Find output of below algebraic expression"
print "a x b-c x d"
print "5 x 10 - 15 x 3"
print "5 * 10 = 50"
print "15 * 3 = 45"
print "50 - 45 = 5"
print "5 x 10 - 15 x 3 = ", 5 * 10 - 15 * 3
| mrniranjan/python-scripts | reboot/math8.py | Python | gpl-2.0 | 199 |
#!/usr/bin/python
import os
os.system('python runTrainer.py --agent=KerasDDPGAgent --env=Detached2DCartPolev0Env --train-for=0 --test-for=10000000 --delay=0.005 --gui --show-test --load-file=checkpoints/KerasDDPG-D2DCartPolev0-chkpt-1.h5')
| benelot/bullet-gym | bullet-gym-primitive/showKerasDDPGDetached2DCartPoleExample.py | Python | mit | 242 |
# -*- coding: utf-8 -*-
"""
Flask extensions instances, for access outside app.factory
"""
from flask_security import SQLAlchemyUserDatastore
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import MetaData, event
from sqlalchemy.engine import Engine
from sqlite3 import Connection as SQLite3Connection
from lib.oidc import OIDC
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
if isinstance(dbapi_connection, SQLite3Connection):
cursor = dbapi_connection.cursor()
cursor.execute('PRAGMA foreign_keys=ON')
cursor.close()
# XXX fixes SQLite unnamed constraints causing problems with migrations
naming_convention = {
'ix': 'ix_%(column_0_label)s',
'uq': 'uq_%(table_name)s_%(column_0_name)s',
'ck': 'ck_%(table_name)s_%(column_0_name)s',
'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',
'pk': 'pk_%(table_name)s'}
db = SQLAlchemy(metadata=MetaData(naming_convention=naming_convention))
oidc = OIDC()
user_datastore = SQLAlchemyUserDatastore(db, None, None)
| crossgovernmentservices/csd-notes | app/extensions.py | Python | mit | 1,087 |
from django.db import migrations
from django.contrib.postgres.operations import UnaccentExtension
# Adiciona a extensão UnaccentExtension para tratar pesquisas na API
# em strings que tenham acento
class Migration(migrations.Migration):
dependencies = [
]
operations = [
UnaccentExtension()
] | culturagovbr/sistema-nacional-cultura | apiv2/migrations/0001_initial.py | Python | agpl-3.0 | 322 |
import datetime
import os
from flask import Flask, g, request, render_template, redirect, url_for, abort
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
from webhelpers import paginate
from lib.messages import parse_line
from lib.model import Log, LogPage
from lib.requests import connect_mysql, disconnect_mysql
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__)
# Export config
app.config['PROPAGATE_EXCEPTIONS'] = True
app.config['EXPORT_URL'] = os.environ.get("EXPORT_URL", "http://unsupportedlogs.msparp.com")
app.wsgi_app = ProxyFix(app.wsgi_app, 2)
# Pre and post request stuff
app.before_request(connect_mysql)
app.teardown_request(disconnect_mysql)
# Chat
@app.route('/chat')
@app.route('/chat/<chat>')
def chat(chat=None):
if chat is None:
return redirect(url_for("configure"))
return redirect(url_for("view_log", chat=chat))
@app.route('/logs/group/<chat>')
def old_view_log(chat):
return redirect(url_for('view_log', chat=chat))
@app.route('/logs/<log_id>')
def view_log_by_id(log_id=None):
try:
log_id = int(log_id)
except ValueError:
abort(400)
try:
log = g.mysql.query(Log).filter(Log.id==log_id).one()
except NoResultFound:
abort(404)
if log.url is not None:
return redirect(url_for('view_log', chat=log.url))
abort(404)
@app.route('/chat/<chat>/log')
def view_log(chat=None):
try:
log = g.mysql.query(Log).filter(Log.url==chat).one()
except NoResultFound:
abort(404)
current_page = request.args.get('page') or log.page_count
mode = request.args.get('mode') or 'normal'
try:
log_page = g.mysql.query(LogPage).filter(and_(LogPage.log_id==log.id, LogPage.number==current_page)).one()
except NoResultFound:
abort(404)
url_generator = paginate.PageURL(url_for('view_log', chat=chat), {'page': current_page})
# It's only one row per page and we want to fetch them via both log id and
# page number rather than slicing, so we'll just give it an empty list and
# override the count.
paginator = paginate.Page([], page=current_page, items_per_page=1, item_count=log.page_count, url=url_generator)
# Pages end with a line break, so the last line is blank.
lines = log_page.content.split('\n')[0:-1]
lines = filter(lambda x: x is not None, map(lambda _: parse_line(_, 0), lines))
for line in lines:
line['datetime'] = datetime.datetime.fromtimestamp(line['timestamp'])
return render_template('log.html',
chat=chat,
lines=lines,
current_page=current_page,
mode=mode,
paginator=paginator,
)
@app.route('/health', methods=['GET'])
def doHealthCheck():
# should probably actually DO a health check here
return 'ok'
# Redirects
@app.route("/faq")
def faq():
return render_template("pages/msparpfaq.html")
@app.route("/bbcode")
def bbcode():
return render_template("pages/bbcode.html")
@app.route("/userguide")
def userguide():
return render_template("pages/userguide.html")
# Home
@app.route("/")
def configure():
return render_template("frontpage.html")
# Exporting
@app.route('/chat/<chat>/export')
def export_log(chat=None):
if g.redis.exists('chat.' + chat + '.exported'):
return render_template('export_complete.html', chat=chat)
# Add to queue if chat log exists.
if g.mysql.query(Log).filter(Log.url == chat).scalar():
g.redis.sadd('export-queue', chat)
return render_template('export_progress.html', chat=chat)
if __name__ == "__main__":
app.run(port=8000, debug=True)
| MSPARP/MSPARP | main.py | Python | mit | 3,649 |
namespace SeriesNamer
{
partial class UpdateTool
{
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.IContainer components = null;
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
private void InitializeComponent()
{
this.dOutput = new System.Windows.Forms.TextBox();
this.dAbort = new System.Windows.Forms.Button();
this.dProgress = new System.Windows.Forms.ProgressBar();
this.dWork = new System.ComponentModel.BackgroundWorker();
this.SuspendLayout();
//
// dOutput
//
this.dOutput.Anchor = ((System.Windows.Forms.AnchorStyles)((((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Bottom)
| System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.dOutput.Location = new System.Drawing.Point(12, 41);
this.dOutput.Multiline = true;
this.dOutput.Name = "dOutput";
this.dOutput.ReadOnly = true;
this.dOutput.ScrollBars = System.Windows.Forms.ScrollBars.Both;
this.dOutput.Size = new System.Drawing.Size(268, 156);
this.dOutput.TabIndex = 0;
this.dOutput.WordWrap = false;
//
// dAbort
//
this.dAbort.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
this.dAbort.Location = new System.Drawing.Point(209, 203);
this.dAbort.Name = "dAbort";
this.dAbort.Size = new System.Drawing.Size(75, 23);
this.dAbort.TabIndex = 1;
this.dAbort.Text = "Abort";
this.dAbort.UseVisualStyleBackColor = true;
this.dAbort.Click += new System.EventHandler(this.dAbort_Click);
//
// dProgress
//
this.dProgress.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.dProgress.Location = new System.Drawing.Point(12, 12);
this.dProgress.Name = "dProgress";
this.dProgress.Size = new System.Drawing.Size(268, 23);
this.dProgress.TabIndex = 2;
//
// dWork
//
this.dWork.WorkerReportsProgress = true;
this.dWork.WorkerSupportsCancellation = true;
this.dWork.DoWork += new System.ComponentModel.DoWorkEventHandler(this.dWork_DoWork);
this.dWork.RunWorkerCompleted += new System.ComponentModel.RunWorkerCompletedEventHandler(this.dWork_RunWorkerCompleted);
this.dWork.ProgressChanged += new System.ComponentModel.ProgressChangedEventHandler(this.dWork_ProgressChanged);
//
// UpdateTool
//
this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
this.ClientSize = new System.Drawing.Size(292, 237);
this.Controls.Add(this.dProgress);
this.Controls.Add(this.dAbort);
this.Controls.Add(this.dOutput);
this.Name = "UpdateTool";
this.Text = "UpdateTool";
this.ResumeLayout(false);
this.PerformLayout();
}
#endregion
private System.Windows.Forms.TextBox dOutput;
private System.Windows.Forms.Button dAbort;
private System.Windows.Forms.ProgressBar dProgress;
private System.ComponentModel.BackgroundWorker dWork;
}
} | madeso/prettygood | dotnet/SeriesNamer/UpdateTool.Designer.py | Python | mit | 4,446 |
import sys
import json
def create_new_json():
data = {
'wins': 0,
'loses': 0,
'winrate': 0,
'goals': 0,
'goalsOnYou': 0,
'wins1v1': 0,
'loses1v1': 0,
'winrate1v1': 0,
'wins2v2': 0,
'loses2v2': 0,
        'winrate2v2': 0,
'wins3v3': 0,
'loses3v3': 0,
'winrate3v3': 0
}
encoded_json = (json.dumps(data))
with open('data.json', 'w') as data_file:
data_file.write(encoded_json)
| Killmat/RLStatTracker | main.py | Python | lgpl-3.0 | 557 |
import sys
import re
import os
def processOBJ(path, npath):
f = open(path, 'r')
fo = open(npath, 'w')
for line in f:
vertex = "v -?\d\.\d+ -?\d\.\d+ -?\d\.\d+"
face = "f \d+ \d+ \d+$"
tri = "f \d+ \d+ \d+ \d+$"
line = re.sub(r'(?P<num>\d+)\/\d+', r'\1', line)
if re.match(vertex,line):
fo.write(line)
elif re.match(tri,line):
fo.write(line)
elif re.match(face,line):
fo.write(line)
if __name__ == "__main__":
if len(sys.argv) != 3:
        print "NOT ENOUGH ARGUMENTS"
        sys.exit(1)
processOBJ(sys.argv[1], sys.argv[2])
# for i in range(1,19):
# processOBJ("tmpobj_stone%d.obj" % i, "asteroid%d.obj" % i) | kyleconroy/starfighter | processOBJ.py | Python | mit | 764 |
import numpy
from chainer import function_node
from chainer.utils import type_check
class Transpose(function_node.FunctionNode):
"""Permute the dimensions of an array."""
def __init__(self, axes=None):
self.axes = axes
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1,)
@property
def label(self):
return 'Transpose'
def forward(self, inputs):
x = inputs[0]
y = x.transpose(self.axes)
return y,
def backward(self, indexes, grad_outputs):
inv_axes = self.axes
if inv_axes:
axes_len = len(inv_axes)
inv_axes = tuple(numpy.argsort([ax % axes_len for ax in inv_axes]))
return Transpose(inv_axes).apply(grad_outputs)
def transpose(x, axes=None):
"""Permute the dimensions of an input variable without copy.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable to be transposed.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axes (tuple of ints): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns:
~chainer.Variable: Variable whose axes are permuted.
.. admonition:: Example
>>> x = np.array([[[0, 1, 2], [3, 4, 5]]], np.float32)
>>> x.shape
(1, 2, 3)
>>> y = F.transpose(x) # reverse the dimensions
>>> y.shape
(3, 2, 1)
>>> y.data
array([[[0.],
[3.]],
<BLANKLINE>
[[1.],
[4.]],
<BLANKLINE>
[[2.],
[5.]]], dtype=float32)
>>> y = F.transpose(x, axes=(1, 0, 2)) # swap 1st and 2nd axis
>>> y.shape
(2, 1, 3)
>>> y.data
array([[[0., 1., 2.]],
<BLANKLINE>
[[3., 4., 5.]]], dtype=float32)
"""
return Transpose(axes).apply((x,))[0]
| rezoo/chainer | chainer/functions/array/transpose.py | Python | mit | 2,002 |
'''
Python program to do number representation conversion:
1. from decimal integer to hexadecimal string
2. from hexadecimal string to decimal integer
'''
print("""Lab 03
From decimal to hexadecimal
---------------------------""")
# Get the decimal value
# Initialize an empty string
# Make one temporary variable for decimal value
num = dec = int(input("Give a positive integer in decimal representation: "))
str_hex = ""
# While the temporary variable num is non-zero
# Calculate the remainder of decimal value divided by 16
# If it yields more than 9, we have to convert it into char using ascii table
# code for the char [a, b, c, d, e, f] which starts from 97 to 102
# else use plain str() and concatenate to str_hex
# Divide num by 16 using integer quotient for the next iteration
while num:
digit = num % 16
if digit > 9:
str_hex = chr(digit + 87) + str_hex
else:
str_hex = str(digit) + str_hex
num = num // 16
# str_hex will be a valid hex value by the end of iterations
print("The hexadecimal representation of {} is 0x{}".format(dec, str_hex))
print("""
From hexadecimal to decimal
---------------------------""")
# Get the hexadecimal value
# Initialize new decimal and power variable
str_hex = input("Give a positive integer in hexadecimal representation: ")
str_hex = str_hex.lower().lstrip('0x')
dec = pwr = 0
# For each digit from last to first
# If it's an alphabet, use it's ascii table code subtracted by 87
# so it directly maps to it's hex value
# else turn it into plain int
# After that multiply it by power according to its position
# Add to decimal value
for char in str_hex[::-1]:
if char >= 'a':
dec += (ord(char) - 87) * 16 ** pwr
else:
dec += int(char) * 16 ** pwr
pwr += 1
# The decimal value will be complete by the end of iterations
print("The decimal representation of 0x{} is {}".format(str_hex, dec))
print("Thanks for using this program.")
input("Press Enter to continue ...") # Hold the screen display
| giovanism/TarungLab | lab/03/lab03_f.py | Python | mit | 2,013 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating program synthesis and evaluation data."""
import contextlib
import sys
import StringIO
import random
import os
class ListType(object):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return "[" + str(self.arg) + "]"
def __eq__(self, other):
if not isinstance(other, ListType):
return False
return self.arg == other.arg
def __hash__(self):
return hash(self.arg)
class VarType(object):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return str(self.arg)
def __eq__(self, other):
if not isinstance(other, VarType):
return False
return self.arg == other.arg
def __hash__(self):
return hash(self.arg)
class FunctionType(object):
def __init__(self, args):
self.args = args
def __str__(self):
return str(self.args[0]) + " -> " + str(self.args[1])
def __eq__(self, other):
if not isinstance(other, FunctionType):
return False
return self.args == other.args
def __hash__(self):
return hash(tuple(self.args))
class Function(object):
def __init__(self, name, arg_types, output_type, fn_arg_types = None):
self.name = name
self.arg_types = arg_types
self.fn_arg_types = fn_arg_types or []
self.output_type = output_type
Null = 100
## Functions
f_head = Function("c_head", [ListType("Int")], "Int")
def c_head(xs): return xs[0] if len(xs) > 0 else Null
f_last = Function("c_last", [ListType("Int")], "Int")
def c_last(xs): return xs[-1] if len(xs) > 0 else Null
f_take = Function("c_take", ["Int", ListType("Int")], ListType("Int"))
def c_take(n, xs): return xs[:n]
f_drop = Function("c_drop", ["Int", ListType("Int")], ListType("Int"))
def c_drop(n, xs): return xs[n:]
f_access = Function("c_access", ["Int", ListType("Int")], "Int")
def c_access(n, xs): return xs[n] if n >= 0 and len(xs) > n else Null
f_max = Function("c_max", [ListType("Int")], "Int")
def c_max(xs): return max(xs) if len(xs) > 0 else Null
f_min = Function("c_min", [ListType("Int")], "Int")
def c_min(xs): return min(xs) if len(xs) > 0 else Null
f_reverse = Function("c_reverse", [ListType("Int")], ListType("Int"))
def c_reverse(xs): return list(reversed(xs))
f_sort = Function("sorted", [ListType("Int")], ListType("Int"))
# def c_sort(xs): return sorted(xs)
f_sum = Function("sum", [ListType("Int")], "Int")
# def c_sum(xs): return sum(xs)
## Lambdas
# Int -> Int
def plus_one(x): return x + 1
def minus_one(x): return x - 1
def times_two(x): return x * 2
def negate(x): return x * (-1)  # distinct from the 'neg' sign predicate below
def div_two(x): return int(x/2)
def sq(x): return x**2
def times_three(x): return x * 3
def div_three(x): return int(x/3)
def times_four(x): return x * 4
def div_four(x): return int(x/4)
# Int -> Bool
def pos(x): return x > 0
def neg(x): return x < 0
def even(x): return x%2 == 0
def odd(x): return x%2 == 1
# Int -> Int -> Int
def add(x, y): return x + y
def sub(x, y): return x - y
def mul(x, y): return x * y
# HOFs
f_map = Function("map", [ListType("Int")],
ListType("Int"),
[FunctionType(["Int", "Int"])])
f_filter = Function("filter", [ListType("Int")],
ListType("Int"),
[FunctionType(["Int", "Bool"])])
f_count = Function("c_count", [ListType("Int")],
"Int",
[FunctionType(["Int", "Bool"])])
def c_count(f, xs): return len([x for x in xs if f(x)])
f_zipwith = Function("c_zipwith", [ListType("Int"), ListType("Int")],
ListType("Int"),
[FunctionType(["Int", "Int", "Int"])]) #FIX
def c_zipwith(f, xs, ys): return [f(x, y) for (x, y) in zip(xs, ys)]
f_scan = Function("c_scan", [ListType("Int")],
ListType("Int"),
[FunctionType(["Int", "Int", "Int"])])
def c_scan(f, xs):
out = xs
for i in range(1, len(xs)):
out[i] = f(xs[i], xs[i -1])
return out
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = StringIO.StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def evaluate(program_str, input_names_to_vals, default="ERROR"):
exec_str = []
for name, val in input_names_to_vals.iteritems():
exec_str += name + " = " + str(val) + "; "
exec_str += program_str
if type(exec_str) is list:
exec_str = "".join(exec_str)
with stdoutIO() as s:
# pylint: disable=bare-except
try:
exec exec_str + " print(out)"
return s.getvalue()[:-1]
except:
return default
# pylint: enable=bare-except
class Statement(object):
"""Statement class."""
def __init__(self, fn, output_var, arg_vars, fn_args=None):
self.fn = fn
self.output_var = output_var
self.arg_vars = arg_vars
self.fn_args = fn_args or []
def __str__(self):
return "%s = %s(%s%s%s)"%(self.output_var,
self.fn.name,
", ".join(self.fn_args),
", " if self.fn_args else "",
", ".join(self.arg_vars))
def substitute(self, env):
self.output_var = env.get(self.output_var, self.output_var)
self.arg_vars = [env.get(v, v) for v in self.arg_vars]
class ProgramGrower(object):
"""Grow programs."""
def __init__(self, functions, types_to_lambdas):
self.functions = functions
self.types_to_lambdas = types_to_lambdas
def grow_body(self, new_var_name, dependencies, types_to_vars):
"""Grow the program body."""
choices = []
for f in self.functions:
if all([a in types_to_vars.keys() for a in f.arg_types]):
choices.append(f)
f = random.choice(choices)
args = []
for t in f.arg_types:
possible_vars = random.choice(types_to_vars[t])
var = random.choice(possible_vars)
args.append(var)
dependencies.setdefault(new_var_name, []).extend(
[var] + (dependencies[var]))
fn_args = [random.choice(self.types_to_lambdas[t]) for t in f.fn_arg_types]
types_to_vars.setdefault(f.output_type, []).append(new_var_name)
return Statement(f, new_var_name, args, fn_args)
def grow(self, program_len, input_types):
"""Grow the program."""
var_names = list(reversed(map(chr, range(97, 123))))
dependencies = dict()
types_to_vars = dict()
input_names = []
for t in input_types:
var = var_names.pop()
dependencies[var] = []
types_to_vars.setdefault(t, []).append(var)
input_names.append(var)
statements = []
for _ in range(program_len - 1):
var = var_names.pop()
statements.append(self.grow_body(var, dependencies, types_to_vars))
statements.append(self.grow_body("out", dependencies, types_to_vars))
new_var_names = [c for c in map(chr, range(97, 123))
if c not in input_names]
new_var_names.reverse()
keep_statements = []
env = dict()
for s in statements:
if s.output_var in dependencies["out"]:
keep_statements.append(s)
env[s.output_var] = new_var_names.pop()
if s.output_var == "out":
keep_statements.append(s)
for k in keep_statements:
k.substitute(env)
return Program(input_names, input_types, ";".join(
[str(k) for k in keep_statements]))
class Program(object):
"""The program class."""
def __init__(self, input_names, input_types, body):
self.input_names = input_names
self.input_types = input_types
self.body = body
def evaluate(self, inputs):
"""Evaluate this program."""
if len(inputs) != len(self.input_names):
raise AssertionError("inputs and input_names have to"
"have the same len. inp: %s , names: %s" %
(str(inputs), str(self.input_names)))
inp_str = ""
for (name, inp) in zip(self.input_names, inputs):
inp_str += name + " = " + str(inp) + "; "
with stdoutIO() as s:
# pylint: disable=exec-used
exec inp_str + self.body + "; print(out)"
# pylint: enable=exec-used
return s.getvalue()[:-1]
def flat_str(self):
out = ""
for s in self.body.split(";"):
out += s + ";"
return out
def __str__(self):
out = ""
for (n, t) in zip(self.input_names, self.input_types):
out += n + " = " + str(t) + "\n"
for s in self.body.split(";"):
out += s + "\n"
return out
prog_vocab = []
prog_rev_vocab = {}
def tokenize(string, tokens=None):
"""Tokenize the program string."""
if tokens is None:
tokens = prog_vocab
tokens = sorted(tokens, key=len, reverse=True)
out = []
string = string.strip()
while string:
found = False
for t in tokens:
if string.startswith(t):
out.append(t)
string = string[len(t):]
found = True
break
if not found:
raise ValueError("Couldn't tokenize this: " + string)
string = string.strip()
return out
def clean_up(output, max_val=100):
o = eval(str(output))
if isinstance(o, bool):
return o
if isinstance(o, int):
if o >= 0:
return min(o, max_val)
else:
return max(o, -1 * max_val)
if isinstance(o, list):
return [clean_up(l) for l in o]
def make_vocab():
gen(2, 0)
def gen(max_len, how_many):
"""Generate some programs."""
functions = [f_head, f_last, f_take, f_drop, f_access, f_max, f_min,
f_reverse, f_sort, f_sum, f_map, f_filter, f_count, f_zipwith,
f_scan]
types_to_lambdas = {
FunctionType(["Int", "Int"]): ["plus_one", "minus_one", "times_two",
"div_two", "sq", "times_three",
"div_three", "times_four", "div_four"],
FunctionType(["Int", "Bool"]): ["pos", "neg", "even", "odd"],
FunctionType(["Int", "Int", "Int"]): ["add", "sub", "mul"]
}
tokens = []
for f in functions:
tokens.append(f.name)
for v in types_to_lambdas.values():
tokens.extend(v)
tokens.extend(["=", ";", ",", "(", ")", "[", "]", "Int", "out"])
tokens.extend(map(chr, range(97, 123)))
io_tokens = map(str, range(-220, 220))
if not prog_vocab:
prog_vocab.extend(["_PAD", "_EOS"] + tokens + io_tokens)
for i, t in enumerate(prog_vocab):
prog_rev_vocab[t] = i
io_tokens += [",", "[", "]", ")", "(", "None"]
grower = ProgramGrower(functions=functions,
types_to_lambdas=types_to_lambdas)
def mk_inp(l):
return [random.choice(range(-5, 5)) for _ in range(l)]
tar = [ListType("Int")]
inps = [[mk_inp(3)], [mk_inp(5)], [mk_inp(7)], [mk_inp(15)]]
save_prefix = None
outcomes_to_programs = dict()
tried = set()
counter = 0
choices = [0] if max_len == 0 else range(max_len)
while counter < 100 * how_many and len(outcomes_to_programs) < how_many:
counter += 1
length = random.choice(choices)
t = grower.grow(length, tar)
while t in tried:
length = random.choice(choices)
t = grower.grow(length, tar)
# print(t.flat_str())
tried.add(t)
outcomes = [clean_up(t.evaluate(i)) for i in inps]
outcome_str = str(zip(inps, outcomes))
if outcome_str in outcomes_to_programs:
outcomes_to_programs[outcome_str] = min(
[t.flat_str(), outcomes_to_programs[outcome_str]],
key=lambda x: len(tokenize(x, tokens)))
else:
outcomes_to_programs[outcome_str] = t.flat_str()
if counter % 5000 == 0:
print "== proggen: tried: " + str(counter)
print "== proggen: kept: " + str(len(outcomes_to_programs))
if counter % 250000 == 0 and save_prefix is not None:
print "saving..."
save_counter = 0
progfilename = os.path.join(save_prefix, "prog_" + str(counter) + ".txt")
iofilename = os.path.join(save_prefix, "io_" + str(counter) + ".txt")
prog_token_filename = os.path.join(save_prefix,
"prog_tokens_" + str(counter) + ".txt")
io_token_filename = os.path.join(save_prefix,
"io_tokens_" + str(counter) + ".txt")
with open(progfilename, "a+") as fp, \
open(iofilename, "a+") as fi, \
open(prog_token_filename, "a+") as ftp, \
open(io_token_filename, "a+") as fti:
for (o, p) in outcomes_to_programs.iteritems():
save_counter += 1
if save_counter % 500 == 0:
print "saving %d of %d" % (save_counter, len(outcomes_to_programs))
fp.write(p+"\n")
fi.write(o+"\n")
ftp.write(str(tokenize(p, tokens))+"\n")
fti.write(str(tokenize(o, io_tokens))+"\n")
return list(outcomes_to_programs.values())
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/neural_gpu/program_utils.py | Python | bsd-2-clause | 13,451 |
'''Export MISP event to VirusTotal Graph.'''
import base64
import json
from vt_graph_parser.importers.pymisp_response import from_pymisp_response
misperrors = {
'error': 'Error'
}
moduleinfo = {
'version': '0.1',
'author': 'VirusTotal',
'description': 'Send event to VirusTotal Graph',
'module-type': ['export']
}
mispattributes = {
'input': [
'hostname',
'domain',
'ip-src',
'ip-dst',
'md5',
'sha1',
'sha256',
'url',
'filename|md5',
'filename'
]
}
moduleconfig = [
'vt_api_key',
'fetch_information',
'private',
'fetch_vt_enterprise',
'expand_one_level',
'user_editors',
'user_viewers',
'group_editors',
'group_viewers'
]
def handler(q=False):
"""Expansion handler.
Args:
q (bool, optional): module data. Defaults to False.
Returns:
[str]: VirusTotal graph links
"""
if not q:
return False
request = json.loads(q)
if not request.get('config') or not request['config'].get('vt_api_key'):
misperrors['error'] = 'A VirusTotal api key is required for this module.'
return misperrors
config = request['config']
api_key = config.get('vt_api_key')
fetch_information = config.get('fetch_information') or False
private = config.get('private') or False
fetch_vt_enterprise = config.get('fetch_vt_enterprise') or False
expand_one_level = config.get('expand_one_level') or False
user_editors = config.get('user_editors')
if user_editors:
user_editors = user_editors.split(',')
user_viewers = config.get('user_viewers')
if user_viewers:
user_viewers = user_viewers.split(',')
group_editors = config.get('group_editors')
if group_editors:
group_editors = group_editors.split(',')
group_viewers = config.get('group_viewers')
if group_viewers:
group_viewers = group_viewers.split(',')
graphs = from_pymisp_response(
request, api_key, fetch_information=fetch_information,
private=private, fetch_vt_enterprise=fetch_vt_enterprise,
user_editors=user_editors, user_viewers=user_viewers,
group_editors=group_editors, group_viewers=group_viewers,
expand_node_one_level=expand_one_level)
links = []
for graph in graphs:
graph.save_graph()
links.append(graph.get_ui_link())
    # This file will contain one VirusTotal graph link for each exported event
file_data = str(base64.b64encode(
bytes('\n'.join(links), 'utf-8')), 'utf-8')
return {'response': [], 'data': file_data}
def introspection():
modulesetup = {
'responseType': 'application/txt',
'outputFileExtension': 'txt',
'userConfig': {},
'inputSource': []
}
return modulesetup
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
| MISP/misp-modules | misp_modules/modules/export_mod/vt_graph.py | Python | agpl-3.0 | 2,930 |
import pypyodbc
from group_plugin import GroupPlugin, Group
class ODBCGroupPlugin(GroupPlugin):
def __init__(self):
super(ODBCGroupPlugin, self).__init__()
self.connection_str = self.get_conf_option('connection_str')
self.groups_sql = self.get_conf_option('groups_sql')
self.change_group_sql = self.get_conf_option('change_group_sql')
def get_list(self):
groups = []
connection = None
try:
connection = pypyodbc.connect(self.connection_str)
rows = connection.cursor().execute(self.groups_sql)
for row in rows:
groups.append(Group(row[0], row[1]))
return groups
except pypyodbc.DatabaseError as ex:
raise
finally:
if connection is not None:
connection.close()
def change_group(self, user_id, group_id):
connection = None
try:
connection = pypyodbc.connect(self.connection_str)
connection.cursor().execute(self.change_group_sql, (group_id, user_id))
connection.commit()
except:
raise
finally:
if connection is not None:
connection.close()
| stillinsecure/acl_audit | plugins/odbc_group_plugin.py | Python | mit | 1,239 |
#ChipBag.py
#Implements a container for chips
#Created by: Andrew Davis
#Created on: 1/9/2016
#Open source (MIT license)
#import statements
from Chip import *
#class definition
class ChipBag(object):
#constructor
def __init__(self, init_value):
self.__chips = [] #the array that stores the chips
self.__value = 0 #the value of the bag
self.simplify(init_value) #initialize the bag with the proper chips
#representation and string methods
def __repr__(self):
return "Chip count: $" + str(self.__value)
def __str__(self):
return self.__repr__() #use the representation method
#getters
def get_value(self):
return self.__value #return the value field
def get_chip_count(self):
        return len(self.__chips)
value = property(get_value) #create a getter property for the value
chip_count = property(get_chip_count) #create a getter property for the chip count
#get_chip_amt method - returns the number of any type of chip in the bag
def get_chip_amt(self, denom):
test = []
        for chip in self.__chips:
if chip.value == denom:
test.append(chip)
return len(test)
#pay method - removes a set amount of money from the bag, returns the new total,
#then reinitializes the bag to have the right chips
def pay(self, amt):
self.__value -= amt #subtract the amount
self.simplify(self.__value) #fix the chips
return self.__value
#receive method - adds money to the bag, returns the new total, and
#reinitializes the bag to have the right chips
def receive(self, amt):
return self.pay(-amt) #use pay() in reverse
    #simplify method - takes a value and rebuilds the bag with the
    #fewest chips that add up to that value
    def simplify(self, value):
        self.__chips = [] #wipe the bag
        self.__value = value #the total the chips must add up to
        #denominations from largest to smallest, paired with their chip class
        denominations = [(5000, GrayChip), (1000, YellowChip), (500, PurpleChip),
                         (100, BlackChip), (50, BlueChip), (25, GreenChip),
                         (5, RedChip), (1, WhiteChip)]
        remaining = value
        for denom, chip_cls in denominations:
            numOfChips = int(remaining / denom) #greedy: take as many as fit
            for i in range(0, numOfChips):
                self.__chips.append(chip_cls())
            remaining -= (numOfChips * denom)
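#Illustrative usage (assumes the chip classes imported from Chip above):
if __name__ == "__main__":
    bag = ChipBag(137) #1 black, 1 green, 2 red, 2 white
    print(bag) #Chip count: $137
    print(bag.get_chip_amt(100)) #1, the single black chip
    bag.pay(37)
    print(bag) #Chip count: $100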
| techgineer/casino-sim | src/ChipBag.py | Python | mit | 4,560 |
a = int(input())
b = int(input())
s = 0
c = 0
for step in range(a, b + 1):
    if step % 3 == 0:
        s = s + step
        c = c + 1
# assumes [a, b] contains at least one multiple of 3
print(s / c)
| maisilex/Lets-Begin-Python | forAB.py | Python | mit | 159 |
# -*- coding: utf-8 -*-
# (C) 2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
from __future__ import print_function
from opentamiltests import *
from solthiruthi.suggestions import norvig_suggestor
class WordsSuggestor(unittest.TestCase):
def test_Norvig_suggestor(self):
word = u"ஆங்கிலம்"
opts1 = norvig_suggestor(word, None, 1)
# too much memory
# opts2 = norvig_suggestor( word, None, 2)
opts2 = []
self.assertEqual(list(map(len, [opts1, opts2])), [5150, 0])
return
if __name__ == "__main__":
unittest.main()
| Ezhil-Language-Foundation/open-tamil | tests/word_suggestor.py | Python | mit | 628 |
import os
from peewee import MySQLDatabase, Model, CharField, ForeignKeyField, DateTimeField, TextField, PrimaryKeyField
db = MySQLDatabase(os.environ.get('DB_NAME'), user=os.environ.get('DB_USERNAME'), password=os.environ.get('DB_PASSWORD'),
host=os.environ.get('DB_HOST'))
class BaseModel(Model):
class Meta:
database = db
# class ExpenseCategory(BaseModel):
# name = CharField()
# description = CharField()
#
#
# class Expense(BaseModel):
# description = CharField()
# amount = CharField()
# date = DateTimeField()
# category = ForeignKeyField(ExpenseCategory, related_name='expenses')
class TaskProject(BaseModel):
id = PrimaryKeyField()
name = CharField()
description = CharField()
class TaskCategory(BaseModel):
id = PrimaryKeyField()
name = CharField()
description = CharField()
class TaskStatus(BaseModel):
id = PrimaryKeyField()
name = CharField()
description = CharField()
class TaskGoal(BaseModel):
id = PrimaryKeyField()
name = CharField()
description = CharField()
class Task(BaseModel):
id = PrimaryKeyField()
assign_date = DateTimeField()
assigned_by = CharField()
project = ForeignKeyField(TaskProject, related_name='tasks')
take_action = TextField()
assigned_to = CharField()
category = ForeignKeyField(TaskCategory, related_name='tasks')
status = ForeignKeyField(TaskStatus, related_name='tasks')
edoc = DateTimeField()
doc = DateTimeField()
goal = ForeignKeyField(TaskGoal, related_name='tasks')
action_taken = TextField()
assumption = TextField()
# ExpenseCategory.create_table(True)
# Expense.create_table(True)
try:
db.connect()
TaskProject.create_table(True)
TaskCategory.create_table(True)
TaskStatus.create_table(True)
TaskGoal.create_table(True)
Task.create_table(True)
except:
pass | arundhaj/prod-api | chalicelib/models.py | Python | mit | 1,912 |
from . import constants
class RestUpError(Exception):
pass
class HttpError(RestUpError):
status = constants.ERROR
msg = "Application Error."
def __init__(self, msg=None):
if not msg:
msg = self.__class__.msg
super(HttpError, self).__init__(msg)
class BadRequest(HttpError):
status = constants.BAD_REQUEST
msg = "Bad Request"
class Unauthorized(HttpError):
status = constants.UNAUTHORIZED
msg = "Unauthorized"
class Forbidden(HttpError):
status = constants.FORBIDDEN
msg = "Forbidden"
class NotFound(HttpError):
status = constants.NOT_FOUND
msg = "Not Found"
class NotAllowed(HttpError):
status = constants.METHOD_NOT_ALLOWED
msg = "Method Not Allowed"
class NotImplemented(HttpError):
status = constants.NOT_IMPLEMENTED
msg = "Method not Implemented"
| FFX01/django-restup | restup/exceptions.py | Python | bsd-2-clause | 862 |
# -*- coding: utf-8 -*-
#
# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from oslo.config import cfg
from mistral.actions import base
from mistral.actions import std_actions
from mistral import exceptions as exc
from mistral.workbook import tasks
from mistral.workbook import actions
from mistral.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_ACTION_CTX_PARAM = 'action_context'
_NAMESPACES = {}
def _find_or_create_namespace(full_name):
name = full_name.split('.')[-1]
ns = _NAMESPACES.get(name)
if not ns:
ns = base.Namespace(full_name)
_NAMESPACES[name] = ns
return ns
def get_registered_namespaces():
return _NAMESPACES.copy()
def _register_action_classes():
cfg.CONF.import_opt('action_plugins', 'mistral.config')
for py_ns in cfg.CONF.action_plugins:
ns = _find_or_create_namespace(py_ns)
ns.log()
def get_action_class(action_full_name):
"""Finds action class by full action name (i.e. 'namespace.action_name').
:param action_full_name: Full action name (that includes namespace).
:return: Action class or None if not found.
"""
arr = action_full_name.split('.')
if len(arr) != 2:
raise exc.ActionException('Invalid action name: %s' %
action_full_name)
ns = _NAMESPACES.get(arr[0])
if not ns:
return None
return ns.get_action_class(arr[1])
def _get_action_context(db_task):
return {
'workbook_name': db_task['workbook_name'],
'execution_id': db_task['execution_id'],
'task_id': db_task['id'],
'task_name': db_task['name'],
'task_tags': db_task['tags']
}
def _has_action_context_param(action_cls):
arg_spec = inspect.getargspec(action_cls.__init__)
return _ACTION_CTX_PARAM in arg_spec.args
def _create_adhoc_action(db_task):
task_spec = tasks.TaskSpec(db_task['task_spec'])
full_action_name = task_spec.get_full_action_name()
# TODO(rakhmerov): Fix model attributes during refactoring.
raw_action_spec = db_task['action_spec']
if not raw_action_spec:
return None
action_spec = actions.ActionSpec(raw_action_spec)
LOG.info('Using ad-hoc action [action=%s, db_task=%s]' %
(full_action_name, db_task))
# Create an ad-hoc action.
base_cls = get_action_class(action_spec.clazz)
action_context = None
if _has_action_context_param(base_cls):
action_context = _get_action_context(db_task)
if not base_cls:
msg = 'Ad-hoc action base class is not registered ' \
'[workbook_name=%s, action=%s, base_class=%s]' % \
(db_task['workbook_name'], full_action_name, base_cls)
raise exc.ActionException(msg)
action_params = db_task['parameters'] or {}
return std_actions.AdHocAction(action_context,
base_cls,
action_spec,
**action_params)
def create_action(db_task):
task_spec = tasks.TaskSpec(db_task['task_spec'])
full_action_name = task_spec.get_full_action_name()
action_cls = get_action_class(full_action_name)
if not action_cls:
# If action is not found in registered actions try to find ad-hoc
# action definition.
action = _create_adhoc_action(db_task)
if action:
return action
else:
msg = 'Unknown action [workbook_name=%s, action=%s]' % \
(db_task['workbook_name'], full_action_name)
raise exc.ActionException(msg)
action_params = db_task['parameters'] or {}
if _has_action_context_param(action_cls):
action_params[_ACTION_CTX_PARAM] = _get_action_context(db_task)
try:
return action_cls(**action_params)
except Exception as e:
raise exc.ActionException('Failed to create action [db_task=%s]: %s' %
(db_task, e))
# Registering actions on module load.
_register_action_classes()
| dmitryilyin/mistral | mistral/actions/action_factory.py | Python | apache-2.0 | 4,643 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for updating the granular Configuration parameter with scope and resource id provided.
"""
#Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
class TestUpdateConfigWithScope(cloudstackTestCase):
"""
Test to update a configuration (global setting) at various scopes
"""
def setUp(self):
self.apiClient = self.testClient.getApiClient()
@attr(tags=["simulator", "devcloud", "basic", "advanced"])
def test_UpdateConfigParamWithScope(self):
"""
test update configuration setting at zone level scope
@return:
"""
updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
updateConfigurationCmd.name = "use.external.dns"
updateConfigurationCmd.value = "true"
updateConfigurationCmd.scopename = "zone"
updateConfigurationCmd.scopeid = 1
updateConfigurationResponse = self.apiClient.updateConfiguration(updateConfigurationCmd)
self.debug("updated the parameter %s with value %s"%(updateConfigurationResponse.name, updateConfigurationResponse.value))
listConfigurationsCmd = listConfigurations.listConfigurationsCmd()
listConfigurationsCmd.cfgName = updateConfigurationResponse.name
listConfigurationsCmd.scopename = "zone"
listConfigurationsCmd.scopeid = 1
listConfigurationsResponse = self.apiClient.listConfigurations(listConfigurationsCmd)
self.assertNotEqual(len(listConfigurationsResponse), 0, "Check if the list API \
returns a non-empty response")
for item in listConfigurationsResponse:
if item.name == updateConfigurationResponse.name:
configParam = item
self.assertEqual(configParam.value, updateConfigurationResponse.value, "Check if the update API returned \
is the same as the one we got in the list API")
def tearDown(self):
"""
Reset the configuration back to false
@return:
"""
updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()
updateConfigurationCmd.name = "use.external.dns"
updateConfigurationCmd.value = "false"
updateConfigurationCmd.scopename = "zone"
updateConfigurationCmd.scopeid = 1
self.apiClient.updateConfiguration(updateConfigurationCmd)
| mufaddalq/cloudstack-datera-driver | test/integration/smoke/test_global_settings.py | Python | apache-2.0 | 3,382 |
import os
from autotest.client import utils
from autotest.client.shared import error
def run_unittest_kvmctl(test, params, env):
"""
This is kvm userspace unit test, use kvm test harness kvmctl load binary
test case file to test various functions of the kvm kernel module.
The output of all unit tests can be found in the test result dir.
@param test: QEMU test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
case = params.get("case")
srcdir = params.get("srcdir", test.srcdir)
unit_dir = os.path.join(srcdir, "kvm_userspace", "kvm", "user")
if not os.path.isdir(unit_dir):
os.makedirs(unit_dir)
os.chdir(unit_dir)
cmd = "./kvmctl test/x86/bootstrap test/x86/%s.flat" % case
try:
results = utils.system_output(cmd)
except error.CmdError:
raise error.TestFail("Unit test %s failed" % case)
result_file = os.path.join(test.resultsdir, case)
utils.open_write_close(result_file, results)
| ehabkost/virt-test | qemu/tests/unittest_kvmctl.py | Python | gpl-2.0 | 1,048 |
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_monitor_tcp import ApiParameters
from library.modules.bigip_gtm_monitor_tcp import ModuleParameters
from library.modules.bigip_gtm_monitor_tcp import ModuleManager
from library.modules.bigip_gtm_monitor_tcp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_monitor_tcp import ApiParameters
from ansible.modules.network.f5.bigip_gtm_monitor_tcp import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_monitor_tcp import ModuleManager
from ansible.modules.network.f5.bigip_gtm_monitor_tcp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
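# Illustrative usage sketch: load_fixture() caches the parsed JSON per path,
# so repeated calls within one run return the in-memory copy. The fixture
# name below is one actually referenced by the tests in this module.
#
#     args = load_fixture('load_gtm_monitor_tcp_1.json')
#     p = ApiParameters(params=args)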
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='/Common/my-tcp',
send='the send string',
receive='the receive string',
ip='1.1.1.1',
port='80',
interval='10',
timeout='20',
ignore_down_response=True,
transparent=False,
probe_timeout='30',
reverse=True
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/my-tcp'
assert p.send == 'the send string'
assert p.receive == 'the receive string'
assert p.destination == '1.1.1.1:80'
assert p.ip == '1.1.1.1'
assert p.port == 80
assert p.interval == 10
assert p.timeout == 20
assert p.ignore_down_response is True
assert p.transparent is False
assert p.probe_timeout == 30
assert p.reverse is True
def test_api_parameters(self):
args = load_fixture('load_gtm_monitor_tcp_1.json')
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/tcp'
assert p.send == 'the send string'
assert p.receive == 'the receive string'
assert p.destination == '1.1.1.1:80'
assert p.ip == '1.1.1.1'
assert p.port == 80
assert p.interval == 30
assert p.timeout == 120
assert p.ignore_down_response is False
assert p.transparent is True
assert p.probe_timeout == 5
assert p.reverse is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
try:
self.p1 = patch('library.modules.bigip_gtm_monitor_tcp.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_gtm_monitor_tcp.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.p1.stop()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_change_ip(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
port=80,
interval=20,
timeout=30,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_gtm_monitor_tcp_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['ip'] == '10.10.10.10'
def test_change_ignore_down_response(self, *args):
set_module_args(dict(
name='foo',
ignore_down_response=True,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
current = ApiParameters(params=load_fixture('load_gtm_monitor_tcp_1.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[True, True])
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['ignore_down_response'] is True
| alxgu/ansible | test/units/modules/network/f5/test_bigip_gtm_monitor_tcp.py | Python | gpl-3.0 | 6,969 |
#
# Copyright 2015-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from vdsm.common import commands
from . import constants
AUTOMATIC = "auto"
_SYS_ONLINE_CPUS = "/sys/devices/system/cpu/online"
def get(pid):
"""
Get the affinity of a process, by its <pid>, using taskset command.
    We assume all threads of the process have the same affinity, because
    this is the only use case VDSM cares about - and requires.
    Return a frozenset of ints, each one a cpu index on which the
    process can run.
Example: frozenset([0, 1, 2, 3])
Raise cmdutils.Error on failure.
"""
command = [constants.EXT_TASKSET, '--pid', str(pid)]
out = commands.run(command, reset_cpu_affinity=False).splitlines()
return _cpu_set_from_output(out[-1])
def set(pid, cpu_set, all_tasks=False):
"""
Set the affinity of a process, by its <pid>, using taskset command.
if all_tasks evaluates to True, set the affinity for all threads of
the target process.
<cpu_set> must be an iterable whose items are ints which represent
cpu indices, on which the process will be allowed to run; the format
is the same as what the get() function returns.
Raise cmdutils.Error on failure.
"""
command = [constants.EXT_TASKSET]
if all_tasks:
command.append("--all-tasks")
command.extend((
'--pid',
'--cpu-list', ','.join(str(i) for i in cpu_set),
str(pid)
))
commands.run(command, reset_cpu_affinity=False)
def online_cpus():
"""
Return a frozenset which contains identifiers of online CPUs,
as non-negative integers.
"""
with open(_SYS_ONLINE_CPUS, 'r') as src:
return cpulist_parse(src.readline())
def pick_cpu(cpu_set):
"""
Select the best CPU VDSM should pin to.
`cpu_set' is any iterable which produces the sequence of all
available CPUs, among which VDSM should pick the best one.
"""
cpu_list = sorted(cpu_set)
return cpu_list[:2][-1]
def _cpu_set_from_output(line):
"""
Parse the output of taskset, in the format
pid ${PID}'s current affinity mask: ${HEXMASK}
    and return a frozenset of ints, each one a cpu index.
"""
hexmask = line.decode().rsplit(":", 1)[1].strip()
mask = int(hexmask, 16)
return frozenset(i for i in range(mask.bit_length()) if mask & (1 << i))
def cpulist_parse(cpu_range):
"""
Expand the kernel cpulist syntax (e.g. 0-2,5) into a plain
frozenset of integers (e.g. frozenset([0,1,2,5]))
The input format is like the content of the special file
/sys/devices/system/cpu/online
or the output of the 'taskset' and 'lscpu' tools.
"""
cpus = []
for item in cpu_range.split(','):
if '-' in item:
begin, end = item.split('-', 1)
cpus.extend(range(int(begin), int(end) + 1))
else:
cpus.append(int(item))
return frozenset(cpus)
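# Illustrative sketch (not part of the vdsm API): the pure helpers in this
# module can be exercised without taskset or /sys, e.g.
#
#     cpulist_parse('0-2,5')    # -> frozenset of {0, 1, 2, 5}
#     pick_cpu({0, 1, 2, 3})    # -> 1, the second-lowest online CPU
#     _cpu_set_from_output(b"pid 1's current affinity mask: f")
#                               # -> frozenset of {0, 1, 2, 3}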
| nirs/vdsm | lib/vdsm/taskset.py | Python | gpl-2.0 | 3,779 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('comercial', '0007_auto_20141006_1852'),
]
operations = [
migrations.AlterField(
model_name='contratofechado',
name='documento_proposto_legal',
field=models.CharField(max_length=100, verbose_name=b'Documento Legal do Proposto (CPF)', blank=True),
),
]
| dudanogueira/microerp | microerp/comercial/migrations/0008_auto_20141023_1202.py | Python | lgpl-3.0 | 495 |
#!/usr/bin/env python
# Copyright (c) 2007 ActiveState Software Inc.
"""The doit test suite entry point."""
import os
from os.path import dirname, abspath
import sys
import logging
import testlib
testdir_from_ns = {
None: os.curdir,
}
def setup():
sys.path.insert(0, dirname(dirname(abspath(__file__))))
if __name__ == "__main__":
retval = testlib.harness(testdir_from_ns=testdir_from_ns,
setup_func=setup)
sys.exit(retval)
| ActiveState/mk | test/test.py | Python | mit | 477 |
from .client import Client, HTTPError
from .publisher import Publisher, DebugPublisher
| RealGeeks/lead_router.py | leadrouter/__init__.py | Python | mit | 88 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('POS Setting')
class TestPOSSetting(unittest.TestCase):
pass
| gangadhar-kadam/verve_erp | erpnext/accounts/doctype/pos_setting/test_pos_setting.py | Python | agpl-3.0 | 278 |
## Copyright (C) 2017 Oscar Diaz Barriga
## This file is part of Comp-Process-STPatterns.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# /* Tumbes */
# select count(*) from t_boya_medicion_minpres
# where latitud < -3.392 and latitud > -4.078
# /* Piura */
# select count(*) from t_boya_medicion_minpres
# where latitud < -4.078 and latitud > -6.382
# /* Lambayeque */
# select count(*) from t_boya_medicion_minpres
# where latitud < -6.382 and latitud > -7.177
# /* La Libertad */
# select count(*) from t_boya_medicion_minpres
# where latitud < -7.177 and latitud > -8.9722
# /* Ancash*/
# select count(*) from t_boya_medicion_minpres
# where latitud < -8.9722 and latitud > -10.593
import glob, os
import psycopg2
import datetime
db_user = "USER"
db_host = "IP_ADDRESS"
db_password = "PASSWORD"
output = "./Output/datos_total_boya3_est7_ca1.csv"
class Departamento (object):
def __init__(self, nombre, latitud_min, latitud_max):
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
class Zona (object):
def __init__(self, start_date, end_date, nombre, latitud_min, latitud_max, temperatura, presion, salinidad):
self.start_date = start_date
self.end_date = end_date
self.nombre = nombre
self.latitud_min = latitud_min
self.latitud_max = latitud_max
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class boya_data (object):
def __init__(self, temperatura, presion, salinidad):
self.temperatura = temperatura
self.presion = presion
self.salinidad = salinidad
class estacion_data (object):
# def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar):
# self.est_temperatura_m = temperatura_m
# self.est_punto_rocio_m= punto_rocio_m
# self.est_presion_nivel_mar = presion_nivel_mar
def __init__(self, temperatura_m, punto_rocio_m, presion_nivel_mar,
presion_est_media, velocidad_viento_media, temperatura_maxima,
temperatura_minima):
self.est_temperatura_m = temperatura_m
self.est_punto_rocio_m= punto_rocio_m
self.est_presion_nivel_mar = presion_nivel_mar
self.est_presion_est_media = presion_est_media
self.est_temperatura_minima = temperatura_minima
self.est_temperatura_maxima = temperatura_maxima
self.est_velocidad_viento_media = velocidad_viento_media
class caudal_data (object):
def __init__(self, caudal):
self.caudal = caudal
def database_select_date_between(start_date, end_date):
try:
conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < -3.392 and latitud > -4.078 AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
for row in rows:
print " ", row
def database_select_date_between_lat(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select count(*) from t_boya_medicion_minpres where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
for row in rows:
count = row[0]
return count
def database_select_date_between_lat_avg(start_latitud, end_latitud, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "select avg(temp), avg(pres), avg(psal) from t_boya_medicion_minpres " \
" where latitud < %s AND latitud > %s AND (" \
" concat_ws('-',ano,mes,dia)::date >= '%s'::date" \
" AND" \
" concat_ws('-',ano,mes,dia)::date <= '%s'::date);"%(start_latitud, end_latitud, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = boya_data(row[0], row[1], row[2])
return b_data
def database_select_date_between_lat_avg_estacion(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = "Select avg(em.temp_m), avg(em.punto_rocio_m), avg(em.presion_nivel_mar), " \
"avg(em.presion_est_m), avg(em.veloc_viento_m), avg(em.temp_max), avg(em.temp_min) " \
" From t_region r, t_estacion e, t_estacion_medicion em " \
" Where e.id_region = r.id_region AND r.nombre like '%s' " \
" AND em.id_estacion = e.id_estacion " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
b_data = None
for row in rows:
b_data = estacion_data(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
return b_data
def database_select_date_between_lat_avg_caudal(region, start_date, end_date):
try:
        conn = psycopg2.connect("dbname='elnino' user='%s' host='%s' password='%s'"%(db_user, db_host, db_password))
except Exception, e:
print "I am unable to connect to the database " + e.pgerror
cur = conn.cursor()
try:
query = " Select avg(c.caudal) From t_caudal_medicion c " \
" Where c.region like '%s' AND c.caudal != 9999 " \
" AND concat_ws('-',ano,mes,dia)::date >= '%s'::date " \
" AND concat_ws('-',ano,mes,dia)::date <= '%s'::date;"%(region, start_date, end_date)
# print query
cur.execute(query)
except Exception, e:
print "I can't SELECT from bar " + e.pgerror
rows = cur.fetchall()
count = 0
c_data = None
for row in rows:
c_data = caudal_data(row[0])
return c_data
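# Illustrative call sketch (requires the 'elnino' PostgreSQL database and the
# credentials configured above; region and dates are example values):
#
#     c = database_select_date_between_lat_avg_caudal('Tumbes',
#                                                     '2015-03-05',
#                                                     '2015-03-23')
#     print c.caudal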
# def count_boyas_range_space_and_time(i, start_date_unix, step_date, latitude, longitude):
# t_start = start_date_unix + i * step_date
# t_end = start_date_unix + (i + 1) * step_date
# start_date = datetime.datetime.fromtimestamp(t_start).strftime("%Y-%m-%d")
# end_date = datetime.datetime.fromtimestamp(t_end).strftime("%Y-%m-%d")
# count = database_select_date_between_lat(latitude, longitude, start_date, end_date)
# print "%s -- %s -> %s" % (start_date, end_date, count)
# return count
if __name__ == '__main__':
    # database = 1422766800
maximo = 1467522000
periodo = 18
delta = 0
toDate = 24*3600*periodo
#n = 27
#27
# 26, 16 = 8 8
# 26, 18 = 8 10
# 26, 20 = 10 10
# 24, 20 = 9 10
# 22, 22 = 12, 12 2015-03-18
# 20, 24 = 13, 11
# 14, 34 = 21, 13
departamentos = []
departamentos.append(Departamento("Tumbes", "-3.392", "-4.078"))
departamentos.append(Departamento("Piura", "-4.078", "-6.382"))
departamentos.append(Departamento("Lambayeque", "-6.382", "-7.177"))
departamentos.append(Departamento("La Libertad", "-7.177", "-8.9722"))
departamentos.append(Departamento("Ancash", "-8.9722", "-10.593"))
rango_fechas = []
rango_fechas_status = []
start_date_unix = int(datetime.datetime.strptime("2015-03-05","%Y-%m-%d").strftime("%s"))
n = (maximo - start_date_unix) / (24 * 3600 * periodo)
print n
print "2015-03-05 --- ",
print datetime.datetime.fromtimestamp(maximo).strftime("%Y-%m-%d")
for i in range(n):
t_start = start_date_unix + i * toDate
t_end = start_date_unix + (i + 1) * toDate
start_date = datetime.datetime.fromtimestamp(t_start).strftime("%Y-%m-%d")
end_date = datetime.datetime.fromtimestamp(t_end).strftime("%Y-%m-%d")
rango_fechas.append([start_date, end_date, 1])
print (start_date + " - " + end_date)
for d in range(5):
print "--------- %s -------------" % departamentos[d].nombre
t_count = 0
cero_count = 1
count = 0
i = 0
for r in rango_fechas:
start_date = r[0]
end_date = r[1]
count = database_select_date_between_lat(departamentos[d].latitud_min, departamentos[d].latitud_max, start_date, end_date)
# print "%s -- %s -> %s" % (start_date, end_date, count)
if count > 0:
t_count = t_count + 1
rango_fechas[i][2] = 1*rango_fechas[i][2]
else:
rango_fechas[i][2] = 0*rango_fechas[i][2]
# print "*%s -- %s -> %s" % (start_date, end_date, count)
cero_count = cero_count*count
i += 1
print "Fallo %s ,"%(n - t_count),
print "OK : %s"%t_count
rango_fechas_ok = []
for i in rango_fechas:
if i[2] != 0:
rango_fechas_ok.append([i[0],i[1]])
print rango_fechas_ok
with open(output, 'w') as the_file:
the_file.write("region, boya_temp, boya_salinidad, est_temp, est_pto_rocio, est_presion, "
"est_presion_est_m, est_veloc_viento_m, est_temp_max, est_temp_min, caudal\n")
for d in range(5):
print "--------- %s -------------" % departamentos[d].nombre
t_count = 0
cero_count = 1
count = 0
for r in rango_fechas_ok:
start_date = r[0]
end_date = r[1]
data_boya_avg = database_select_date_between_lat_avg(departamentos[d].latitud_min,
departamentos[d].latitud_max,
start_date, end_date)
data_estacion_avg = database_select_date_between_lat_avg_estacion(departamentos[d].nombre, start_date, end_date)
data_caudal_avg = database_select_date_between_lat_avg_caudal(departamentos[d].nombre, start_date, end_date)
print "%s, boya_temp :%s\tboya_sal :%s\t" \
"est_temp: %s\test_pto_rocio :%s\test_presion :%s\t" \
"est_presion_est_m :%s\test_veloc_viento_m :%s\test_temp_max :%s\test_temp_min :%s\t" \
"caudal:%s" % \
(departamentos[d].nombre, data_boya_avg.temperatura, data_boya_avg.salinidad,
data_estacion_avg.est_temperatura_m, data_estacion_avg.est_punto_rocio_m,
data_estacion_avg.est_presion_nivel_mar, data_estacion_avg.est_presion_est_media,
data_estacion_avg.est_velocidad_viento_media, data_estacion_avg.est_temperatura_maxima, data_estacion_avg.est_temperatura_minima,
data_caudal_avg.caudal)
linea = "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % \
(departamentos[d].nombre, data_boya_avg.temperatura, data_boya_avg.salinidad,
data_estacion_avg.est_temperatura_m, data_estacion_avg.est_punto_rocio_m,
data_estacion_avg.est_presion_nivel_mar, data_estacion_avg.est_presion_est_media,
data_estacion_avg.est_velocidad_viento_media, data_estacion_avg.est_temperatura_maxima, data_estacion_avg.est_temperatura_minima,
data_caudal_avg.caudal)
the_file.write(linea)
| oscardbpucp/Comp-Process-STPatterns | clean_and_pretreatment/datos_total_fase1v3-mod.py | Python | gpl-3.0 | 13,307 |
def GravarRegistro(prmCodigo,prmNome,prmValor):
ponteiro = open('BancoDados.db','a')
ponteiro.write(prmCodigo+'|'+prmNome+'|'+prmValor+'\n')
ponteiro.close()
return | ronas/PythonGNF | Artur/bancodadoslib.py | Python | gpl-3.0 | 195 |
"""Student API tests."""
from rest_framework import status
from profiles.factory import StudentFactory
from profiles.serializers import StudentSerializer
from tests.utils.api import HyperlinkedAPITestCase
class StudentEndpointsTest(HyperlinkedAPITestCase):
"""Test access to the students endpoints."""
factory = StudentFactory
serializer_class = StudentSerializer
def perform_list(self):
response = self.client.get('/api/students/')
return response
def perform_retrieve(self, obj=None):
if obj is None:
obj = self.factory.create()
response = self.client.get('/api/students/{obj.pk}/'.format(obj=obj))
return response
def test_list(self):
self.assertRequiresAuth(
self.perform_list,
expected_status_code=status.HTTP_200_OK)
def test_retrieve(self):
self.assertRequiresAuth(
self.perform_retrieve,
expected_status_code=status.HTTP_200_OK)
| oser-cs/oser-website | tests/test_profiles/test_student_api.py | Python | gpl-3.0 | 989 |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class HotelSoldOrdersIncrementGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.end_modified = None
self.need_guest = None
self.need_message = None
self.page_no = None
self.page_size = None
self.start_modified = None
self.status = None
self.use_has_next = None
def getapiname(self):
return 'taobao.hotel.sold.orders.increment.get'
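# Illustrative usage sketch (hypothetical field values; assumes valid TOP app
# credentials and the getResponse() entry point inherited from RestApi):
#
#     req = HotelSoldOrdersIncrementGetRequest()
#     req.start_modified = '2014-12-01 00:00:00'
#     req.end_modified = '2014-12-17 00:00:00'
#     req.page_no = 1
#     req.page_size = 20
#     resp = req.getResponse()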
| CooperLuan/devops.notes | taobao/top/api/rest/HotelSoldOrdersIncrementGetRequest.py | Python | mit | 523 |
#!/usr/bin/env python
# Dogtail demo script
from dogtail.config import config
#config.debugSleep = True
#config.debugSearching = True
#config.debugTranslation = True
import dogtail.tc
from dogtail.procedural import *
from dogtail.utils import screenshot
from dogtail.predicate import GenericPredicate
# These next two lines get us translations for free. To see the script run
# translated, run it like this:
# LANG=ja_JP.UTF-8 ./gedit-test-utf8-procedural-api.py
# You might also want to set config.debugTranslation and
# config.debugSearching to True, just for fun.
#import dogtail.i18n
#dogtail.i18n.loadTranslationsFromPackageMoFiles('gedit')
from os import environ, path, remove
# Load our persistent Dogtail objects
TestString = dogtail.tc.TCString()
# Remove the output file, if it's still there from a previous run
if path.isfile(path.join(path.expandvars("$HOME"), "Desktop", "UTF8demo.txt")):
remove(path.join(path.expandvars("$HOME"), "Desktop", "UTF8demo.txt"))
# Start gedit.
run('gedit')
# Set focus on gedit
focus.application('gedit')
# Focus gedit's text buffer.
focus.text()
# Load the UTF-8 demo file. Use codecs.open() instead of open().
from codecs import open
from sys import path
utfdemo = open(path[0] + '/data/UTF-8-demo.txt')
# Load the UTF-8 demo file into the text buffer.
focus.widget.text = utfdemo.read()
# Take a screenshot of the window
#screenshot()
# Click gedit's Save button.
click.button('Save')
# Focus gedit's Save As... dialog
try:
focus.widget.findByPredicate(GenericPredicate(roleName='file chooser'))
except FocusError:
try:
# This string changed somewhere around gedit 2.13.2.
# This is the new string
focus.dialog(u'Save As\u2026')
except FocusError:
# Fall back to the old string.
focus.dialog('Save as...')
# Click the Desktop widget
click('Desktop', roleName = 'table cell')
# Focus on dialog again
try:
focus.widget.findByPredicate(GenericPredicate(roleName='file chooser'))
except FocusError:
try:
# This string changed somewhere around gedit 2.13.2.
# This is the new string
focus.dialog(u'Save As\u2026')
except FocusError:
# Fall back to the old string.
focus.dialog('Save as...')
# We want to save to the file name 'UTF8demo.txt'.
focus.text()
focus.widget.text = 'UTF8demo.txt'
# And focus on dialog again
try:
focus.widget.findByPredicate(GenericPredicate(roleName='file chooser'))
except FocusError:
try:
# This string changed somewhere around gedit 2.13.2.
# This is the new string
focus.dialog(u'Save As\u2026')
except FocusError:
# Fall back to the old string.
focus.dialog('Save as...')
# Click the Save button.
click('Save')
# Let's quit now.
click('File')
click('Quit')
# We have driven gedit now lets check to see if the saved file is the same as
# the baseline file
# Read in the "gold" file
import codecs
try:
# When reading the file, we have to make sure and tell codecs.open() which
# encoding we're using, otherwise python gets confused later.
gold = open(path[0] + '/data/UTF-8-demo.txt', encoding='utf-8').readlines()
except IOError:
print "File open failed"
# Read the test file for comparison
filepath = environ['HOME'] + '/Desktop/UTF8demo.txt'
# When reading the file, we have to make sure and tell codecs.open() which
# encoding we're using, otherwise python gets confused later.
testfile = open(filepath, encoding='utf-8').readlines()
# We now have the original and saved files as lists. Let's compare them line
# by line to see if they are the same
i = 0
for baseline in gold:
label = "line test " + str(i + 1)
TestString.compare(label, baseline, testfile[i], encoding='utf-8')
i = i + 1
| vrutkovs/dogtail | examples/gedit-test-utf8-procedural-api.py | Python | gpl-2.0 | 3,778 |
import pytest
from fibo import fib
def test_fib_ok_small():
assert fib(0) == 0
assert fib(1) == 1
assert fib(2) == 1
assert fib(3) == 2
def test_fib_raise_if_string():
    with pytest.raises(TypeError):
        fib("a")
    with pytest.raises(TypeError):
        fib("1")
def test_fib_raises_lt_zero():
with pytest.raises(ValueError):
fib(-1)
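# For reference, a minimal fib() consistent with these tests (a hypothetical
# sketch; the real implementation lives in fibo.py and may differ):
#
#     def fib(n):
#         if not isinstance(n, int):
#             raise TypeError("n must be an int")
#         if n < 0:
#             raise ValueError("n must be >= 0")
#         a, b = 0, 1
#         for _ in range(n):
#             a, b = b, a + b
#         return a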
| feroda/lessons-python4beginners | src/fib/test_fib.py | Python | agpl-3.0 | 343 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pwnlib
def handle_pow(r):
print(r.recvuntil(b'python3 '))
print(r.recvuntil(b' solve '))
challenge = r.recvline().decode('ascii').strip()
p = pwnlib.tubes.process.process(['kctf_bypass_pow', challenge])
solution = p.readall().strip()
r.sendline(solution)
print(r.recvuntil(b'Correct\n'))
r = pwnlib.tubes.remote.remote('127.0.0.1', 1337)
print(r.recvuntil('== proof-of-work: '))
if r.recvline().startswith(b'enabled'):
handle_pow(r)
print(r.recvuntil(b'Malware Autograder'))
exit(0)
| google/google-ctf | 2021/quals/rev-polymorph/healthcheck/healthcheck.py | Python | apache-2.0 | 1,149 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> Optional["_models.NatGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.NatGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NatGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NatGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.NatGateway",
**kwargs: Any
) -> AsyncLROPoller["_models.NatGateway"]:
"""Creates or updates a nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to the create or update nat gateway operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.NatGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NatGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.NatGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
nat_gateway_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NatGateway":
"""Updates nat gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param parameters: Parameters supplied to update nat gateway tags.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.NatGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NatGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all the Nat Gateways in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/natGateways'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NatGatewayListResult"]:
"""Gets all nat gateways in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NatGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.NatGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NatGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NatGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways'} # type: ignore
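# Illustrative usage sketch (assumes an authenticated async
# NetworkManagementClient named `client`; resource group, gateway name and
# request body are example values):
#
#     poller = await client.nat_gateways.begin_create_or_update(
#         'my-rg', 'my-natgw',
#         {'location': 'westus2', 'sku': {'name': 'Standard'}},
#     )
#     nat_gateway = await poller.result()
#     async for gw in client.nat_gateways.list('my-rg'):
#         print(gw.name)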
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_nat_gateways_operations.py | Python | mit | 26,847 |
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list = [[traindat,testdat],[traindat,testdat]]
def distance_normsquared (train_fname=traindat,test_fname=testdat):
from shogun import RealFeatures, EuclideanDistance, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
distance=EuclideanDistance(feats_train, feats_train)
distance.set_disable_sqrt(True)
dm_train=distance.get_distance_matrix()
distance.init(feats_train, feats_test)
dm_test=distance.get_distance_matrix()
return distance,dm_train,dm_test
if __name__=='__main__':
print('EuclideanDistance - NormSquared')
distance_normsquared(*parameter_list[0])
| MikeLing/shogun | examples/undocumented/python/distance_normsquared.py | Python | gpl-3.0 | 737 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2018-07-24
# @Filename: test_rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-04 13:35:39
import astropy.io.fits
import astropy.table
import numpy
import pytest
import marvin
from ..conftest import Galaxy, set_the_config
@pytest.fixture(scope='session')
def galaxy(get_params, plateifu):
"""Yield an instance of a Galaxy object for use in tests."""
release, bintype, template = get_params
set_the_config(release)
gal = Galaxy(plateifu=plateifu)
gal.set_params(bintype=bintype, template=template, release=release)
gal.set_filepaths()
gal.set_galaxy_data()
yield gal
@pytest.fixture(scope='session')
def rss_session(galaxy, mode):
# These get created only once per session.
if mode == 'auto' or str(galaxy.bintype) != 'SPX':
pytest.skip()
if mode == 'local':
rss = marvin.tools.RSS(filename=galaxy.rsspath, release=galaxy.release, mode='local')
else:
rss = marvin.tools.RSS(plateifu=galaxy.plateifu, release=galaxy.release, mode='remote')
rss.expdata = galaxy.rss
yield rss
@pytest.fixture(scope='function')
def rss(rss_session):
# In some of the tests we modify the RSS objects. Here we implement
# a setup procedure that "unloads" the RSSFiber objects and resets the
# autoload attribute.
for rssfiber in rss_session:
rssfiber.loaded = False
rss_session.autoload = True
yield rss_session
@pytest.fixture(scope='session')
def rssfiber(rss_session):
fiberid = 0
if rss_session[fiberid].loaded is False:
rss_session[fiberid].load()
yield rss_session[fiberid]
@pytest.mark.usefixtures('monkeyauth')
class TestRSS(object):
def test_rss_init(self, rss):
assert isinstance(rss, marvin.tools.RSS)
assert isinstance(rss, marvin.tools.mixins.NSAMixIn)
assert isinstance(rss, list)
assert isinstance(rss.obsinfo, astropy.table.Table)
if rss.mode == 'file':
assert isinstance(rss.data, astropy.io.fits.HDUList)
assert rss._wavelength is not None
assert len(rss) == rss._nfibers
rss.autoload = False # To make things faster for this test
assert all([isinstance(rss_fiber, marvin.tools.rss.RSSFiber) for rss_fiber in rss])
@pytest.mark.parametrize('autoload', [True, False])
def test_rss_autoload(self, rss, autoload):
rss.autoload = autoload
assert rss[0].loaded is autoload
def test_load(self, rss):
rss.autoload = False
assert rss[0].loaded is False
rss[0].load()
assert rss[0].loaded is True
def test_load_all(self, rss):
if rss.mode == 'remote':
pytest.skip()
rss.load_all()
assert all([rss_fiber.loaded is True for rss_fiber in rss])
def test_obsinfo_to_rssfiber(self, rss):
        # We get it in this complicated way so that it is a different way of
        # obtaining it than in the _populate_fibres method.
ifusize = int(str(rss.ifu)[0:-2])
exp_idx = 0
n_fiber = 1
for rssfiber in rss:
assert numpy.all(rss.obsinfo[exp_idx] == rssfiber.obsinfo)
n_fiber += 1
if n_fiber > ifusize:
n_fiber = 1
exp_idx += 1
def test_getcube(self, rss):
cube = rss.getCube()
assert isinstance(cube, marvin.tools.Cube)
assert cube.mode == rss.mode
assert cube.plateifu == rss.plateifu
assert cube.mangaid == rss.mangaid
assert cube.release == rss.release
def test_select_fibers(self, rss):
# Skipping for API or it will take forever. Should not matter since
# we have already tested slicing for API.
if rss.data_origin == 'api':
pytest.skip()
fibers_expnum = rss.select_fibers(exposure_no=rss.expdata['expnum'])
assert len(fibers_expnum) == rss.expdata['nfiber']
assert fibers_expnum[0].obsinfo['EXPNUM'][0] == rss.expdata['expnum']
fibers_mjd = rss.select_fibers(mjd=1234)
assert len(fibers_mjd) == 0
fibers_mjd = rss.select_fibers(mjd=rss.expdata['mjd'])
assert len(fibers_mjd) == (rss.expdata['nexp'] * rss.expdata['nfiber'])
assert fibers_mjd[0].obsinfo['MJD'][0] == rss.expdata['mjd']
@pytest.mark.usefixtures('monkeyauth')
class TestRSSFiber(object):
def test_rssfiber_spectra(self, rssfiber):
assert isinstance(rssfiber, marvin.tools.RSSFiber)
assert isinstance(rssfiber.rss, marvin.tools.RSS)
assert isinstance(rssfiber.obsinfo, astropy.table.Table)
assert hasattr(rssfiber, 'ivar')
assert isinstance(rssfiber.ivar, numpy.ndarray)
assert len(rssfiber.ivar) == len(rssfiber.wavelength)
assert hasattr(rssfiber, 'mask')
assert isinstance(rssfiber.mask, numpy.ndarray)
assert len(rssfiber.mask) == len(rssfiber.wavelength)
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum = getattr(rssfiber, dm_element.name, None)
assert spectrum is not None
assert isinstance(spectrum, numpy.ndarray)
assert len(spectrum) == len(rssfiber.wavelength)
def test_rssfiber_data(self, rssfiber):
rss_filename = rssfiber.rss._getFullPath()
rss_hdu = astropy.io.fits.open(rss_filename)
numpy.testing.assert_allclose(rss_hdu['FLUX'].data[rssfiber.fiberid, :], rssfiber.value)
numpy.testing.assert_allclose(rss_hdu['IVAR'].data[rssfiber.fiberid, :], rssfiber.ivar)
numpy.testing.assert_array_equal(rss_hdu['MASK'].data[rssfiber.fiberid, :], rssfiber.mask)
for dm_element in rssfiber.rss.datamodel.rss:
if dm_element.name == 'flux':
continue
fits_data = rss_hdu[dm_element.fits_extension()].data[rssfiber.fiberid, :]
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
for dm_element in rssfiber.rss.datamodel.spectra:
fits_data = rss_hdu[dm_element.fits_extension()].data
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
def test_rssfiber_slice(self, rssfiber):
n_elements = 10
sliced = rssfiber[0:n_elements]
assert len(sliced.value) == n_elements
numpy.testing.assert_allclose(sliced.value, rssfiber.value[0:n_elements])
assert len(sliced.ivar) == n_elements
assert len(sliced.mask) == n_elements
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum_sliced = getattr(sliced, dm_element.name, None)
assert len(spectrum_sliced) == n_elements
assert sliced.obsinfo is not None
def test_rssfiber_masked(self, rssfiber):
assert numpy.sum(rssfiber.masked.mask) > 0
def test_rssfiber_descale(self, rssfiber):
descaled = rssfiber.descale()
numpy.testing.assert_allclose(descaled.value, rssfiber.value * rssfiber.unit.scale)
assert descaled.obsinfo is not None
class TestPickling(object):
def test_pickling_file(self, temp_scratch, rss):
if rss.data_origin == 'file':
assert rss.data is not None
rss_file = temp_scratch.join('test_rss.mpf')
rss.save(str(rss_file))
assert rss_file.check() is True
rss_restored = marvin.tools.RSS.restore(str(rss_file))
assert rss_restored.data_origin == rss.data_origin
assert isinstance(rss_restored, marvin.tools.RSS)
assert len(rss_restored) > 0
assert isinstance(rss_restored[0], marvin.tools.RSSFiber)
assert numpy.sum(rss_restored[0].value) > 0
if rss.data_origin == 'file':
assert rss_restored.data is not None
else:
assert rss_restored.data is None
| sdss/marvin | tests/tools/test_rss.py | Python | bsd-3-clause | 8,275 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Functions useful for text wrapping (in a box) and indenting.
"""
__revision__ = "$Id$"
import sys
import re
import textwrap
import htmlentitydefs
import invenio.template
from invenio.config import CFG_ETCDIR
try:
import chardet
CHARDET_AVAILABLE = True
except ImportError:
CHARDET_AVAILABLE = False
try:
from unidecode import unidecode
UNIDECODE_AVAILABLE = True
except ImportError:
UNIDECODE_AVAILABLE = False
CFG_LATEX_UNICODE_TRANSLATION_CONST = {}
CFG_WRAP_TEXT_IN_A_BOX_STYLES = {
'__DEFAULT' : {
'horiz_sep' : '*',
'max_col' : 72,
'min_col' : 40,
'tab_str' : ' ',
'tab_num' : 0,
'border' : ('**', '*', '**', '** ', ' **', '**', '*', '**'),
'prefix' : '\n',
'suffix' : '\n',
'break_long' : False,
'force_horiz' : False,
},
'squared' : {
'horiz_sep' : '-',
'border' : ('+', '-', '+', '| ', ' |', '+', '-', '+')
},
'double_sharp' : {
'horiz_sep' : '#',
'border' : ('##', '#', '##', '## ', ' ##', '##', '#', '##')
},
'single_sharp' : {
'horiz_sep' : '#',
'border' : ('#', '#', '#', '# ', ' #', '#', '#', '#')
},
'single_star' : {
'border' : ('*', '*', '*', '* ', ' *', '*', '*', '*',)
},
'double_star' : {
},
'no_border' : {
'horiz_sep' : '',
'border' : ('', '', '', '', '', '', '', ''),
'prefix' : '',
'suffix' : ''
},
'conclusion' : {
'border' : ('', '', '', '', '', '', '', ''),
'prefix' : '',
'horiz_sep' : '-',
'force_horiz' : True,
},
'important' : {
'tab_num' : 1,
},
'ascii' : {
'horiz_sep' : (u'├', u'─', u'┤'),
'border' : (u'┌', u'─', u'┐', u'│ ', u' │', u'└', u'─', u'┘'),
},
'ascii_double' : {
'horiz_sep' : (u'╠', u'═', u'╣'),
'border' : (u'╔', u'═', u'╗', u'║ ', u' ║', u'╚', u'═', u'╝'),
}
}
re_unicode_lowercase_a = re.compile(unicode(r"(?u)[áàäâãå]", "utf-8"))
re_unicode_lowercase_ae = re.compile(unicode(r"(?u)[æ]", "utf-8"))
re_unicode_lowercase_oe = re.compile(unicode(r"(?u)[œ]", "utf-8"))
re_unicode_lowercase_e = re.compile(unicode(r"(?u)[éèëê]", "utf-8"))
re_unicode_lowercase_i = re.compile(unicode(r"(?u)[íìïî]", "utf-8"))
re_unicode_lowercase_o = re.compile(unicode(r"(?u)[óòöôõø]", "utf-8"))
re_unicode_lowercase_u = re.compile(unicode(r"(?u)[úùüû]", "utf-8"))
re_unicode_lowercase_y = re.compile(unicode(r"(?u)[ýÿ]", "utf-8"))
re_unicode_lowercase_c = re.compile(unicode(r"(?u)[çć]", "utf-8"))
re_unicode_lowercase_n = re.compile(unicode(r"(?u)[ñ]", "utf-8"))
re_unicode_uppercase_a = re.compile(unicode(r"(?u)[ÁÀÄÂÃÅ]", "utf-8"))
re_unicode_uppercase_ae = re.compile(unicode(r"(?u)[Æ]", "utf-8"))
re_unicode_uppercase_oe = re.compile(unicode(r"(?u)[Œ]", "utf-8"))
re_unicode_uppercase_e = re.compile(unicode(r"(?u)[ÉÈËÊ]", "utf-8"))
re_unicode_uppercase_i = re.compile(unicode(r"(?u)[ÍÌÏÎ]", "utf-8"))
re_unicode_uppercase_o = re.compile(unicode(r"(?u)[ÓÒÖÔÕØ]", "utf-8"))
re_unicode_uppercase_u = re.compile(unicode(r"(?u)[ÚÙÜÛ]", "utf-8"))
re_unicode_uppercase_y = re.compile(unicode(r"(?u)[Ý]", "utf-8"))
re_unicode_uppercase_c = re.compile(unicode(r"(?u)[ÇĆ]", "utf-8"))
re_unicode_uppercase_n = re.compile(unicode(r"(?u)[Ñ]", "utf-8"))
re_latex_lowercase_a = re.compile("\\\\[\"H'`~^vu=k]\{?a\}?")
re_latex_lowercase_ae = re.compile("\\\\ae\\{\\}?")
re_latex_lowercase_oe = re.compile("\\\\oe\\{\\}?")
re_latex_lowercase_e = re.compile("\\\\[\"H'`~^vu=k]\\{?e\\}?")
re_latex_lowercase_i = re.compile("\\\\[\"H'`~^vu=k]\\{?i\\}?")
re_latex_lowercase_o = re.compile("\\\\[\"H'`~^vu=k]\\{?o\\}?")
re_latex_lowercase_u = re.compile("\\\\[\"H'`~^vu=k]\\{?u\\}?")
re_latex_lowercase_y = re.compile("\\\\[\"']\\{?y\\}?")
re_latex_lowercase_c = re.compile("\\\\['uc]\\{?c\\}?")
re_latex_lowercase_n = re.compile("\\\\[c'~^vu]\\{?n\\}?")
re_latex_uppercase_a = re.compile("\\\\[\"H'`~^vu=k]\\{?A\\}?")
re_latex_uppercase_ae = re.compile("\\\\AE\\{?\\}?")
re_latex_uppercase_oe = re.compile("\\\\OE\\{?\\}?")
re_latex_uppercase_e = re.compile("\\\\[\"H'`~^vu=k]\\{?E\\}?")
re_latex_uppercase_i = re.compile("\\\\[\"H'`~^vu=k]\\{?I\\}?")
re_latex_uppercase_o = re.compile("\\\\[\"H'`~^vu=k]\\{?O\\}?")
re_latex_uppercase_u = re.compile("\\\\[\"H'`~^vu=k]\\{?U\\}?")
re_latex_uppercase_y = re.compile("\\\\[\"']\\{?Y\\}?")
re_latex_uppercase_c = re.compile("\\\\['uc]\\{?C\\}?")
re_latex_uppercase_n = re.compile("\\\\[c'~^vu]\\{?N\\}?")
def indent_text(text,
nb_tabs=0,
tab_str=" ",
linebreak_input="\n",
linebreak_output="\n",
wrap=False):
"""
add tabs to each line of text
@param text: the text to indent
@param nb_tabs: number of tabs to add
    @param tab_str: type of tab (could be, for example, "\t"; default: 2 spaces)
@param linebreak_input: linebreak on input
@param linebreak_output: linebreak on output
    @param wrap: whether to apply smart text wrapping.
(by means of wrap_text_in_a_box)
@return: indented text as string
"""
if not wrap:
lines = text.split(linebreak_input)
tabs = nb_tabs*tab_str
output = ""
for line in lines:
output += tabs + line + linebreak_output
return output
else:
return wrap_text_in_a_box(body=text, style='no_border',
tab_str=tab_str, tab_num=nb_tabs)
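# Usage sketch (assumes the default two-space tab_str described in the
# docstring above; the exact default is whatever the signature defines):
#   >>> indent_text('line1\nline2', nb_tabs=2)
#   '    line1\n    line2\n'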
_RE_BEGINNING_SPACES = re.compile(r'^\s*')
_RE_NEWLINES_CLEANER = re.compile(r'\n+')
_RE_LONELY_NEWLINES = re.compile(r'\b\n\b')
def wrap_text_in_a_box(body='', title='', style='double_star', **args):
"""Return a nicely formatted text box:
e.g.
******************
** title **
**--------------**
** body **
******************
    Indentation and newlines are respected.
@param body: the main text
@param title: an optional title
@param style: the name of one of the style in CFG_WRAP_STYLES. By default
the double_star style is used.
You can further tune the desired style by setting various optional
parameters:
@param horiz_sep: a string that is repeated in order to produce a
separator row between the title and the body (if needed)
or a tuple of three characters in the form (l, c, r)
    @param max_col: the maximum number of columns used by the box
(including indentation)
@param min_col: the symmetrical minimum number of columns
@param tab_str: a string to represent indentation
    @param tab_num: the number of levels of indentation
    @param border: a tuple of 8 elements in the form
(tl, t, tr, l, r, bl, b, br) of strings that represent the
different corners and sides of the box
@param prefix: a prefix string added before the box
@param suffix: a suffix string added after the box
    @param break_long: whether to break long words in order to respect
max_col
@param force_horiz: True in order to print the horizontal line even when
there is no title
e.g.:
print wrap_text_in_a_box(title='prova',
body=' 123 prova.\n Vediamo come si indenta',
horiz_sep='-', style='no_border', max_col=20, tab_num=1)
prova
----------------
123 prova.
Vediamo come
si indenta
"""
def _wrap_row(row, max_col, break_long):
"""Wrap a single row"""
spaces = _RE_BEGINNING_SPACES.match(row).group()
row = row[len(spaces):]
spaces = spaces.expandtabs()
return textwrap.wrap(row, initial_indent=spaces,
subsequent_indent=spaces, width=max_col,
break_long_words=break_long)
def _clean_newlines(text):
text = _RE_LONELY_NEWLINES.sub(' \n', text)
return _RE_NEWLINES_CLEANER.sub(lambda x: x.group()[:-1], text)
body = unicode(body, 'utf-8')
title = unicode(title, 'utf-8')
astyle = dict(CFG_WRAP_TEXT_IN_A_BOX_STYLES['__DEFAULT'])
if CFG_WRAP_TEXT_IN_A_BOX_STYLES.has_key(style):
astyle.update(CFG_WRAP_TEXT_IN_A_BOX_STYLES[style])
astyle.update(args)
horiz_sep = astyle['horiz_sep']
border = astyle['border']
tab_str = astyle['tab_str'] * astyle['tab_num']
max_col = max(astyle['max_col'] \
- len(border[3]) - len(border[4]) - len(tab_str), 1)
min_col = astyle['min_col']
prefix = astyle['prefix']
suffix = astyle['suffix']
force_horiz = astyle['force_horiz']
break_long = astyle['break_long']
body = _clean_newlines(body)
tmp_rows = [_wrap_row(row, max_col, break_long)
for row in body.split('\n')]
body_rows = []
for rows in tmp_rows:
if rows:
body_rows += rows
else:
body_rows.append('')
if not ''.join(body_rows).strip():
        # Effectively empty body (whitespace only)
body_rows = []
title = _clean_newlines(title)
tmp_rows = [_wrap_row(row, max_col, break_long)
for row in title.split('\n')]
title_rows = []
for rows in tmp_rows:
if rows:
title_rows += rows
else:
title_rows.append('')
if not ''.join(title_rows).strip():
        # Effectively empty title (whitespace only)
title_rows = []
max_col = max([len(row) for row in body_rows + title_rows] + [min_col])
mid_top_border_len = max_col \
+ len(border[3]) + len(border[4]) - len(border[0]) - len(border[2])
mid_bottom_border_len = max_col \
+ len(border[3]) + len(border[4]) - len(border[5]) - len(border[7])
top_border = border[0] \
+ (border[1] * mid_top_border_len)[:mid_top_border_len] + border[2]
bottom_border = border[5] \
+ (border[6] * mid_bottom_border_len)[:mid_bottom_border_len] \
+ border[7]
if type(horiz_sep) is tuple and len(horiz_sep) == 3:
horiz_line = horiz_sep[0] + (horiz_sep[1] * (max_col + 2))[:(max_col + 2)] + horiz_sep[2]
else:
horiz_line = border[3] + (horiz_sep * max_col)[:max_col] + border[4]
title_rows = [tab_str + border[3] + row
+ ' ' * (max_col - len(row)) + border[4] for row in title_rows]
body_rows = [tab_str + border[3] + row
+ ' ' * (max_col - len(row)) + border[4] for row in body_rows]
ret = []
if top_border:
ret += [tab_str + top_border]
ret += title_rows
if title_rows or force_horiz:
ret += [tab_str + horiz_line]
ret += body_rows
if bottom_border:
ret += [tab_str + bottom_border]
return (prefix + '\n'.join(ret) + suffix).encode('utf-8')
def wait_for_user(msg=""):
"""
Print MSG and a confirmation prompt, waiting for user's
confirmation, unless silent '--yes-i-know' command line option was
used, in which case the function returns immediately without
printing anything.
"""
if '--yes-i-know' in sys.argv:
return
print msg
try:
answer = raw_input("Please confirm by typing 'Yes, I know!': ")
except KeyboardInterrupt:
print
answer = ''
if answer != 'Yes, I know!':
sys.stderr.write("ERROR: Aborted.\n")
sys.exit(1)
return
def guess_minimum_encoding(text, charsets=('ascii', 'latin1', 'utf8')):
"""Try to guess the minimum charset that is able to represent the given
text using the provided charsets. text is supposed to be encoded in utf8.
Returns (encoded_text, charset) where charset is the first charset
    in the sequence that is able to encode the text.
Returns (text_in_utf8, 'utf8') in case no charset is able to encode text.
@note: If the input text is not in strict UTF-8, then replace any
non-UTF-8 chars inside it.
"""
text_in_unicode = text.decode('utf8', 'replace')
for charset in charsets:
try:
return (text_in_unicode.encode(charset), charset)
except (UnicodeEncodeError, UnicodeDecodeError):
pass
return (text_in_unicode.encode('utf8'), 'utf8')
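# Usage sketch: the charsets are tried in the given order, so a UTF-8 byte
# string whose characters all fit in Latin-1 comes back Latin-1 encoded:
#   >>> guess_minimum_encoding('caf\xc3\xa9')
#   ('caf\xe9', 'latin1')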
def encode_for_xml(text, wash=False, xml_version='1.0', quote=False):
"""Encodes special characters in a text so that it would be
XML-compliant.
@param text: text to encode
@return: an encoded text"""
text = text.replace('&', '&')
text = text.replace('<', '<')
if quote:
text = text.replace('"', '"')
if wash:
text = wash_for_xml(text, xml_version=xml_version)
return text
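# Usage sketch ('&' is always escaped first, so existing entities are not
# double-escaped by the later replacements):
#   >>> encode_for_xml('a < b & "c"', quote=True)
#   'a &lt; b &amp; &quot;c&quot;'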
try:
unichr(0x100000)
RE_ALLOWED_XML_1_0_CHARS = re.compile(u'[^\U00000009\U0000000A\U0000000D\U00000020-\U0000D7FF\U0000E000-\U0000FFFD\U00010000-\U0010FFFF]')
RE_ALLOWED_XML_1_1_CHARS = re.compile(u'[^\U00000001-\U0000D7FF\U0000E000-\U0000FFFD\U00010000-\U0010FFFF]')
except ValueError:
# oops, we are running on a narrow UTF/UCS Python build,
# so we have to limit the UTF/UCS char range:
RE_ALLOWED_XML_1_0_CHARS = re.compile(u'[^\U00000009\U0000000A\U0000000D\U00000020-\U0000D7FF\U0000E000-\U0000FFFD]')
RE_ALLOWED_XML_1_1_CHARS = re.compile(u'[^\U00000001-\U0000D7FF\U0000E000-\U0000FFFD]')
def wash_for_xml(text, xml_version='1.0'):
"""
Removes any character which is not in the range of allowed
characters for XML. The allowed characters depends on the version
of XML.
- XML 1.0:
<http://www.w3.org/TR/REC-xml/#charsets>
- XML 1.1:
<http://www.w3.org/TR/xml11/#charsets>
@param text: input string to wash.
@param xml_version: version of the XML for which we wash the
input. Value for this parameter can be '1.0' or '1.1'
"""
if xml_version == '1.0':
return RE_ALLOWED_XML_1_0_CHARS.sub('', unicode(text, 'utf-8')).encode('utf-8')
else:
return RE_ALLOWED_XML_1_1_CHARS.sub('', unicode(text, 'utf-8')).encode('utf-8')
def wash_for_utf8(text, correct=True):
"""Return UTF-8 encoded binary string with incorrect characters washed away.
@param text: input string to wash (can be either a binary string or a Unicode string)
@param correct: whether to correct bad characters or throw exception
"""
if isinstance(text, unicode):
return text.encode('utf-8')
ret = []
while True:
try:
text.decode("utf-8")
except UnicodeDecodeError, e:
if correct:
ret.append(text[:e.start])
text = text[e.end:]
else:
raise e
else:
break
ret.append(text)
return ''.join(ret)
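# Usage sketch: with correct=True (the default) invalid bytes are dropped;
# with correct=False the UnicodeDecodeError propagates:
#   >>> wash_for_utf8('abc\xffdef')
#   'abcdef'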
def nice_size(size):
"""
@param size: the size.
@type size: int
@return: a nicely printed size.
@rtype: string
"""
websearch_templates = invenio.template.load('websearch')
unit = 'B'
if size > 1024:
size /= 1024.0
unit = 'KB'
if size > 1024:
size /= 1024.0
unit = 'MB'
if size > 1024:
size /= 1024.0
unit = 'GB'
return '%s %s' % (websearch_templates.tmpl_nice_number(size, max_ndigits_after_dot=2), unit)
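# Usage sketch (the exact number formatting is delegated to the websearch
# template's tmpl_nice_number, so the output below is indicative only):
#   >>> nice_size(2621440)
#   '2.5 MB'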
def remove_line_breaks(text):
"""
Remove line breaks from input, including unicode 'line
separator', 'paragraph separator', and 'next line' characters.
"""
    # Note: the separators must be matched as single code points (U+2028,
    # U+2029, U+0085), since the text has already been decoded from UTF-8.
    return unicode(text, 'utf-8').replace('\f', '').replace('\n', '').replace('\r', '').replace(u'\u2028', '').replace(u'\u2029', '').replace(u'\u0085', '').encode('utf-8')
def decode_to_unicode(text, default_encoding='utf-8'):
"""
Decode input text into Unicode representation by first using the default
encoding utf-8.
If the operation fails, it detects the type of encoding used in the given text.
For optimal result, it is recommended that the 'chardet' module is installed.
NOTE: Beware that this might be slow for *very* large strings.
If chardet detection fails, it will try to decode the string using the basic
detection function guess_minimum_encoding().
    Also, bear in mind that it is impossible to detect the correct encoding at
    all times, other than by taking educated guesses. With that said, this
    function will always return some decoded Unicode string; however, the data
    returned may not be the same as the original data in some cases.
@param text: the text to decode
@type text: string
@param default_encoding: the character encoding to use. Optional.
@type default_encoding: string
@return: input text as Unicode
@rtype: string
"""
if not text:
return ""
try:
return text.decode(default_encoding)
except (UnicodeError, LookupError):
pass
detected_encoding = None
if CHARDET_AVAILABLE:
# We can use chardet to perform detection
res = chardet.detect(text)
if res['confidence'] >= 0.8:
detected_encoding = res['encoding']
if detected_encoding == None:
# No chardet detection, try to make a basic guess
dummy, detected_encoding = guess_minimum_encoding(text)
return text.decode(detected_encoding)
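# Usage sketch: valid UTF-8 input never reaches the chardet/guessing fallback:
#   >>> decode_to_unicode('caf\xc3\xa9')
#   u'caf\xe9'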
def translate_latex2unicode(text, kb_file="%s/bibconvert/KB/latex-to-unicode.kb" % \
(CFG_ETCDIR,)):
"""
This function will take given text, presumably containing LaTeX symbols,
and attempts to translate it to Unicode using the given or default KB
translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb.
The translated Unicode string will then be returned.
If the translation table and compiled regular expression object is not
previously generated in the current session, they will be.
@param text: a text presumably containing LaTeX symbols.
@type text: string
@param kb_file: full path to file containing latex2unicode translations.
Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb
@type kb_file: string
@return: Unicode representation of translated text
@rtype: unicode
"""
# First decode input text to Unicode
try:
text = decode_to_unicode(text)
except UnicodeDecodeError:
text = unicode(wash_for_utf8(text))
# Load translation table, if required
if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}:
_load_latex2unicode_constants(kb_file)
# Find all matches and replace text
for match in CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'].finditer(text):
# If LaTeX style markers {, } and $ are before or after the matching text, it
# will replace those as well
text = re.sub("[\{\$]?%s[\}\$]?" % (re.escape(match.group()),), \
CFG_LATEX_UNICODE_TRANSLATION_CONST['table'][match.group()], \
text)
# Return Unicode representation of translated text
return text
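# Usage sketch (hedged: the actual replacements depend on the contents of the
# latex-to-unicode.kb file, so the mapping below is only an assumed example):
#   >>> translate_latex2unicode(r'Erd\H{o}s')
#   u'Erd\u0151s'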
def _load_latex2unicode_constants(kb_file="%s/bibconvert/KB/latex-to-unicode.kb" % \
(CFG_ETCDIR,)):
"""
Load LaTeX2Unicode translation table dictionary and regular expression object
from KB to a global dictionary.
@param kb_file: full path to file containing latex2unicode translations.
Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb
@type kb_file: string
@return: dict of type: {'regexp_obj': regexp match object,
'table': dict of LaTeX -> Unicode mappings}
@rtype: dict
"""
try:
data = open(kb_file)
except IOError:
# File not found or similar
sys.stderr.write("\nCould not open LaTeX to Unicode KB file. Aborting translation.\n")
return CFG_LATEX_UNICODE_TRANSLATION_CONST
latex_symbols = []
translation_table = {}
for line in data:
# The file has form of latex|--|utf-8. First decode to Unicode.
line = line.decode('utf-8')
mapping = line.split('|--|')
translation_table[mapping[0].rstrip('\n')] = mapping[1].rstrip('\n')
latex_symbols.append(re.escape(mapping[0].rstrip('\n')))
data.close()
CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'] = re.compile("|".join(latex_symbols))
CFG_LATEX_UNICODE_TRANSLATION_CONST['table'] = translation_table
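# Sketch of the KB file format implied by the parser above: one mapping per
# line, LaTeX on the left, UTF-8 on the right, separated by '|--|', e.g.
#   \H{o}|--|ő
#   \ss{}|--|ß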
def translate_to_ascii(values):
"""
Transliterate the string contents of the given sequence into ascii representation.
Returns a sequence with the modified values if the module 'unidecode' is
available. Otherwise it will fall back to the inferior strip_accents function.
For example: H\xc3\xb6hne becomes Hohne.
Note: Passed strings are returned as a list.
@param values: sequence of strings to transform
@type values: sequence
@return: sequence with values transformed to ascii
@rtype: sequence
"""
if not values:
return values
if type(values) == str:
values = [values]
for index, value in enumerate(values):
if not value:
continue
if not UNIDECODE_AVAILABLE:
ascii_text = strip_accents(value)
else:
encoded_text, encoding = guess_minimum_encoding(value)
unicode_text = unicode(encoded_text.decode(encoding))
ascii_text = unidecode(unicode_text).encode('ascii')
values[index] = ascii_text
return values
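# Usage sketch (mirrors the docstring example; a bare string is wrapped in a
# list before processing, so a list always comes back):
#   >>> translate_to_ascii(['H\xc3\xb6hne'])
#   ['Hohne']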
def xml_entities_to_utf8(text, skip=('lt', 'gt', 'amp')):
"""
Removes HTML or XML character references and entities from a text string
and replaces them with their UTF-8 representation, if possible.
@param text: The HTML (or XML) source text.
@type text: string
@param skip: list of entity names to skip when transforming.
@type skip: iterable
    @return: the text with character references replaced by their UTF-8 representation.
@author: Based on http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16)).encode("utf-8")
else:
return unichr(int(text[2:-1])).encode("utf-8")
except ValueError:
pass
else:
# named entity
if text[1:-1] not in skip:
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
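# Usage sketch: named and numeric references are decoded to UTF-8 bytes,
# while entities listed in `skip` are left untouched:
#   >>> xml_entities_to_utf8('caf&eacute; &lt;tag&gt; &#233;')
#   'caf\xc3\xa9 &lt;tag&gt; \xc3\xa9'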
def strip_accents(x):
"""
Strip accents in the input phrase X (assumed in UTF-8) by replacing
accented characters with their unaccented cousins (e.g. é by e).
@param x: the input phrase to strip.
@type x: string
@return: Return such a stripped X.
"""
x = re_latex_lowercase_a.sub("a", x)
x = re_latex_lowercase_ae.sub("ae", x)
x = re_latex_lowercase_oe.sub("oe", x)
x = re_latex_lowercase_e.sub("e", x)
x = re_latex_lowercase_i.sub("i", x)
x = re_latex_lowercase_o.sub("o", x)
x = re_latex_lowercase_u.sub("u", x)
    x = re_latex_lowercase_y.sub("y", x)
x = re_latex_lowercase_c.sub("c", x)
x = re_latex_lowercase_n.sub("n", x)
x = re_latex_uppercase_a.sub("A", x)
x = re_latex_uppercase_ae.sub("AE", x)
x = re_latex_uppercase_oe.sub("OE", x)
x = re_latex_uppercase_e.sub("E", x)
x = re_latex_uppercase_i.sub("I", x)
x = re_latex_uppercase_o.sub("O", x)
x = re_latex_uppercase_u.sub("U", x)
x = re_latex_uppercase_y.sub("Y", x)
x = re_latex_uppercase_c.sub("C", x)
x = re_latex_uppercase_n.sub("N", x)
# convert input into Unicode string:
try:
y = unicode(x, "utf-8")
except:
return x # something went wrong, probably the input wasn't UTF-8
# asciify Latin-1 lowercase characters:
y = re_unicode_lowercase_a.sub("a", y)
y = re_unicode_lowercase_ae.sub("ae", y)
y = re_unicode_lowercase_oe.sub("oe", y)
y = re_unicode_lowercase_e.sub("e", y)
y = re_unicode_lowercase_i.sub("i", y)
y = re_unicode_lowercase_o.sub("o", y)
y = re_unicode_lowercase_u.sub("u", y)
y = re_unicode_lowercase_y.sub("y", y)
y = re_unicode_lowercase_c.sub("c", y)
y = re_unicode_lowercase_n.sub("n", y)
# asciify Latin-1 uppercase characters:
y = re_unicode_uppercase_a.sub("A", y)
y = re_unicode_uppercase_ae.sub("AE", y)
y = re_unicode_uppercase_oe.sub("OE", y)
y = re_unicode_uppercase_e.sub("E", y)
y = re_unicode_uppercase_i.sub("I", y)
y = re_unicode_uppercase_o.sub("O", y)
y = re_unicode_uppercase_u.sub("U", y)
y = re_unicode_uppercase_y.sub("Y", y)
y = re_unicode_uppercase_c.sub("C", y)
y = re_unicode_uppercase_n.sub("N", y)
# return UTF-8 representation of the Unicode string:
return y.encode("utf-8")
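# Usage sketch (UTF-8 bytes in, UTF-8 bytes out):
#   >>> strip_accents('r\xc3\xa9gime Fran\xc3\xa7ais')
#   'regime Francais'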
def show_diff(original, modified, prefix="<pre>", sufix="</pre>"):
"""
Returns the diff view between source and changed strings.
Function checks both arguments line by line and returns a string
with additional css classes for difference view
@param original: base string
@param modified: changed string
@param prefix: prefix of the output string
    @param sufix: suffix string appended to the output
@return: string with the comparison of the records
@rtype: string
"""
import difflib
differ = difflib.Differ()
result = [prefix]
for line in differ.compare(modified.splitlines(), original.splitlines()):
if line[0] == ' ':
result.append(line.strip())
elif line[0] == '-':
# Mark as deleted
result.append('<strong class="diff_field_deleted">' + line[2:].strip() + "</strong>")
elif line[0] == '+':
# Mark as added/modified
result.append('<strong class="diff_field_added">' + line[2:].strip() + "</strong>")
else:
continue
result.append(sufix)
return '\n'.join(result)
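# Usage sketch: note the compare() argument order above -- lines present only
# in `modified` are marked deleted and lines present only in `original` are
# marked added:
#   >>> show_diff(original='a\nb', modified='a\nc')
#   '<pre>\na\n<strong class="diff_field_deleted">c</strong>\n<strong class="diff_field_added">b</strong>\n</pre>'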
def transliterate_ala_lc(value):
"""
Transliterate a string.
Compatibility with the ALA-LC romanization standard:
http://www.loc.gov/catdir/cpso/roman.html
Maps from one system of writing into another, letter by letter.
Uses 'unidecode' if available.
    @param value: string to transform
    @type value: string
@return: string transliterated
@rtype: string
"""
if not value:
return value
if UNIDECODE_AVAILABLE:
text = unidecode(value)
else:
text = translate_to_ascii(value)
text = text.pop()
return text
| nkalodimas/invenio | modules/miscutil/lib/textutils.py | Python | gpl-2.0 | 27,512 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import pickle
from random import randint
from django.conf import settings
from graphite.compat import HttpResponse
from graphite.util import unpickle
def add(request):
metrics = set( request.POST['metrics'].split() )
whitelist = load_whitelist()
new_whitelist = whitelist | metrics
save_whitelist(new_whitelist)
return HttpResponse(content_type="text/plain", content="OK")
def remove(request):
metrics = set( request.POST['metrics'].split() )
whitelist = load_whitelist()
new_whitelist = whitelist - metrics
save_whitelist(new_whitelist)
return HttpResponse(content_type="text/plain", content="OK")
def show(request):
whitelist = load_whitelist()
members = '\n'.join( sorted(whitelist) )
return HttpResponse(content_type="text/plain", content=members)
def load_whitelist():
buffer = open(settings.WHITELIST_FILE, 'rb').read()
whitelist = unpickle.loads(buffer)
return whitelist
def save_whitelist(whitelist):
serialized = pickle.dumps(whitelist, protocol=-1) #do this instead of dump() to raise potential exceptions before open()
tmpfile = '%s-%d' % (settings.WHITELIST_FILE, randint(0, 100000))
try:
fh = open(tmpfile, 'wb')
fh.write(serialized)
fh.close()
if os.path.exists(settings.WHITELIST_FILE):
os.unlink(settings.WHITELIST_FILE)
os.rename(tmpfile, settings.WHITELIST_FILE)
finally:
if os.path.exists(tmpfile):
os.unlink(tmpfile)
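# Usage sketch (assumes settings.WHITELIST_FILE points to a writable path).
# The write-to-temp-file-then-os.rename dance above keeps the update atomic
# on POSIX filesystems, so readers never see a half-written pickle:
#   save_whitelist(load_whitelist() | set(['carbon.agents.*']))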
| obfuscurity/graphite-web | webapp/graphite/whitelist/views.py | Python | apache-2.0 | 1,997 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Wrapper layers: layers that augment the functionality of another layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.contrib.keras.python.keras.utils.generic_utils import has_arg
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as tf_base_layers
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.
Arguments:
layer: The layer to be wrapped.
"""
def __init__(self, layer, **kwargs):
self.layer = layer
# Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
# the inner layer has update ops that depend on its inputs (as opposed
# to the inputs to the Wrapper layer).
self._input_map = {}
super(Wrapper, self).__init__(**kwargs)
def build(self, input_shape=None):
self.built = True
@property
def activity_regularizer(self):
if hasattr(self.layer, 'activity_regularizer'):
return self.layer.activity_regularizer
else:
return None
@property
def trainable_weights(self):
return self.layer.trainable_weights
@property
def non_trainable_weights(self):
return self.layer.non_trainable_weights
@property
def updates(self):
if hasattr(self.layer, 'updates'):
return self.layer.updates
return []
def get_updates_for(self, inputs=None):
# If the wrapper modifies the inputs, use the modified inputs to
# get the updates from the inner layer.
inner_inputs = inputs
if inputs is not None:
uid = tf_base_layers._object_list_uid(inputs)
if uid in self._input_map:
inner_inputs = self._input_map[uid]
updates = self.layer.get_updates_for(inner_inputs)
updates += super(Wrapper, self).get_updates_for(inputs)
return updates
@property
def losses(self):
if hasattr(self.layer, 'losses'):
return self.layer.losses
return []
def get_losses_for(self, inputs=None):
if inputs is None:
losses = self.layer.get_losses_for(None)
return losses + super(Wrapper, self).get_losses_for(None)
return super(Wrapper, self).get_losses_for(inputs)
@property
def constraints(self):
return self.layer.constraints
def get_weights(self):
return self.layer.get_weights()
def set_weights(self, weights):
self.layer.set_weights(weights)
def get_config(self):
config = {
'layer': {
'class_name': self.layer.__class__.__name__,
'config': self.layer.get_config()
}
}
base_config = super(Wrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.contrib.keras.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(
config.pop('layer'), custom_objects=custom_objects)
return cls(layer, **config)
class TimeDistributed(Wrapper):
"""This wrapper allows to apply a layer to every temporal slice of an input.
The input should be at least 3D, and the dimension of index one
will be considered to be the temporal dimension.
Consider a batch of 32 samples,
where each sample is a sequence of 10 vectors of 16 dimensions.
The batch input shape of the layer is then `(32, 10, 16)`,
and the `input_shape`, not including the samples dimension, is `(10, 16)`.
You can then use `TimeDistributed` to apply a `Dense` layer
to each of the 10 timesteps, independently:
```python
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# now model.output_shape == (None, 10, 8)
```
The output will then have shape `(32, 10, 8)`.
In subsequent layers, there is no need for the `input_shape`:
```python
model.add(TimeDistributed(Dense(32)))
# now model.output_shape == (None, 10, 32)
```
The output will then have shape `(32, 10, 32)`.
`TimeDistributed` can be used with arbitrary layers, not just `Dense`,
for instance with a `Conv2D` layer:
```python
model = Sequential()
model.add(TimeDistributed(Conv2D(64, (3, 3)),
input_shape=(10, 299, 299, 3)))
```
Arguments:
layer: a layer instance.
"""
def __init__(self, layer, **kwargs):
super(TimeDistributed, self).__init__(layer, **kwargs)
self.supports_masking = True
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
assert len(input_shape) >= 3
self.input_spec = InputSpec(shape=input_shape)
child_input_shape = [input_shape[0]] + input_shape[2:]
if not self.layer.built:
self.layer.build(child_input_shape)
self.layer.built = True
super(TimeDistributed, self).build()
self.built = True
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
child_input_shape = tensor_shape.TensorShape([input_shape[0]] +
input_shape[2:])
child_output_shape = self.layer._compute_output_shape( # pylint: disable=protected-access
child_input_shape).as_list()
timesteps = input_shape[1]
return tensor_shape.TensorShape([child_output_shape[0], timesteps] +
child_output_shape[1:])
def call(self, inputs, training=None, mask=None):
kwargs = {}
if has_arg(self.layer.call, 'training'):
kwargs['training'] = training
uses_learning_phase = False # pylint: disable=redefined-outer-name
input_shape = K.int_shape(inputs)
if input_shape[0]:
# batch size matters, use rnn-based implementation
def step(x, _):
global uses_learning_phase # pylint: disable=global-variable-undefined
output = self.layer.call(x, **kwargs)
if hasattr(output, '_uses_learning_phase'):
uses_learning_phase = (output._uses_learning_phase or
uses_learning_phase)
return output, []
_, outputs, _ = K.rnn(
step,
inputs,
initial_states=[],
unroll=False)
y = outputs
else:
# No batch size specified, therefore the layer will be able
# to process batches of any size.
# We can go with reshape-based implementation for performance.
input_length = input_shape[1]
if not input_length:
input_length = K.shape(inputs)[1]
# Shape: (num_samples * timesteps, ...). And track the
# transformation in self._input_map.
input_uid = tf_base_layers._object_list_uid(inputs)
inputs = K.reshape(inputs, (-1,) + input_shape[2:])
self._input_map[input_uid] = inputs
# (num_samples * timesteps, ...)
y = self.layer.call(inputs, **kwargs)
if hasattr(y, '_uses_learning_phase'):
uses_learning_phase = y._uses_learning_phase
# Shape: (num_samples, timesteps, ...)
output_shape = self._compute_output_shape(input_shape).as_list()
y = K.reshape(y, (-1, input_length) + tuple(output_shape[2:]))
# Apply activity regularizer if any:
if (hasattr(self.layer, 'activity_regularizer') and
self.layer.activity_regularizer is not None):
regularization_loss = self.layer.activity_regularizer(y)
self.add_loss(regularization_loss, inputs)
if uses_learning_phase:
y._uses_learning_phase = True
return y
class Bidirectional(Wrapper):
"""Bidirectional wrapper for RNNs.
Arguments:
layer: `Recurrent` instance.
merge_mode: Mode by which outputs of the
forward and backward RNNs will be combined.
One of {'sum', 'mul', 'concat', 'ave', None}.
If None, the outputs will not be combined,
they will be returned as a list.
Raises:
ValueError: In case of invalid `merge_mode` argument.
Examples:
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5,
10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
"""
def __init__(self, layer, merge_mode='concat', weights=None, **kwargs):
super(Bidirectional, self).__init__(layer, **kwargs)
if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:
raise ValueError('Invalid merge mode. '
'Merge mode should be one of '
'{"sum", "mul", "ave", "concat", None}')
self.forward_layer = copy.copy(layer)
config = layer.get_config()
config['go_backwards'] = not config['go_backwards']
self.backward_layer = layer.__class__.from_config(config)
self.forward_layer.name = 'forward_' + self.forward_layer.name
self.backward_layer.name = 'backward_' + self.backward_layer.name
self.merge_mode = merge_mode
if weights:
nw = len(weights)
self.forward_layer.initial_weights = weights[:nw // 2]
self.backward_layer.initial_weights = weights[nw // 2:]
self.stateful = layer.stateful
self.return_sequences = layer.return_sequences
self.supports_masking = True
def get_weights(self):
return self.forward_layer.get_weights() + self.backward_layer.get_weights()
def set_weights(self, weights):
nw = len(weights)
self.forward_layer.set_weights(weights[:nw // 2])
self.backward_layer.set_weights(weights[nw // 2:])
def _compute_output_shape(self, input_shape):
input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
if self.merge_mode in ['sum', 'ave', 'mul']:
return self.forward_layer._compute_output_shape(input_shape) # pylint: disable=protected-access
elif self.merge_mode == 'concat':
shape = self.forward_layer._compute_output_shape(input_shape).as_list() # pylint: disable=protected-access
shape[-1] *= 2
return tensor_shape.TensorShape(shape)
elif self.merge_mode is None:
shape = self.forward_layer._compute_output_shape(input_shape) # pylint: disable=protected-access
return [shape, copy.copy(shape)]
def call(self, inputs, training=None, mask=None):
kwargs = {}
if has_arg(self.layer.call, 'training'):
kwargs['training'] = training
if has_arg(self.layer.call, 'mask'):
kwargs['mask'] = mask
y = self.forward_layer.call(inputs, **kwargs)
y_rev = self.backward_layer.call(inputs, **kwargs)
if self.return_sequences:
y_rev = K.reverse(y_rev, 1)
if self.merge_mode == 'concat':
output = K.concatenate([y, y_rev])
elif self.merge_mode == 'sum':
output = y + y_rev
elif self.merge_mode == 'ave':
output = (y + y_rev) / 2
elif self.merge_mode == 'mul':
output = y * y_rev
elif self.merge_mode is None:
output = [y, y_rev]
# Properly set learning phase
if 0 < self.layer.dropout + self.layer.recurrent_dropout:
if self.merge_mode is None:
for out in output:
out._uses_learning_phase = True
else:
output._uses_learning_phase = True
return output
def reset_states(self):
self.forward_layer.reset_states()
self.backward_layer.reset_states()
def build(self, input_shape):
with K.name_scope(self.forward_layer.name):
self.forward_layer.build(input_shape)
with K.name_scope(self.backward_layer.name):
self.backward_layer.build(input_shape)
self.built = True
def compute_mask(self, inputs, mask):
if self.return_sequences:
if not self.merge_mode:
return [mask, mask]
else:
return mask
else:
return None
@property
def trainable_weights(self):
if hasattr(self.forward_layer, 'trainable_weights'):
return (self.forward_layer.trainable_weights +
self.backward_layer.trainable_weights)
return []
@property
def non_trainable_weights(self):
if hasattr(self.forward_layer, 'non_trainable_weights'):
return (self.forward_layer.non_trainable_weights +
self.backward_layer.non_trainable_weights)
return []
@property
def updates(self):
if hasattr(self.forward_layer, 'updates'):
return self.forward_layer.updates + self.backward_layer.updates
return []
@property
def losses(self):
if hasattr(self.forward_layer, 'losses'):
return self.forward_layer.losses + self.backward_layer.losses
return []
@property
def constraints(self):
constraints = {}
if hasattr(self.forward_layer, 'constraints'):
constraints.update(self.forward_layer.constraints)
constraints.update(self.backward_layer.constraints)
return constraints
def get_config(self):
config = {'merge_mode': self.merge_mode}
base_config = super(Bidirectional, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
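# Shape sketch (follows _compute_output_shape above): wrapping a recurrent
# layer with 10 units and return_sequences=True over input (batch, t, f),
# merge_mode='concat' yields (batch, t, 20); 'sum', 'mul' and 'ave' yield
# (batch, t, 10); merge_mode=None returns a list of two (batch, t, 10) outputs.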
| pavelchristof/gomoku-ai | tensorflow/contrib/keras/python/keras/layers/wrappers.py | Python | apache-2.0 | 14,145 |
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from djblets.util.db import ConcurrencyManager
from reviewboard.reviews.models import Group, ReviewRequest
class ReviewRequestVisit(models.Model):
"""
A recording of the last time a review request was visited by a user.
Users have one ReviewRequestVisit entry in the database per review
request they've visited. This is used to keep track of any updates
to review requests they've already seen, so that we can intelligently
inform them that new discussions have taken place.
"""
user = models.ForeignKey(User, related_name="review_request_visits")
review_request = models.ForeignKey(ReviewRequest, related_name="visits")
timestamp = models.DateTimeField(_('last visited'), default=datetime.now)
# Set this up with a ConcurrencyManager to help prevent race conditions.
objects = ConcurrencyManager()
class Meta:
unique_together = ("user", "review_request")
class Profile(models.Model):
"""User profile. Contains some basic configurable settings"""
user = models.ForeignKey(User, unique=True)
# This will redirect new users to the account settings page the first time
# they log in (or immediately after creating an account). This allows
# people to fix their real name and join groups.
first_time_setup_done = models.BooleanField(default=False,
verbose_name=_("first time setup done"),
help_text=_("Indicates whether the user has already gone through "
"the first time setup process by saving their user "
"preferences."))
collapsed_diffs = models.BooleanField(default=True,
verbose_name=_("collapsed diffs"),
help_text=_("Indicates whether diffs should be shown in their "
"collapsed state by default."))
wordwrapped_diffs = models.BooleanField(default=True,
help_text=_("This field is unused and will be removed in a future "
"version."))
syntax_highlighting = models.BooleanField(default=True,
verbose_name=_("syntax highlighting"),
help_text=_("Indicates whether the user wishes to see "
"syntax highlighting in the diffs."))
# Indicate whether submitted review requests should appear in the
# review request lists (excluding the dashboard).
show_submitted = models.BooleanField(default=True)
sort_review_request_columns = models.CharField(max_length=256, blank=True)
sort_dashboard_columns = models.CharField(max_length=256, blank=True)
sort_submitter_columns = models.CharField(max_length=256, blank=True)
sort_group_columns = models.CharField(max_length=256, blank=True)
review_request_columns = models.CharField(max_length=256, blank=True)
dashboard_columns = models.CharField(max_length=256, blank=True)
submitter_columns = models.CharField(max_length=256, blank=True)
group_columns = models.CharField(max_length=256, blank=True)
# A list of starred review requests. This allows users to monitor a
# review request and receive e-mails on updates without actually being
# on the reviewer list or commenting on the review. This is similar to
# adding yourself to a CC list.
starred_review_requests = models.ManyToManyField(ReviewRequest, blank=True,
related_name="starred_by")
# A list of watched groups. This is so that users can monitor groups
# without actually joining them, preventing e-mails being sent to the
# user and review requests from entering the Incoming Reviews list.
starred_groups = models.ManyToManyField(Group, blank=True,
related_name="starred_by")
def __unicode__(self):
return self.user.username
| asutherland/opc-reviewboard | reviewboard/accounts/models.py | Python | mit | 3,945 |
import argparse
from pokersim.Table import Table
from pokersim.Player import Player
from pokersim.Recorder import Recorder
rec = Recorder()
parser = argparse.ArgumentParser(description='Set up a poker game')
#parser.add_argument('-n', '--numplayers', type=int, nargs='?', default=10, help='Number of players')
parser.add_argument('-d', '--numhands', type=int, nargs='?', default=1, help='Number of hands')
#parser.add_argument('-c', '--chips', action='append', type=int, nargs='+', help='Number of chips')
parser.add_argument('-p', '--players', action='append', nargs=2, metavar=('number_of_chips', 'brain_name'), help='Player info: chips brain_name')
def main():
args = vars(parser.parse_args())
print args
# Get from command line:
# Type of players
# Chips for players
numplayers = len(args['players'])
if numplayers < 3:
print 'At least 3 players are needed. You selected', numplayers
return
#if args['chips'] is not None and len(args['chips']) > numplayers:
#numplayers = len(args['chips'])
table = Table()
for i in xrange(numplayers):
player = Player(int(args['players'][i][0]), args['players'][i][1])
player.sit(table, i)
for i in xrange(args['numhands']):
table.deal()
print 'After', args['numhands'], 'hands:'
for player in table.players.values():
print 'Player', player.position, 'has', player.chips, 'chips'
print 'The Table has', table.box, 'chips'
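# Example invocation (hypothetical entry point and brain names; main() above
# requires at least three -p players):
#   <entry-point> -d 100 -p 1000 RandomBrain -p 1000 RandomBrain -p 1500 RandomBrain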
| adamlincoln/pokersim | src/pokersim/__init__.py | Python | gpl-3.0 | 1,480 |
'''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
def load_data():
# Import MNIST data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
return mnist
def inputs_placeholder():
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes
return [x, y]
def model(x, y):
# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Construct model
pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), axis=1))
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return [cost, optimizer, accuracy]
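# For reference, the cost above is the average categorical cross-entropy
# (a restatement of the TF ops, not extra functionality):
#   J = -(1/m) * sum_i sum_k y[i, k] * log(pred[i, k])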
def training(sess, mnist, x, y, cost, optimizer, accuracy):
'''
data: mnist
graph: x, y, cost, optimizer, accuracy
'''
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
if __name__ == '__main__':
''' '''
# load data
mnist = load_data()
# define inputs
[x, y] = inputs_placeholder()
# model
[cost, optimizer, accuracy] = model(x, y)
# Launch the graph
with tf.Session() as sess:
# Initializing the variables
init = tf.global_variables_initializer()
sess.run(init)
# training
training(sess, mnist, x, y, cost, optimizer, accuracy)
| trhongbinwang/data_science_journey | deep_learning/tensorflow/tutorials/tutorial3/examples/2_BasicModels/logistic_regression.py | Python | apache-2.0 | 3,004 |
"""
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
    log records to a Message Queue server. You can find it in: FrameworkSystem/private/standardLogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
    :param parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass
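# Minimal usage sketch (the 'MsgQueue' key matches the parameter read in
# createHandler above; the queue name is a hypothetical example):
#   backend = MessageQueueBackend()
#   backend.createHandler({'MsgQueue': 'dirac.logging.TestQueue'})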
| arrabito/DIRAC | Resources/LogBackends/MessageQueueBackend.py | Python | gpl-3.0 | 1,639 |
# Copyright (c) MetaCommunications, Inc. 2003-2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import xml.sax.saxutils
import zipfile
import ftplib
import time
import stat
import xml.dom.minidom
import xmlrpclib
import httplib
import os.path
import string
import sys
import re
import urlparse
def process_xml_file( input_file, output_file ):
utils.log( 'Processing test log "%s"' % input_file )
f = open( input_file, 'r' )
xml = f.readlines()
f.close()
for i in range( 0, len(xml)):
xml[i] = string.translate( xml[i], utils.char_translation_table )
output_file.writelines( xml )
def process_test_log_files( output_file, dir, names ):
for file in names:
if os.path.basename( file ) == 'test_log.xml':
process_xml_file( os.path.join( dir, file ), output_file )
def collect_test_logs( input_dirs, test_results_writer ):
__log__ = 1
utils.log( 'Collecting test logs ...' )
for input_dir in input_dirs:
utils.log( 'Walking directory "%s" ...' % input_dir )
os.path.walk( input_dir, process_test_log_files, test_results_writer )
dart_status_from_result = {
'succeed': 'passed',
'fail': 'failed',
'note': 'passed',
'': 'notrun'
}
dart_project = {
'trunk': 'Boost_HEAD',
'': 'Boost_HEAD'
}
dart_track = {
'full': 'Nightly',
'incremental': 'Continuous',
'': 'Experimental'
}
ascii_only_table = ""
for i in range(0,256):
if chr(i) == '\n' or chr(i) == '\r':
ascii_only_table += chr(i)
elif i < 32 or i >= 0x80:
ascii_only_table += '?'
else:
ascii_only_table += chr(i)
class xmlrpcProxyTransport(xmlrpclib.Transport):
def __init__(self, proxy):
self.proxy = proxy
def make_connection(self, host):
self.realhost = host
return httplib.HTTP(self.proxy)
def send_request(self, connection, handler, request_body):
connection.putrequest('POST','http://%s%s' % (self.realhost,handler))
def send_host(self, connection, host):
connection.putheader('Host',self.realhost)
def publish_test_logs(
input_dirs,
runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
dart_server = None,
http_proxy = None,
**unused
):
__log__ = 1
utils.log( 'Publishing test logs ...' )
dart_rpc = None
dart_dom = {}
def _publish_test_log_files_ ( unused, dir, names ):
for file in names:
if os.path.basename( file ) == 'test_log.xml':
utils.log( 'Publishing test log "%s"' % os.path.join(dir,file) )
if dart_server:
log_xml = open(os.path.join(dir,file)).read().translate(ascii_only_table)
#~ utils.log( '--- XML:\n%s' % log_xml)
#~ It seems possible to get an empty XML result file :-(
if log_xml == "": continue
log_dom = xml.dom.minidom.parseString(log_xml)
test = {
'library': log_dom.documentElement.getAttribute('library'),
'test-name': log_dom.documentElement.getAttribute('test-name'),
'toolset': log_dom.documentElement.getAttribute('toolset')
}
if not test['test-name'] or test['test-name'] == '':
test['test-name'] = 'unknown'
if not test['toolset'] or test['toolset'] == '':
test['toolset'] = 'unknown'
if not dart_dom.has_key(test['toolset']):
dart_dom[test['toolset']] = xml.dom.minidom.parseString(
'''<?xml version="1.0" encoding="UTF-8"?>
<DartSubmission version="2.0" createdby="collect_and_upload_logs.py">
<Site>%(site)s</Site>
<BuildName>%(buildname)s</BuildName>
<Track>%(track)s</Track>
<DateTimeStamp>%(datetimestamp)s</DateTimeStamp>
</DartSubmission>
''' % {
'site': runner_id,
'buildname': "%s -- %s (%s)" % (platform,test['toolset'],run_type),
'track': dart_track[run_type],
'datetimestamp' : timestamp
} )
submission_dom = dart_dom[test['toolset']]
for node in log_dom.documentElement.childNodes:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
if node.firstChild:
log_data = xml.sax.saxutils.escape(node.firstChild.data)
else:
log_data = ''
test_dom = xml.dom.minidom.parseString('''<?xml version="1.0" encoding="UTF-8"?>
<Test>
<Name>.Test.Boost.%(tag)s.%(library)s.%(test-name)s.%(type)s</Name>
<Status>%(result)s</Status>
<Measurement name="Toolset" type="text/string">%(toolset)s</Measurement>
<Measurement name="Timestamp" type="text/string">%(timestamp)s</Measurement>
<Measurement name="Log" type="text/text">%(log)s</Measurement>
</Test>
''' % {
'tag': tag,
'library': test['library'],
'test-name': test['test-name'],
'toolset': test['toolset'],
'type': node.nodeName,
'result': dart_status_from_result[node.getAttribute('result')],
'timestamp': node.getAttribute('timestamp'),
'log': log_data
})
submission_dom.documentElement.appendChild(
test_dom.documentElement.cloneNode(1) )
for input_dir in input_dirs:
utils.log( 'Walking directory "%s" ...' % input_dir )
os.path.walk( input_dir, _publish_test_log_files_, None )
if dart_server:
try:
rpc_transport = None
if http_proxy:
rpc_transport = xmlrpcProxyTransport(http_proxy)
dart_rpc = xmlrpclib.ServerProxy(
'http://%s/%s/Command/' % (dart_server,dart_project[tag]),
rpc_transport )
for dom in dart_dom.values():
#~ utils.log('Dart XML: %s' % dom.toxml('utf-8'))
dart_rpc.Submit.put(xmlrpclib.Binary(dom.toxml('utf-8')))
except Exception, e:
utils.log('Dart server error: %s' % e)
def upload_to_ftp( tag, results_file, ftp_proxy, debug_level, ftp_url ):
if not ftp_url:
ftp_host = 'boost.cowic.de'
ftp_url = ''.join(['ftp','://anonymous','@',ftp_host,'/boost/do-not-publish-this-url/results/'])
utils.log( 'Uploading log archive "%s" to %s/%s' % ( results_file, ftp_url, tag ) )
ftp_parts = urlparse.urlparse(ftp_url)
ftp_netloc = re.split('[@]',ftp_parts[1])
ftp_user = re.split('[:]',ftp_netloc[0])[0]
ftp_password = re.split('[:]',ftp_netloc[0]+':anonymous')[1]
ftp_site = re.split('[:]',ftp_netloc[1])[0]
ftp_path = ftp_parts[2]
if not ftp_proxy:
ftp = ftplib.FTP( ftp_site )
ftp.set_debuglevel( debug_level )
ftp.login( ftp_user, ftp_password )
else:
utils.log( ' Connecting through FTP proxy server "%s"' % ftp_proxy )
ftp = ftplib.FTP( ftp_proxy )
ftp.set_debuglevel( debug_level )
ftp.set_pasv (0) # turn off PASV mode
ftp.login( '%s@%s' % (ftp_user,ftp_site), ftp_password )
ftp.cwd( ftp_path )
try:
ftp.cwd( tag )
except ftplib.error_perm:
for dir in tag.split( '/' ):
ftp.mkd( dir )
ftp.cwd( dir )
f = open( results_file, 'rb' )
ftp.storbinary( 'STOR %s' % os.path.basename( results_file ), f )
ftp.quit()
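# Example ftp_url in the form the parser above expects
# (ftp://user:password@host/path); when ftp_url is empty, the anonymous
# boost.cowic.de default defined above is used:
#   upload_to_ftp('develop', '/tmp/Runner-1.zip', None, 0,
#                 'ftp://anonymous:anonymous@boost.cowic.de/boost/do-not-publish-this-url/results/')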
def copy_comments( results_xml, comment_file ):
results_xml.startElement( 'comment', {} )
if os.path.exists( comment_file ):
utils.log( 'Reading comments file "%s"...' % comment_file )
f = open( comment_file, 'r' )
try:
results_xml.characters( f.read() )
finally:
f.close()
else:
utils.log( 'Warning: comment file "%s" is not found.' % comment_file )
results_xml.endElement( 'comment' )
def compress_file( file_path, archive_path ):
utils.log( 'Compressing "%s"...' % file_path )
try:
z = zipfile.ZipFile( archive_path, 'w', zipfile.ZIP_DEFLATED )
z.write( file_path, os.path.basename( file_path ) )
z.close()
utils.log( 'Done writing "%s".'% archive_path )
except Exception, msg:
        utils.log( 'Warning: Compressing failed (%s)' % msg )
utils.log( ' Trying to compress using a platform-specific tool...' )
try: import zip_cmd
except ImportError:
script_dir = os.path.dirname( os.path.abspath( sys.argv[0] ) )
utils.log( 'Could not find \'zip_cmd\' module in the script directory (%s).' % script_dir )
raise Exception( 'Compressing failed!' )
else:
        if os.path.exists( archive_path ):
            utils.log( 'Removing stale "%s".' % archive_path )
            os.unlink( archive_path )
zip_cmd.main( file_path, archive_path )
utils.log( 'Done compressing "%s".' % archive_path )
def read_timestamp( file ):
if not os.path.exists( file ):
result = time.gmtime()
utils.log( 'Warning: timestamp file "%s" does not exist'% file )
utils.log( 'Using current UTC time (%s)' % result )
return result
return time.gmtime( os.stat( file ).st_mtime )
def collect_logs(
results_dir
, runner_id
, tag
, platform
, comment_file
, timestamp_file
, user
, source
, run_type
, dart_server = None
, http_proxy = None
, revision = ''
, **unused
):
timestamp = time.strftime( '%Y-%m-%dT%H:%M:%SZ', read_timestamp( timestamp_file ) )
if dart_server:
publish_test_logs( [ results_dir ],
runner_id, tag, platform, comment_file, timestamp, user, source, run_type,
dart_server = dart_server,
http_proxy = http_proxy )
results_file = os.path.join( results_dir, '%s.xml' % runner_id )
results_writer = open( results_file, 'w' )
utils.log( 'Collecting test logs into "%s"...' % results_file )
results_xml = xml.sax.saxutils.XMLGenerator( results_writer )
results_xml.startDocument()
results_xml.startElement(
'test-run'
, {
'tag': tag
, 'platform': platform
, 'runner': runner_id
, 'timestamp': timestamp
, 'source': source
, 'run-type': run_type
, 'revision': revision
}
)
copy_comments( results_xml, comment_file )
collect_test_logs( [ results_dir ], results_writer )
results_xml.endElement( "test-run" )
results_xml.endDocument()
results_writer.close()
utils.log( 'Done writing "%s".' % results_file )
compress_file(
results_file
, os.path.join( results_dir,'%s.zip' % runner_id )
)
def upload_logs(
results_dir
, runner_id
, tag
, user
, ftp_proxy
, debug_level
, send_bjam_log = False
, timestamp_file = None
, dart_server = None
, ftp_url = None
, **unused
):
logs_archive = os.path.join( results_dir, '%s.zip' % runner_id )
upload_to_ftp( tag, logs_archive, ftp_proxy, debug_level, ftp_url )
if send_bjam_log:
bjam_log_path = os.path.join( results_dir, 'bjam.log' )
if not timestamp_file:
timestamp_file = bjam_log_path
timestamp = time.strftime( '%Y-%m-%d-%H-%M-%S', read_timestamp( timestamp_file ) )
logs_archive = os.path.join( results_dir, '%s.%s.log.zip' % ( runner_id, timestamp ) )
compress_file( bjam_log_path, logs_archive )
upload_to_ftp( '%s/logs' % tag, logs_archive, ftp_proxy, debug_level, ftp_url )
def collect_and_upload_logs(
results_dir
, runner_id
, tag
, platform
, comment_file
, timestamp_file
, user
, source
, run_type
, revision = None
, ftp_proxy = None
, debug_level = 0
, send_bjam_log = False
, dart_server = None
, http_proxy = None
, ftp_url = None
, **unused
):
collect_logs(
results_dir
, runner_id
, tag
, platform
, comment_file
, timestamp_file
, user
, source
, run_type
, revision = revision
, dart_server = dart_server
, http_proxy = http_proxy
)
upload_logs(
results_dir
, runner_id
, tag
, user
, ftp_proxy
, debug_level
, send_bjam_log
, timestamp_file
, dart_server = dart_server
, ftp_url = ftp_url
)
def accept_args( args ):
args_spec = [
'locate-root='
, 'runner='
, 'tag='
, 'platform='
, 'comment='
, 'timestamp='
, 'source='
, 'run-type='
, 'user='
, 'ftp-proxy='
, 'proxy='
, 'debug-level='
, 'send-bjam-log'
, 'help'
, 'dart-server='
, 'revision='
, 'ftp='
]
options = {
'--tag' : 'trunk'
, '--platform' : sys.platform
, '--comment' : 'comment.html'
, '--timestamp' : 'timestamp'
, '--user' : None
, '--source' : 'SVN'
, '--run-type' : 'full'
, '--ftp-proxy' : None
, '--proxy' : None
, '--debug-level' : 0
, '--dart-server' : 'beta.boost.org:8081'
, '--revision' : None
, '--ftp' : None
}
utils.accept_args( args_spec, args, options, usage )
return {
'results_dir' : options[ '--locate-root' ]
, 'runner_id' : options[ '--runner' ]
, 'tag' : options[ '--tag' ]
, 'platform' : options[ '--platform']
, 'comment_file' : options[ '--comment' ]
, 'timestamp_file' : options[ '--timestamp' ]
, 'user' : options[ '--user' ]
, 'source' : options[ '--source' ]
, 'run_type' : options[ '--run-type' ]
, 'ftp_proxy' : options[ '--ftp-proxy' ]
, 'http_proxy' : options[ '--proxy' ]
, 'debug_level' : int(options[ '--debug-level' ])
, 'send_bjam_log' : options.has_key( '--send-bjam-log' )
, 'dart_server' : options[ '--dart-server' ]
, 'revision' : options[ '--revision' ]
, 'ftp' : options[ '--ftp' ]
}
commands = {
'collect-and-upload' : collect_and_upload_logs
, 'collect-logs' : collect_logs
, 'upload-logs' : upload_logs
}
def usage():
print 'Usage: %s [command] [options]' % os.path.basename( sys.argv[0] )
print '''
Commands:
\t%s
Options:
\t--locate-root directory to scan for "test_log.xml" files
\t--runner runner ID (e.g. "Metacomm")
\t--timestamp path to a file whose modification time will be used
\t as a timestamp of the run ("timestamp" by default)
\t--comment an HTML comment file to be inserted in the reports
\t ("comment.html" by default)
\t--tag the tag for the results ("trunk" by default)
\t--user SourceForge user name for a shell account (optional)
\t--source where Boost sources came from ("SVN" or "tarball";
\t "SVN" by default)
\t--run-type "incremental" or "full" ("full" by default)
\t--send-bjam-log in addition to regular XML results, send in full bjam
\t log of the regression run
\t--proxy HTTP proxy server address and port (e.g.
\t 'http://www.someproxy.com:3128', optional)
\t--ftp-proxy FTP proxy server (e.g. 'ftpproxy', optional)
\t--debug-level debugging level; controls the amount of debugging
\t output printed; 0 by default (no debug output)
\t--dart-server The dart server to send results to.
\t--ftp The ftp URL to upload results to.
''' % '\n\t'.join( commands.keys() )
def main():
if len(sys.argv) > 1 and sys.argv[1] in commands:
command = sys.argv[1]
args = sys.argv[ 2: ]
else:
command = 'collect-and-upload'
args = sys.argv[ 1: ]
commands[ command ]( **accept_args( args ) )
if __name__ != '__main__': import utils
else:
    # in absence of relative import...
xsl_path = os.path.abspath( os.path.dirname( sys.argv[ 0 ] ) )
while os.path.basename( xsl_path ) != 'xsl_reports': xsl_path = os.path.dirname( xsl_path )
sys.path.append( xsl_path )
import utils
main()
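# Hedged invocation sketch (paths and the runner name are placeholders, not
# values from the original project):
#   python collect_and_upload_logs.py collect-and-upload \
#       --locate-root=/path/to/results --runner=MyRunner --tag=trunk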
| scs/uclinux | lib/boost/boost_1_38_0/tools/regression/src/collect_and_upload_logs.py | Python | gpl-2.0 | 17,570 |
from gensim import models
# Collect MeSH headings (lines of the form "MH = <term>") from the 2017
# descriptor file, keeping them both as a list and as a set for the later
# intersection with the LDA topic words.
with open("D:\\Kuangyichen\\PythonRepository\\MedicineSCI\\d2017.bin", "r") as readMH:
    w = open('C:\\Users\\xmu\\Desktop\\MH.txt', 'w')
    MH_List = []
    cor1 = set()
    for line in readMH:
        term = str(line).strip().split(" = ")
        if term[0] == "MH":
            cor1.add(term[1])
            MH_List.append(term[1])
            w.write(term[1] + '\n')
    w.close()  # the with block already closes readMH; only w needs an explicit close
print(MH_List)
print("------------------------------------------------")
#---------------------------------------------------------------
# Load a trained LDA model and collect the top 400 words of topic 1.
lda1 = models.LdaModel.load('D:/Kuangyichen/PythonRepository/MedicineTool/temp/_2004-2005lda_moedel')
show = lda1.show_topics(num_words=400, formatted=False)
dict_show = dict(show)
#dict_show[i]
word_list = []  # renamed from `list`, which shadowed the builtin
cor2 = set()
for i in dict(dict_show[1]).iterkeys():
    word_list.append(str(i).split(' [')[0])
    cor2.add(str(i).split(' [')[0])
    print i
#----------------------------------------------------------------
# path = 'D:\\Kuangyichen\\PythonRepository\\MedicineTool\\item_list.txt'
# path = "D:\\Kuangyichen\\JavaRepository\\LDAGibbsSampling-master\\data\\LdaResults\\termList.txt"
# with open(path,"r") as readWord:
# Word_List = []
# cor2 = set()
# for line in readWord:
# term = str(line).split()[0]
# cor2.add(term)
# Word_List.append(term)
# print(Word_List)
# print("------------------------------------------------")
# Intersection: MeSH headings that also appear among the topic words.
cor3 = cor1 & cor2
print(cor3)
with open("C:\\Users\\xmu\\Desktop\\test2.txt","w") as write:
for word in cor3:
write.write(word+"\n")
| EachenKuang/PythonRepository | MedicineSCI/Tools/getMH.py | Python | apache-2.0 | 1,560 |
#!/usr/bin/env python
from road import Road
import time
# impacts default behavior for most states
SPEED_LIMIT = 10
# all traffic in lane (besides ego) follow these speeds
LANE_SPEEDS = [6,7,8,9]
# LANE_SPEEDS = [5,6,7,8]
# Number of available "cells" which should have traffic
TRAFFIC_DENSITY = 0.15
# At each timestep, ego can set acceleration to value between
# -MAX_ACCEL and MAX_ACCEL
MAX_ACCEL = 2
# s value and lane number of goal.
GOAL = (300, 0)
# These affect the visualization
FRAMES_PER_SECOND = 4
AMOUNT_OF_ROAD_VISIBLE = 40
def main():
road = Road(SPEED_LIMIT, TRAFFIC_DENSITY, LANE_SPEEDS)
road.update_width = AMOUNT_OF_ROAD_VISIBLE
road.populate_traffic()
    ego_config = {
'speed_limit' : SPEED_LIMIT,
'num_lanes' : len(LANE_SPEEDS),
'goal' : GOAL,
'max_acceleration': MAX_ACCEL
}
road.add_ego(2,0, ego_config)
timestep = 0
while road.get_ego().s <= GOAL[0]:
timestep += 1
if timestep > 150:
print ("Taking too long to reach goal. Go faster!")
break
road.advance()
print (road)
time.sleep(float(1.0) / FRAMES_PER_SECOND)
ego = road.get_ego()
if ego.lane == GOAL[1]:
print ("You got to the goal in {} seconds!".format(timestep))
else:
print ("You missed the goal. You are in lane {} instead of {}.".format(ego.lane, GOAL[1]))
if __name__ == "__main__":
main()
| mlandry1/CarND | Labs/Term3/Lesson 4 - 16/python3/CarND - Behavior Planner/simulate_behavior.py | Python | mit | 1,446 |
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
import signal
## Gtk
## https://lazka.github.io/pgi-docs/index.html#Gtk-3.0
## https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes.html
## AppIndicator3
## https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/
## https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/classes.html
class App:
app_name = 'DemoApp'
icon_name_on_app_activate = 'folder'
icon_name_on_app_deactivate = 'view-restore'
icon_name_btn_app_quit = 'application-exit'
def __init__(self):
self.init_signal()
self.init_menu()
def init_signal (self):
## https://docs.python.org/3/library/signal.html
## https://docs.python.org/2/library/signal.html
signal.signal(signal.SIGINT, signal.SIG_DFL)
def init_menu(self):
# https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/Menu.html
# https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/Menu.html#Gtk.Menu.new
self.menu = menu = Gtk.Menu()
# https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/MenuItem.html
# https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/MenuItem.html#Gtk.MenuItem.new
item = Gtk.MenuItem.new_with_label('about')
item.connect('activate', self.do_show_about)
menu.append(item)
#http://www.pygtk.org/pygtk2reference/gtk-stock-items.html
#https://lazka.github.io/pgi-docs/Gtk-3.0/classes/Image.html#Gtk.Image.new_from_icon_name
img = Gtk.Image.new_from_icon_name(self.icon_name_btn_app_quit, 16)
## item = Gtk.MenuItem.new_with_label('quit')
## https://lazka.github.io/pgi-docs/Gtk-3.0/classes/ImageMenuItem.html#Gtk.ImageMenuItem.new_with_label
item = Gtk.ImageMenuItem.new_with_label('quit')
#item.connect('activate', Gtk.main_quit)
item.connect('activate', self.on_quit_app)
item.set_image(img)
menu.append(item)
#print(dir(Gtk))
#print(Gtk.STOCK_QUIT)
## item = Gtk.MenuItem.new_with_label('quit')
## http://www.pygtk.org/pygtk2reference/gtk-stock-items.html
## https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/constants.html#Gtk.STOCK_QUIT
## The “Quit” item and icon.
## Deprecated since version 3.10: Use named icon “application-exit” or the label “_Quit”.
## item = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_QUIT)
item = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_QUIT)
#item.connect('activate', Gtk.main_quit)
item.connect('activate', self.on_quit_app)
menu.append(item)
menu.show_all()
# https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/classes/Indicator.html
# https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/classes/Indicator.html#AppIndicator3.Indicator.new
# new (id, icon_name, category)
self.indicator = indicator = AppIndicator.Indicator.new(
self.app_name,
self.icon_name_on_app_activate,
AppIndicator.IndicatorCategory.APPLICATION_STATUS
)
# https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/classes/Indicator.html#AppIndicator3.Indicator.set_menu
indicator.set_menu(menu)
# https://lazka.github.io/pgi-docs/index.html#AppIndicator3-0.1/classes/Indicator.html#AppIndicator3.Indicator.set_status
indicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)
#type(indicator)
#<class 'gi.repository.AppIndicator3.Indicator'>
def do_show_about(self, menu_item):
print('')
print('do_show_about:')
print(' menu_item:', menu_item)
def on_quit_app(self, menu_item):
print('')
print('on_quit_app:')
print(' menu_item:', menu_item)
Gtk.main_quit()
def on_activate_app(self, menu_item):
print('')
print('on_activate_app:')
print(' menu_item:', menu_item)
self.do_show_app()
self.do_switch_on_activate_icon()
    def do_show_app(self):
        # NOTE: self.win is never assigned in this demo; these handlers assume
        # the embedding code attaches a Gtk.Window as `self.win` before they
        # are invoked (see the sketch at the end of the file).
        # https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/Widget.html#Gtk.Widget.show
        # self.win.show();
        # https://lazka.github.io/pgi-docs/index.html#Gtk-3.0/classes/Window.html#Gtk.Window.present
        self.win.present()
    def do_hide_app(self):
        self.win.hide()
def do_switch_on_deactivate_icon(self):
# https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html
self.indicator.set_icon(self.icon_name_on_app_deactivate)
def do_switch_on_activate_icon(self):
# https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html
self.indicator.set_icon(self.icon_name_on_app_activate)
def run(self):
Gtk.main()
def main():
app = App()
app.run()
if __name__ == '__main__':
main()
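## Hedged extension sketch (not part of the original demo): on_activate_app()
## and do_show_app() only work if a window has been attached as `app.win`
## beforehand, e.g.:
##
##   app = App()
##   app.win = Gtk.Window(title=App.app_name)   # hypothetical wiring
##   app.run()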
| foreachsam/book-lang-python | example/subject/gtk/appindicator/composite/demo-daemon/main.py | Python | mit | 4,595 |
import pymorphy2
def mrph(lemmas):
"""
Guesses lemmas for unknown words, using pymorphy analyzer, returns list of lemmas.
"""
morph = pymorphy2.MorphAnalyzer()
    lemma = []
    for elem in lemmas:
        # take the most probable parse and use its normal (dictionary) form
        tag_token = morph.parse(elem)[0].normal_form
        lemma.append(tag_token)
return lemma
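# Hedged usage sketch (the example word and output are illustrative):
#   >>> mrph([u'люди'])
#   [u'человек']
# Note: a MorphAnalyzer is constructed on every call; for large inputs it may
# be worth hoisting it to module level.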
| azilya/Zaliznyak-s-grammatical-dictionary | gdictionary/morph.py | Python | lgpl-3.0 | 350 |
# FusionPBX Plugin for the Media Server api
import psycopg2
import psycopg2.extras
import json
import uuid
# Defining a dialplan object, which allows you to define how the
# dialplan within a domain behaves
class cos_dialplan():
name = None
dialplan_xml = None
def __init__(self):
self.dialplan_xml = []
def add(self,data):
dialplan_entry = {}
dialplan_entry['name'] = data['name']
dialplan_entry['number'] = data['number']
dialplan_entry['continue'] = data['continue']
dialplan_entry['order'] = data['order']
dialplan_entry['description'] = data['description']
dialplan_entry['enabled'] = data['enabled']
dialplan_entry['hostname'] = data['hostname']
dialplan_entry['logic'] = data['logic']
self.dialplan_xml.append(dialplan_entry)
class cos():
domain_uuid=""
db = ""
dialplan_list = []
domain = None
def __init__(self, domain,data=None):
if domain.db:
self.db = domain.db
self.domain_uuid = domain.domain_id
self.domain = domain
# Load up some default CoS definitions
# The default class of service will use the global dialplan within FUSIONPBX
# In other words no changes to the Domain dialplan manager
cos_default = cos_dialplan()
cos_default.name="default"
cos_default.dialplan_xml = None
self.dialplan_list.append(cos_default)
# This class of service will disable inbound caller id
cos_standard = cos_dialplan()
cos_standard.name="standard"
entry = {}
entry['name'] = "block_caller_id"
entry['number'] = ""
entry['continue'] = "true"
entry['order'] = 700
entry['enabled'] = "true"
entry['description'] = "Block inbound caller id"
entry['hostname'] = ""
entry['logic'] = "<extension name=\"local_extension\" continue=\"true\" uuid=\"fbba5244-7453-4390-8976-2c307140529g\"> \
<condition field=\"${user_exists}\" expression=\"true\"> \
<action application=\"export\" data=\"sip_cid_type=none\"/> \
<action application=\"export\" data=\"origination_caller_id_name=Blocked\"/> \
<action application=\"export\" data=\"origination_caller_id_number=0000000000\"/> \
</condition> \
</extension>"
cos_standard.add(entry)
self.dialplan_list.append(cos_standard)
if data is not None and 'settings' in data:
domain_settings = data['settings']
cos_settings = cos_dialplan()
cos_settings.name="domain_settings"
entry = {}
entry['name'] = "domain_settings"
entry['number'] = ""
entry['continue'] = "true"
entry['order'] = 20
entry['enabled'] = "true"
entry['description'] = "Domain Settings"
entry['hostname'] = ""
entry['logic'] = "<extension name=\"domain_settings\" continue=\"true\" uuid=\"af2cd91f-1db2-4742-b8c2-ef519ba6e8b5\"> \
<condition field=\"\" expression=\"\"> \
<action application=\"set\" data=\"default_language={}\"/> \
<action application=\"set\" data=\"default_dialect={}\"/> \
<action application=\"set\" data=\"default_voice={}\"/> \
</condition> \
</extension>".format(domain_settings['default_language'],domain_settings['default_dialect'],domain_settings['default_voice'])
cos_settings.add(entry)
self.dialplan_list.append(cos_settings)
def create(self, name):
psycopg2.extras.register_uuid()
app_uuid = uuid.uuid4()
cur = self.db.cursor()
for cos_dp in self.dialplan_list:
print("{},{}".format(cos_dp.name,name))
if cos_dp.name == name:
if cos_dp.dialplan_xml == None:
continue
for xml in cos_dp.dialplan_xml:
dialplan_uuid = uuid.uuid4()
query = "insert into v_dialplans (domain_uuid,dialplan_uuid,app_uuid,dialplan_context, \
dialplan_name,dialplan_number,dialplan_continue,dialplan_order,dialplan_enabled, \
dialplan_description, hostname, dialplan_xml) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
values = [self.domain_uuid,dialplan_uuid,app_uuid,self.domain.name, \
xml['name'], xml['number'], xml['continue'], \
xml['order'], xml['enabled'],xml['description'], \
xml['hostname'],xml['logic']]
cur.execute(query,values)
self.db.commit()
cur.close()
def delete(self):
psycopg2.extras.register_uuid()
cur = self.db.cursor()
cur.execute("""delete from v_dialplans where domain_uuid = %s""",(self.domain_uuid))
self.db.commit()
cur.close()
return True
class mediaserver():
hostname=''
port=''
auth_type=''
username=''
password=''
dbname="fusionpbx"
db=''
def __init__(self, config):
self.hostname =config.hostname
self.port = config.port
self.auth_type = config.auth_type
self.username = config.username
self.password = config.password
self.dbname = config.dbname
def testConnection(self):
print("testConnection")
pass
def getConnection(self):
try:
            db = psycopg2.connect(host=self.hostname,port=self.port,user=self.username,password=self.password,dbname=self.dbname)
            if db is not None:
                print("Connection to FusionPBX: {} database was successful".format(self.hostname))
            self.db = db  # keep a reference so closeConnection() can close it
            return db
        except Exception as ex:
            raise
    def closeConnection(self):
        # the original called close() on an undefined global `db`
        self.db.close()
class domain():
domain_id=""
name = ""
enabled = ""
description = ""
cos = "" # Class of Service
db = None
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class domains():
mediaserver=''
db=''
domain_list = []
def __init__(self, mediaserver):
self.mediaserver = mediaserver
self.db = self.mediaserver.getConnection()
def create(self,data):
        # register UUID handling before working with UUID objects in PostgreSQL
        psycopg2.extras.register_uuid()
domain_uuid = uuid.uuid4()
cur = self.db.cursor()
# Check for Duplicate domains
cur.execute("""select domain_name from v_domains where domain_name = %s""",(data['name'],))
rows = cur.fetchall()
        if len(rows) > 0:
            raise Exception("The domain already exists")
# Create the new domain
cur.execute("""insert into v_domains (domain_uuid,domain_name,domain_enabled,domain_description) \
values (%s,%s,%s,%s)""",(domain_uuid,data['name'],data['enabled'],data['description']))
self.db.commit()
d = domain()
d.domain_id = domain_uuid
d.name = data['name']
d.enabled = data['enabled']
d.description = data['description']
d.db = self.db
d.cos = data['cos']
cur.close()
return d
def read(self,domain_id=None):
cur = self.db.cursor()
query = "select domain_uuid,domain_name,domain_enabled,domain_description from v_domains"
values = []
if domain_id:
query = query + " where domain_uuid = %s"
values.append(domain_id)
cur.execute(query,(values))
rows = cur.fetchall()
if rows is not None:
for row in rows:
d = {}
d['domain_uuid'] = row[0]
d['domain_id'] = row[0]
d['name']= row[1]
d['enabled'] = row[2]
d['description'] = row[3]
self.domain_list.append(d)
return self.domain_list
def update(self,data):
cur = self.db.cursor()
# Convert UUID string to UUID type
domain_uuid=data['domain_id']
# Get the current data for the domain from the database
current_data = self.read(domain_uuid)
# Update the current data with the new data
if len(current_data) >= 1:
current_data[0]['name'] = data['name']
current_data[0]['enabled'] = data['enabled']
current_data[0]['description'] = data['description']
cur.execute("""update v_domains set domain_name= %s,domain_enabled = %s,domain_description = %s where domain_uuid= %s""", \
(current_data[0]['name'],current_data[0]['enabled'],current_data[0]['description'],domain_uuid))
rows = cur.rowcount
self.db.commit()
cur.close()
return True
def delete(self,domain_id):
        # register UUID handling before working with UUID objects in PostgreSQL
        psycopg2.extras.register_uuid()
cur = self.db.cursor()
# Delete from domains
cur.execute("""delete from v_domains where domain_uuid = %s""",(domain_id,))
# Delete the inbound dialplan for the domain
cur.execute("""delete from v_dialplans where domain_uuid = %s""",(domain_id,))
self.db.commit()
cur.close()
return True
def getExtensions():
pass
class extension():
extension_id=""
domain_id=""
account_code=""
extension=""
password=""
outbound_caller_number=""
outbound_caller_name=""
vm_enabled=True
vm_password=""
vm_notify_email=""
enabled=False
call_timeout=30
def __init__(self):
pass
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class extensions():
mediaserver=None
domain=None
domain_name=''
domain_uuid=''
db=''
extension_list = []
def __init__(self, mediaserver, domain ,extension=None):
self.mediaserver = mediaserver
self.db = self.mediaserver.getConnection()
self.domain = domain
self.domain_uuid = self.domain['domain_id']
self.domain_name = self.domain['name']
def create(self,data):
psycopg2.extras.register_uuid()
extension_uuid = uuid.uuid4()
voicemail_uuid = uuid.uuid4()
cur = self.db.cursor()
cur.execute("""insert into v_extensions (extension_uuid,domain_uuid,extension,password, \
user_context,call_timeout,enabled,outbound_caller_id_number, \
outbound_caller_id_name,accountcode) \
values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)""", \
(extension_uuid, self.domain_uuid, data.extension, \
data.password, self.domain['name'], data.call_timeout, data.enabled, \
data.outbound_caller_number,data.outbound_caller_name,data.account_code))
# Create voicemail box
query = "insert into v_voicemails (domain_uuid,voicemail_uuid,voicemail_id, \
voicemail_password,greeting_id,voicemail_alternate_greet_id, \
voicemail_mail_to,voicemail_sms_to,voicemail_transcription_enabled,voicemail_attach_file, \
voicemail_file,voicemail_local_after_email,voicemail_enabled,voicemail_description, \
voicemail_name_base64,voicemail_tutorial) \
values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) "
values = [self.domain_uuid,voicemail_uuid, \
data.extension,data.vm_password,None,None,data.vm_notify_email,None,None,None,'attach',"true", data.vm_enabled,None,None,None]
cur.execute(query,values)
self.db.commit()
cur.close()
def read(self, extension_id=None):
cur = self.db.cursor()
query = "select extension_uuid,domain_uuid,extension,password,user_context,call_timeout::integer as call_timeout, \
enabled,outbound_caller_id_number,outbound_caller_id_name, accountcode from v_extensions where domain_uuid = %s"
values = [self.domain_uuid]
if extension_id:
query = query + " and extension_uuid = %s"
values.append(extension_id)
cur.execute(query,(values))
rows = cur.fetchall()
if rows is not None:
for row in rows:
d = {}
d['extensions_id'] = row[0]
d['domain_uuid']= row[1]
d['extension'] = row[2]
#d['password'] = row[3]
d['user_context'] = row[4]
d['call_timeout'] = row[5]
d['enabled'] = row[6]
d['outbound_caller_number'] = row[7]
d['outbound_caller_name'] = row[8]
d['account_code'] = row[9]
self.extension_list.append(d)
return self.extension_list
def update(self,data):
psycopg2.extras.register_uuid()
cur = self.db.cursor()
query="update v_extensions set "
values=[]
db_data = {}
        #Map canonical format to database format
db_data['domain_uuid'] = data['domain_id']
db_data['extension'] = data['extension']
db_data['password'] = data['password']
db_data['enabled'] = data['enabled']
db_data['accountcode'] = data['account_code']
db_data['outbound_caller_id_number'] = data['outbound_caller_number']
db_data['outbound_caller_id_name'] = data['outbound_caller_name']
db_data['call_timeout'] = data['call_timeout']
for element in db_data:
# Don't process Voice Mail Attributes
if "vm_" in element:
continue
query = query + "{} = %s,".format(element)
values.append(db_data[element])
# Remove the last comma
if query[len(query)-1] == ',':
query = query[:-1]
query = query + " where extension_uuid = '{}'".format(self.getExtensionID(db_data['extension']))
print(query)
print(values)
cur.execute(query,(values))
self.db.commit()
cur.close()
def getExtensionID(self,extension):
cur = self.db.cursor()
query = "select extension_uuid from v_extensions where domain_uuid = %s and extension = %s"
values = [self.domain_uuid,extension]
cur.execute(query,(values))
        rows = cur.fetchall()
        extension_uuid = None
        if rows:
            extension_uuid = rows[0][0]
        cur.close()  # close the cursor before returning (the original leaked it on a match)
        return extension_uuid
def delete(self,extension):
        # register UUID handling before working with UUID objects in PostgreSQL
        psycopg2.extras.register_uuid()
cur = self.db.cursor()
cur.execute("""delete from v_extensions where domain_uuid = %s and extension = %s""",(self.domain_uuid,extension))
cur.execute("""delete from v_voicemails where domain_uuid = %s and voicemail_id = %s""",(self.domain_uuid,extension))
self.db.commit()
cur.close()
return True
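# Hedged usage sketch (connection parameters and the tenant name are
# placeholders, not values from the original project):
#
#   class Config(object):
#       hostname = 'fusionpbx.example.org'; port = 5432
#       auth_type = 'password'; username = 'fusionpbx'
#       password = 'secret'; dbname = 'fusionpbx'
#
#   ms = mediaserver(Config())
#   doms = domains(ms)
#   d = doms.create({'name': 'tenant.example.org', 'enabled': 'true',
#                    'description': 'demo tenant', 'cos': 'standard'})
#   cos(d).create('standard')  # install the "standard" class-of-service dialplan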
| dOpensource/dsiprouter | gui/modules/api/mediaserver/plugin/fusion/interface.py | Python | apache-2.0 | 15,668 |
from django import template
from django.template import Library, Node
from django.core.urlresolvers import reverse
from reporting import site
register = Library()
class ReportUrlNode(template.Node):
report_path = None
def __init__(self, report_path):
self.report_path = template.Variable(report_path)
def render(self, context):
report_path = self.report_path.resolve(context)
root_url = reverse('reporting_root', args=[''])
report_url = site.get_report_by_path(report_path)['url']
request = context['request']
return root_url + report_url + '?' + request.META['QUERY_STRING']
def do_get_report_url(parser, token):
    try:
        parts = token.split_contents()
        report_path = parts[1]
    except IndexError:
        # split_contents() returns a list, so a missing argument raises
        # IndexError (the original caught KeyError, which can never occur here)
        raise template.TemplateSyntaxError, "You must inform report path"
    return ReportUrlNode(report_path)
register.tag('get_report_url', do_get_report_url)
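# Hedged template usage sketch (the variable name is illustrative):
#   {% load reporting_tags %}
#   <a href="{% get_report_url report.path %}">view report</a>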
| marcydoty/geraldo | reporting/templatetags/reporting_tags.py | Python | lgpl-3.0 | 957 |
# -*- coding: utf-8 -*-
"""
Magic Reload Library
Luke Campagnola 2010
Python reload function that actually works (the way you expect it to)
- No re-importing necessary
- Modules can be reloaded in any order
- Replaces functions and methods with their updated code
- Changes instances to use updated classes
- Automatically decides which modules to update by comparing file modification times
Does NOT:
  - re-initialize existing instances, even if __init__ changes
- update references to any module-level objects
ie, this does not reload correctly:
from module import someObject
print someObject
..but you can use this instead: (this works even for the builtin reload)
import module
print module.someObject
"""
import inspect, os, sys, gc, traceback
try:
import __builtin__ as builtins
except ImportError:
import builtins
from .debug import printExc
def reloadAll(prefix=None, debug=False):
"""Automatically reload everything whose __file__ begins with prefix.
- Skips reload if the file has not been updated (if .pyc is newer than .py)
- if prefix is None, checks all loaded modules
"""
failed = []
changed = []
for modName, mod in list(sys.modules.items()): ## don't use iteritems; size may change during reload
if not inspect.ismodule(mod):
continue
if modName == '__main__':
continue
## Ignore if the file name does not start with prefix
if not hasattr(mod, '__file__') or os.path.splitext(mod.__file__)[1] not in ['.py', '.pyc']:
continue
if prefix is not None and mod.__file__[:len(prefix)] != prefix:
continue
## ignore if the .pyc is newer than the .py (or if there is no pyc or py)
py = os.path.splitext(mod.__file__)[0] + '.py'
pyc = py + 'c'
if py not in changed and os.path.isfile(pyc) and os.path.isfile(py) and os.stat(pyc).st_mtime >= os.stat(py).st_mtime:
#if debug:
#print "Ignoring module %s; unchanged" % str(mod)
continue
        changed.append(py) ## keep track of which modules have changed to ensure that duplicate-import modules get reloaded.
try:
reload(mod, debug=debug)
except:
printExc("Error while reloading module %s, skipping\n" % mod)
failed.append(mod.__name__)
if len(failed) > 0:
raise Exception("Some modules failed to reload: %s" % ', '.join(failed))
def reload(module, debug=False, lists=False, dicts=False):
"""Replacement for the builtin reload function:
- Reloads the module as usual
- Updates all old functions and class methods to use the new code
- Updates all instances of each modified class to use the new class
- Can update lists and dicts, but this is disabled by default
- Requires that class and function names have not changed
"""
if debug:
print("Reloading %s" % str(module))
## make a copy of the old module dictionary, reload, then grab the new module dictionary for comparison
oldDict = module.__dict__.copy()
builtins.reload(module)
newDict = module.__dict__
## Allow modules access to the old dictionary after they reload
if hasattr(module, '__reload__'):
module.__reload__(oldDict)
## compare old and new elements from each dict; update where appropriate
for k in oldDict:
old = oldDict[k]
new = newDict.get(k, None)
if old is new or new is None:
continue
if inspect.isclass(old):
if debug:
print(" Updating class %s.%s (0x%x -> 0x%x)" % (module.__name__, k, id(old), id(new)))
updateClass(old, new, debug)
elif inspect.isfunction(old):
depth = updateFunction(old, new, debug)
if debug:
extra = ""
if depth > 0:
extra = " (and %d previous versions)" % depth
print(" Updating function %s.%s%s" % (module.__name__, k, extra))
        elif lists and isinstance(old, list):
            l = len(old)  # the original called old.len(), which does not exist on lists
            old.extend(new)
            for i in range(l):
                old.pop(0)
elif dicts and isinstance(old, dict):
old.update(new)
for k in old:
if k not in new:
del old[k]
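## Hedged usage sketch (module names are illustrative):
##   import mymodule
##   from pyqtgraph import reload as reload_mod
##   reload_mod.reload(mymodule)                   # patch functions/classes in place
##   reload_mod.reloadAll('/path/to/mypackage')    # reload everything under a path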
## For functions:
## 1) update the code and defaults to new versions.
## 2) keep a reference to the previous version so ALL versions get updated for every reload
def updateFunction(old, new, debug, depth=0, visited=None):
#if debug and depth > 0:
#print " -> also updating previous version", old, " -> ", new
old.__code__ = new.__code__
old.__defaults__ = new.__defaults__
if visited is None:
visited = []
if old in visited:
return
visited.append(old)
## finally, update any previous versions still hanging around..
if hasattr(old, '__previous_reload_version__'):
maxDepth = updateFunction(old.__previous_reload_version__, new, debug, depth=depth+1, visited=visited)
else:
maxDepth = depth
## We need to keep a pointer to the previous version so we remember to update BOTH
## when the next reload comes around.
if depth == 0:
new.__previous_reload_version__ = old
return maxDepth
## For classes:
## 1) find all instances of the old class and set instance.__class__ to the new class
## 2) update all old class methods to use code from the new class methods
def updateClass(old, new, debug):
    ## Track down all instances and subclasses of old
refs = gc.get_referrers(old)
for ref in refs:
try:
if isinstance(ref, old) and ref.__class__ is old:
ref.__class__ = new
if debug:
print(" Changed class for %s" % safeStr(ref))
elif inspect.isclass(ref) and issubclass(ref, old) and old in ref.__bases__:
ind = ref.__bases__.index(old)
## Does not work:
#ref.__bases__ = ref.__bases__[:ind] + (new,) + ref.__bases__[ind+1:]
## reason: Even though we change the code on methods, they remain bound
## to their old classes (changing im_class is not allowed). Instead,
## we have to update the __bases__ such that this class will be allowed
## as an argument to older methods.
## This seems to work. Is there any reason not to?
## Note that every time we reload, the class hierarchy becomes more complex.
## (and I presume this may slow things down?)
ref.__bases__ = ref.__bases__[:ind] + (new,old) + ref.__bases__[ind+1:]
if debug:
print(" Changed superclass for %s" % safeStr(ref))
#else:
#if debug:
#print " Ignoring reference", type(ref)
except:
print("Error updating reference (%s) for class change (%s -> %s)" % (safeStr(ref), safeStr(old), safeStr(new)))
raise
## update all class methods to use new code.
## Generally this is not needed since instances already know about the new class,
## but it fixes a few specific cases (pyqt signals, for one)
for attr in dir(old):
oa = getattr(old, attr)
if inspect.ismethod(oa):
try:
na = getattr(new, attr)
except AttributeError:
if debug:
print(" Skipping method update for %s; new class does not have this attribute" % attr)
continue
if hasattr(oa, 'im_func') and hasattr(na, 'im_func') and oa.__func__ is not na.__func__:
depth = updateFunction(oa.__func__, na.__func__, debug)
#oa.im_class = new ## bind old method to new class ## not allowed
if debug:
extra = ""
if depth > 0:
extra = " (and %d previous versions)" % depth
print(" Updating method %s%s" % (attr, extra))
## And copy in new functions that didn't exist previously
for attr in dir(new):
if not hasattr(old, attr):
if debug:
print(" Adding missing attribute %s" % attr)
setattr(old, attr, getattr(new, attr))
## finally, update any previous versions still hanging around..
if hasattr(old, '__previous_reload_version__'):
updateClass(old.__previous_reload_version__, new, debug)
## It is possible to build classes for which str(obj) just causes an exception.
## Avoid thusly:
def safeStr(obj):
try:
s = str(obj)
except:
try:
s = repr(obj)
except:
s = "<instance of %s at 0x%x>" % (safeStr(type(obj)), id(obj))
return s
## Tests:
# write modules to disk, import, then re-write and run again
if __name__ == '__main__':
doQtTest = True
try:
from PyQt4 import QtCore
if not hasattr(QtCore, 'Signal'):
QtCore.Signal = QtCore.pyqtSignal
#app = QtGui.QApplication([])
class Btn(QtCore.QObject):
sig = QtCore.Signal()
def emit(self):
self.sig.emit()
btn = Btn()
    except:
        # removed a stray `raise` that made this fallback unreachable
        print("Error; skipping Qt tests")
        doQtTest = False
import os
if not os.path.isdir('test1'):
os.mkdir('test1')
open('test1/__init__.py', 'w')
modFile1 = "test1/test1.py"
modCode1 = """
import sys
class A(object):
def __init__(self, msg):
object.__init__(self)
self.msg = msg
def fn(self, pfx = ""):
print(pfx+"A class: %%s %%s" %% (str(self.__class__), str(id(self.__class__))))
print(pfx+" %%s: %d" %% self.msg)
class B(A):
def fn(self, pfx=""):
print(pfx+"B class:", self.__class__, id(self.__class__))
print(pfx+" %%s: %d" %% self.msg)
print(pfx+" calling superclass.. (%%s)" %% id(A) )
A.fn(self, " ")
"""
modFile2 = "test2.py"
modCode2 = """
from test1.test1 import A
from test1.test1 import B
a1 = A("ax1")
b1 = B("bx1")
class C(A):
def __init__(self, msg):
#print "| C init:"
#print "| C.__bases__ = ", map(id, C.__bases__)
#print "| A:", id(A)
#print "| A.__init__ = ", id(A.__init__.im_func), id(A.__init__.im_func.__code__), id(A.__init__.im_class)
A.__init__(self, msg + "(init from C)")
def fn():
print("fn: %s")
"""
open(modFile1, 'w').write(modCode1%(1,1))
open(modFile2, 'w').write(modCode2%"message 1")
import test1.test1 as test1
import test2
print("Test 1 originals:")
A1 = test1.A
B1 = test1.B
a1 = test1.A("a1")
b1 = test1.B("b1")
a1.fn()
b1.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
from test2 import fn, C
if doQtTest:
print("Button test before:")
btn.sig.connect(fn)
btn.sig.connect(a1.fn)
btn.emit()
#btn.sig.emit()
print("")
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 before reload:")
fn()
oldfn = fn
test2.a1.fn()
test2.b1.fn()
c1 = test2.C('c1')
c1.fn()
os.remove(modFile1+'c')
open(modFile1, 'w').write(modCode1%(2,2))
print("\n----RELOAD test1-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
print("Subclass test:")
c2 = test2.C('c2')
c2.fn()
os.remove(modFile2+'c')
open(modFile2, 'w').write(modCode2%"message 2")
print("\n----RELOAD test2-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
if doQtTest:
print("Button test after:")
btn.emit()
#btn.sig.emit()
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 after reload:")
fn()
test2.a1.fn()
test2.b1.fn()
print("\n==> Test 1 Old instances:")
a1.fn()
b1.fn()
c1.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
print("\n==> Test 1 New instances:")
a2 = test1.A("a2")
b2 = test1.B("b2")
a2.fn()
b2.fn()
c2 = test2.C('c2')
c2.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
os.remove(modFile1+'c')
os.remove(modFile2+'c')
open(modFile1, 'w').write(modCode1%(3,3))
open(modFile2, 'w').write(modCode2%"message 3")
print("\n----RELOAD-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
if doQtTest:
print("Button test after:")
btn.emit()
#btn.sig.emit()
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 after reload:")
fn()
test2.a1.fn()
test2.b1.fn()
print("\n==> Test 1 Old instances:")
a1.fn()
b1.fn()
print("function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.__func__), id(a1.fn.__self__.__class__), id(b1.fn.__func__), id(b1.fn.__self__.__class__)))
print("\n==> Test 1 New instances:")
a2 = test1.A("a2")
b2 = test1.B("b2")
a2.fn()
b2.fn()
print("function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.__func__), id(a1.fn.__self__.__class__), id(b1.fn.__func__), id(b1.fn.__self__.__class__)))
os.remove(modFile1)
os.remove(modFile2)
os.remove(modFile1+'c')
os.remove(modFile2+'c')
os.system('rm -r test1')
#
# Failure graveyard ahead:
#
"""Reload Importer:
Hooks into import system to
1) keep a record of module dependencies as they are imported
2) make sure modules are always reloaded in correct order
3) update old classes and functions to use reloaded code"""
#import imp, sys
## python's import hook mechanism doesn't work since we need to be
## informed every time there is an import statement, not just for new imports
#class ReloadImporter:
#def __init__(self):
#self.depth = 0
#def find_module(self, name, path):
#print " "*self.depth + "find: ", name, path
##if name == 'PyQt4' and path is None:
##print "PyQt4 -> PySide"
##self.modData = imp.find_module('PySide')
##return self
##return None ## return none to allow the import to proceed normally; return self to intercept with load_module
#self.modData = imp.find_module(name, path)
#self.depth += 1
##sys.path_importer_cache = {}
#return self
#def load_module(self, name):
#mod = imp.load_module(name, *self.modData)
#self.depth -= 1
#print " "*self.depth + "load: ", name
#return mod
#def pathHook(path):
#print "path hook:", path
#raise ImportError
#sys.path_hooks.append(pathHook)
#sys.meta_path.append(ReloadImporter())
### replace __import__ with a wrapper that tracks module dependencies
#modDeps = {}
#reloadModule = None
#origImport = __builtins__.__import__
#def _import(name, globals=None, locals=None, fromlist=None, level=-1, stack=[]):
### Note that stack behaves as a static variable.
##print " "*len(importStack) + "import %s" % args[0]
#stack.append(set())
#mod = origImport(name, globals, locals, fromlist, level)
#deps = stack.pop()
#if len(stack) > 0:
#stack[-1].add(mod)
#elif reloadModule is not None: ## If this is the top level import AND we're inside a module reload
#modDeps[reloadModule].add(mod)
#if mod in modDeps:
#modDeps[mod] |= deps
#else:
#modDeps[mod] = deps
#return mod
#__builtins__.__import__ = _import
### replace
#origReload = __builtins__.reload
#def _reload(mod):
#reloadModule = mod
#ret = origReload(mod)
#reloadModule = None
#return ret
#__builtins__.reload = _reload
#def reload(mod, visited=None):
#if visited is None:
#visited = set()
#if mod in visited:
#return
#visited.add(mod)
#for dep in modDeps.get(mod, []):
#reload(dep, visited)
#__builtins__.reload(mod)
| andrewpaulreeves/soapy | soapy/pyqtgraph/reload.py | Python | gpl-3.0 | 16,989 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-04 20:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend_citizen', '0006_profile_is_organization'),
]
operations = [
migrations.AddField(
model_name='profile',
name='is_journalist',
field=models.BooleanField(default=False),
),
]
| ciudadanointeligente/votainteligente-portal-electoral | backend_citizen/migrations/0007_profile_is_journalist.py | Python | gpl-3.0 | 472 |
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
from . import views
from projects.views import screenshot, upload, diff, diffPage, tease
urlpatterns = [
url(r'^$', tease),
url(r'^admin/', include(admin.site.urls)),
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/favicon.ico')),
url(r'^projects/', include('projects.urls')),
# Access controlled screenshot images
url(r'^screenshot/(?P<id>[0-9a-f\-]{36})', screenshot),
url(r'^screenshot/diff/(?P<before_id>[0-9a-f\-]{36})/(?P<after_id>[0-9a-f\-]{36})', diff),
url(r'^(?P<username>[^/]+)/(?P<project>[^/]+)/(?P<branch>[^/]+)/(?P<page>[^/]+)/(?P<build>[^/]+)/diff/(?P<new_build>[^/]+)', diffPage),
# Authenticated screenshot uploads
url(r'^(?P<username>[^/]+)/(?P<project>[^/]+)', upload),
]
| deckar01/narcis | narcis/urls.py | Python | mit | 952 |
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
root = LinuxArmFSSystem(mem_mode='atomic',
mem_class=DDR3_1600_x64,
cpu_class=AtomicSimpleCPU,
num_cpus=2).create_root()
| prodromou87/gem5 | tests/configs/realview-simple-atomic-dual.py | Python | bsd-3-clause | 2,345 |
# ################################################################################
# ##
# ## https://github.com/NetASM/NetASM-python
# ##
# ## File:
# ## __init__.py
# ##
# ## Project:
# ## NetASM: A Network Assembly Language for Programmable Dataplanes
# ##
# ## Author:
# ## Muhammad Shahbaz
# ##
# ## Copyright notice:
# ## Copyright (C) 2014 Princeton University
# ## Network Operations and Internet Security Lab
# ##
# ## Licence:
# ## This file is a part of the NetASM development base package.
# ##
# ## This file is free code: you can redistribute it and/or modify it under
# ## the terms of the GNU Lesser General Public License version 2.1 as
# ## published by the Free Software Foundation.
# ##
# ## This package is distributed in the hope that it will be useful, but
# ## WITHOUT ANY WARRANTY; without even the implied warranty of
# ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# ## Lesser General Public License for more details.
# ##
# ## You should have received a copy of the GNU Lesser General Public
# ## License along with the NetASM source package. If not, see
# ## http://www.gnu.org/licenses/.
__author__ = 'shahbaz'
| 8l/NetASM-python | netasm/examples/controllers/pox/__init__.py | Python | gpl-2.0 | 1,280 |
import json
from idpproxy.social.oauth import OAuth
import oauth2 as oauth
#from xml.etree import ElementTree as ET
import logging
logger = logging.getLogger(__name__)
__author__ = 'rohe0002'
class LinkedIn(OAuth):
def __init__(self, client_id, client_secret, **kwargs):
OAuth.__init__(self, client_id, client_secret, **kwargs)
def get_profile(self, info_set):
token = oauth.Token(key=info_set["oauth_token"][0],
secret=info_set["oauth_token_secret"][0])
client = oauth.Client(self.consumer, token)
resp, content = client.request(self.extra["userinfo_endpoint"], "GET")
# # content in XML :-(
# logger.debug("UserInfo XML: %s" % content)
# res = {}
# root = ET.fromstring(content)
# for child in root:
# res[child.tag] = child.text
res = json.loads(content)
logger.debug("userinfo: %s" % res)
res["user_id"] = info_set["oauth_token"]
return resp, res | rohe/IdPproxy | src/idpproxy/social/linkedin/__init__.py | Python | bsd-2-clause | 1,002 |
#!/usr/bin/env python
from gimpfu import *
import random
# create an output function that redirects to gimp's Error Console
def gprint(text):
pdb.gimp_message(text)
return
# our script
def origami_fill(image, drawable, text_value, int_value) :
image.undo_group_start()
# Determine colors from image
layerColorPick = image.layers[0]
colorBase = generate_color_base(image, layerColorPick, int_value)
image.resize(1000, 1000, 0, 0)
layerPaperWhite = gimp.Layer(image, "paper white", 1000, 1000, RGB_IMAGE, 100, NORMAL_MODE)
image.add_layer(layerPaperWhite)
layerPaperWhite.fill(WHITE_FILL)
layerPaperBase = gimp.Layer(image, "paper base", 1000, 1000, RGB_IMAGE, 85, NORMAL_MODE)
image.add_layer(layerPaperBase)
gimp.set_foreground(generate_color_noise(colorBase[2]))
layerPaperBase.fill(FOREGROUND_FILL)
layerBase = gimp.Layer(image, "base", 1000, 1000, RGB_IMAGE, 100, NORMAL_MODE)
layerBase.add_alpha()
image.add_layer(layerBase)
layerBase.fill(TRANSPARENT_FILL)
if (text_value == "crane"):
create_crane(image, layerBase, colorBase)
elif (text_value == "butterfly"):
create_butterfly(image, layerBase, colorBase)
elif (text_value == "iris"):
create_iris(image, layerBase, colorBase)
else:
create_crane(image, layerBase, colorBase)
# Merge all layers down
pdb.gimp_selection_none(image)
layerBase = image.merge_down(layerBase, 0)
layerBase = image.merge_down(layerBase, 0)
layerBase.name = "Origami Fill"
image.undo_group_end()
return
"""Returns list of 3 color bases from the layer based on provided int seed."""
def generate_color_base(image, layerColorPick, int_value):
colorBase = [(50,50,50), (100,100,100), (150,150,150)]
if (int_value % 4 == 0):
colorBase[0] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/4, layerColorPick.height*1/4, False, True, 10)
colorBase[1] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/2, layerColorPick.height*1/2, False, True, 10)
colorBase[2] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*3/4, layerColorPick.height*3/4, False, True, 10)
elif (int_value % 4 == 1):
colorBase[0] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*3/4, layerColorPick.height*3/4, False, True, 10)
colorBase[1] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/2, layerColorPick.height*1/2, False, True, 10)
colorBase[2] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/4, layerColorPick.height*1/4, False, True, 10)
elif (int_value % 4 == 2):
colorBase[0] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/3, layerColorPick.height*1/3, False, True, 10)
colorBase[1] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*2/3, layerColorPick.height*1/3, False, True, 10)
colorBase[2] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/2, layerColorPick.height*2/3, False, True, 10)
elif (int_value % 4 == 3):
colorBase[0] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/3, layerColorPick.height*2/3, False, True, 10)
colorBase[1] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*2/3, layerColorPick.height*2/3, False, True, 10)
colorBase[2] = pdb.gimp_image_pick_color(image, layerColorPick, layerColorPick.width*1/2, layerColorPick.height*1/3, False, True, 10)
return colorBase
"""Returns a color tuple with a random delta noise applied to the provided color tuple"""
def generate_color_noise(color):
sign = random.randint(0,1)
sign = -1 if (sign == 0) else 1
delta = random.randint(25,50)
newR = max(min(255, color[0] + sign*delta), 0)
newG = max(min(255, color[1] + sign*random.randint(delta-10, delta+10)), 0)
newB = max(min(255, color[2] + sign*random.randint(delta-10, delta+10)), 0)
return (newR, newG, newB)
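# Hedged worked example: starting from (50, 50, 50) with sign = +1 and
# delta = 30, the red channel becomes 80 and green/blue each shift by a value
# drawn from [20, 40], giving e.g. (80, 85, 72); results are clamped to 0..255.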
"""Creates a crane on the base layer"""
def create_crane(image, layerBase, colorBase):
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 10, [0,0 , 750,250 , 1000,1000 , 250,750 , 0,0])
gimp.set_foreground(generate_color_noise(colorBase[0]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 10, [0,0 , 325,225 , 300,300 , 225,325 , 0,0])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 10, [1000,1000 , 1000-325,1000-225 , 1000-300,1000-300 , 1000-225,1000-325 , 1000,1000])
gimp.set_foreground(colorBase[0])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [0,1000 , 200,500 , 500,800 , 0,1000])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 6, [1000,0 , 800,500 , 500,200 , 1000,0])
gimp.set_foreground(colorBase[1])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [1000,1000 , 800,1000 , 850,850 , 1000,800])
gimp.set_foreground(colorBase[2])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
"""Creates a crane on the base layer"""
def create_butterfly(image, layerBase, colorBase):
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [0,1000 , 300,1000 , 300,900 , 0,900])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 8, [1000,1000 , 1000-300,1000 , 1000-300,900 , 1000,900])
gimp.set_foreground(generate_color_noise(colorBase[1]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 10, [0,0 , 160,240, 160,410, 0,500, 0,0])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 10, [1000,0 , 1000,500, 840,410, 840,240, 1000,0])
gimp.set_foreground(colorBase[0])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [0,500 , 200,700 , 0,1000])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 6, [1000,500 , 800,700 , 1000,1000])
gimp.set_foreground(colorBase[1])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [1000,0 , 840,240, 650,0, 1000,0])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 8, [0,0 , 350,0, 160,240, 0,0])
gimp.set_foreground(colorBase[2])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [350,350 , 650,350 , 650,650 , 350,650])
gimp.set_foreground(generate_color_noise(colorBase[0]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [160,240 , 160,410 , 500,500])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 6, [1000-160,240 , 1000-160,410 , 500,500])
gimp.set_foreground(generate_color_noise(colorBase[2]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [350,650 , 650,650 , 650,850 , 350,850])
gimp.set_foreground(generate_color_noise(colorBase[1]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
"""Creates an iris flower on the base layer"""
def create_iris(image, layerBase, colorBase):
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [0,0 , 500,500 , 500,0])
gimp.set_foreground(generate_color_noise(colorBase[2]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [1000,0 , 500,500 , 500,0])
gimp.set_foreground(generate_color_noise(colorBase[2]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [500,1000 , 500,500 , 0,1000])
gimp.set_foreground(generate_color_noise(colorBase[2]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 6, [500,1000 , 500,500 , 1000,1000])
gimp.set_foreground(generate_color_noise(colorBase[2]))
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [0,500 , 500,0 , 1000,500 , 500,1000])
gimp.set_foreground(colorBase[0])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
pdb.gimp_image_select_polygon(image, CHANNEL_OP_REPLACE, 8, [0,0 , 350,150 , 500,500 , 150,350])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 8, [1000,0 , 1000-350,150 , 500,500 , 1000-150,350])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 8, [0,1000 , 350,1000-150 , 500,500 , 150,1000-350])
pdb.gimp_image_select_polygon(image, CHANNEL_OP_ADD, 8, [1000,1000 , 1000-350,1000-150 , 500,500 , 1000-150,1000-350])
gimp.set_foreground(colorBase[1])
pdb.gimp_edit_fill(layerBase, FOREGROUND_FILL)
# This is the plugin registration function
register(
"origami_fill",
"Origami Fill",
"Low-Poly Origami Fill",
"Arpit Sheth & Justin Selig",
"Computing in the Arts @ Cornell University",
"May 2014",
"<Image>/MyScripts/Origami Fill",
"*",
[
(PF_STRING, 'origami_obj', 'Origami Object', 'crane'),
(PF_INT, 'img_analysis_procedure', 'Random Integer (Image Analysis)', 2014)
],
[],
origami_fill,
)
main()
| shetharp/Origami-Fill | origamifill.py | Python | mit | 9,519 |
"""
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. Nathalie.bartoli <nathalie@onera.fr>
This package is distributed under New BSD license.
TO DO:
- define outputs['sol'] = self.sol
"""
import numpy as np
from sklearn import linear_model
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.caching import cached_operation
class LS(SurrogateModel):
"""
Least square model.
This model uses the linear_model.LinearRegression class from scikit-learn.
Default-parameters from scikit-learn are used herein.
"""
name = "LS"
def _initialize(self):
super(LS, self)._initialize()
declare = self.options.declare
supports = self.supports
declare(
"data_dir",
values=None,
types=str,
desc="Directory for loading / saving cached data; None means do not save or load",
)
supports["derivatives"] = True
############################################################################
# Model functions
############################################################################
    def _new_train(self):
        """
        Train the model
        """
        pts = self.training_points
        if 0 in pts[None]:
            x = pts[None][0][0]
            y = pts[None][0][1]
            self.mod = linear_model.LinearRegression()
            self.mod.fit(x, y)
    def _train(self):
        """
        Train the model
        """
        inputs = {"self": self}
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            if outputs:
                self.sol = outputs["sol"]
            else:
                self._new_train()
                # outputs['sol'] = self.sol
    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        y = self.mod.predict(x)
        return y
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values.
        """
        # Initialization
        n_eval, n_features_x = x.shape
        # For a linear model the derivative w.r.t. input kx is simply the
        # (constant) regression coefficient, broadcast to every evaluation point.
        y = np.ones((n_eval, self.ny)) * self.mod.coef_[:, kx]
        return y
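
# Editorial sketch, not part of the original file: minimal usage of LS via the
# public smt API (set_training_values / train / predict_values /
# predict_derivatives); these method names are assumed from the SurrogateModel
# base class.
if __name__ == "__main__":
    xt = np.array([[0.0], [1.0], [2.0], [3.0]])
    yt = np.array([1.0, 3.0, 5.0, 7.0])  # samples of y = 2x + 1
    sm = LS()
    sm.set_training_values(xt, yt)
    sm.train()
    print(sm.predict_values(np.array([[4.0]])))          # approximately [[9.]]
    print(sm.predict_derivatives(np.array([[4.0]]), 0))  # approximately [[2.]] (constant slope)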
| SMTorg/smt | smt/surrogate_models/ls.py | Python | bsd-3-clause | 2,766 |
from optparse import OptionParser
import pandas
# import matplotlib.pyplot as plt
from simulate import simulate
parser = OptionParser()
parser.add_option("--max-d", type="int", default=4)
options, _ = parser.parse_args()
res = pandas.DataFrame()
for d in range(1, options.max_d + 1):
    print "\n\nd=", d, "\n"
    df = simulate(n=1000, k=3, d=d, m=1, progress=True)
    s = df.groupby(['p']).mean()['found']
    s.name = d
    res = res.append(s)
res = res.transpose()
res.to_pickle('mod/barplots_d_%s.pkl' % options.max_d)
# res.plot(kind='bar')
# plt.show()
| aravart/speech-games | theory/exp_prob_found.py | Python | mit | 565 |
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.template.loader import render_to_string
from tinymce import models as tinymce_models
from getyourdata.models import BaseModel
class HomePageManager(models.Manager):
    def create_default(self):
        """
        Create default home pages
        """
        home_page = HomePage()

        for lang_code, lang_name in settings.LANGUAGES:
            setattr(
                home_page, "content_%s" % lang_code,
                render_to_string(
                    "home/default.html", {"lang_code": lang_code}))

        home_page.save()
        return home_page


def get_default_content():
    return ""
class HomePage(BaseModel):
    admin_name = models.CharField(
        max_length=30, default='default', unique=True)
    content = tinymce_models.HTMLField(blank=True, default=get_default_content)

    objects = HomePageManager()

    class Meta:
        verbose_name = 'Home page'
        verbose_name_plural = 'Home page'

    def __unicode__(self):
        return self.admin_name
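
# Editorial note (not in the original file): create_default() is meant to be
# called as HomePage.objects.create_default(). The content_<lang_code>
# attributes it sets look like per-language translation fields in the style of
# django-modeltranslation (an assumption; the registration is not shown here),
# so which fields exist depends on settings.LANGUAGES.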
class FaqContent(BaseModel):
    title = models.CharField(
        max_length=75,
        default="")
    priority = models.IntegerField(default=777)
    content = tinymce_models.HTMLField(blank=True, default='')
| sakset/getyourdata | getyourdata/home/models.py | Python | mit | 1,332 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Test data out of range. These should be ignored.
worksheet.write('A0', 'foo')
worksheet.write(-1, -1, 'foo')
worksheet.write(0, -1, 'foo')
worksheet.write(-1, 0, 'foo')
worksheet.write(1048576, 0, 'foo')
worksheet.write(0, 16384, 'foo')
workbook.close()
self.assertExcelEqual()
| jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_simple09.py | Python | bsd-2-clause | 1,048 |
import phantom.rules as phantom
import json
#
# F5 firewall
#
# Copyright (c) 2016 World Wide Technology, Inc.
# All rights reserved.
#
# author: Joel W. King, World Wide Technology
#
# revisions:
#     16 June 2016  |  Changes to parameters, create rule name based on the source IP
#
def block_IP_cb(action, success, container, results, handle):
    if not success:
        return
    return
def on_start(container):
    ips = set(phantom.collect(container, 'artifact:*.cef.sourceAddress'))
    boiler_plate = {"action": "reject", "policy": None, "rule name": None, "partition": "Common"}
    for ip in ips:
        # Copy the template so each action gets its own parameter dict;
        # assigning boiler_plate directly would mutate the shared template.
        parameters = boiler_plate.copy()
        parameters["policy"] = "Phantom_Inbound"   # Policy name must exist on the F5 BIG-IP
        parameters["source"] = ip                  # Source IP we are blocking
        parameters["rule name"] = "Phantom" + ip   # Make the rule name based on the source IP address
        phantom.debug("PARAMETERS \n%s" % parameters)
        phantom.act('block ip', parameters=[parameters], assets=["f5"], callback=block_IP_cb)
    return
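
# Editorial example (hypothetical values, not in the original): for
# ip = "10.1.1.5", on_start() would call phantom.act('block ip', ...) with
#   {"action": "reject", "policy": "Phantom_Inbound", "source": "10.1.1.5",
#    "rule name": "Phantom10.1.1.5", "partition": "Common"}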
def on_finish(container, summary):
    phantom.debug("Summary: " + summary)
    return
| joelwking/Phantom-Cyber | f5_firewall/f5_firewall_playbook.py | Python | mit | 1,240 |
# -*- coding: utf-8 -*-
from . import communication, account_invoice, account
| linkitspa/l10n-italy | l10n_it_invoices_data_communication/models/__init__.py | Python | agpl-3.0 | 79 |
#!/usr/bin/env python
"""encode/decode base58 in the same way that Bitcoin does"""
import hashlib
import math
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
getNewRIPEMD160 = None
getNewSHA256 = None
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    long_value = 0L
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0': nPad += 1
        else: break

    return (__b58chars[0]*nPad) + result
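
# Editorial example (not in the original): leading zero bytes map to leading
# '1' characters, so b58encode('\x00\x00abc') yields a string starting with
# "11", and b58decode(b58encode(s), len(s)) == s for any byte string s.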
def b58decode(v, length):
    """ decode v into a string of `length` bytes
    """
    long_value = 0L
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break

    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None

    return result
def getNewRIPEMD160ByCrypto(public_key=""):
return RIPEMD160.new(public_key)
def getNewRIPEMD160ByHashlib(public_key=""):
newRIPEMD160 = hashlib.new('ripemd160')
newRIPEMD160.update(public_key)
return newRIPEMD160
def getNewSHA256ByCrypto(public_key=""):
return SHA256.new(public_key)
def getNewSHA256ByHashlib(public_key=""):
return hashlib.sha256(public_key)
try:
    # Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
    # Needed for RIPEMD160 hash function, used to compute
    # Bitcoin addresses from internal public keys.
    import Crypto.Hash.RIPEMD160 as RIPEMD160
    getNewRIPEMD160 = getNewRIPEMD160ByCrypto
except ImportError:
    try:
        # hashlib.new raises ValueError (not ImportError) when the underlying
        # OpenSSL build lacks ripemd160, so catch both.
        test = getNewRIPEMD160ByHashlib()
        getNewRIPEMD160 = getNewRIPEMD160ByHashlib
    except (ImportError, ValueError):
        print("Cannot import RIPEMD160")

try:
    # Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
    # Needed for the SHA256 hash function, used to compute
    # Bitcoin addresses from internal public keys.
    import Crypto.Hash.SHA256 as SHA256
    getNewSHA256 = getNewSHA256ByCrypto
except ImportError:
    try:
        test = getNewSHA256ByHashlib()
        getNewSHA256 = getNewSHA256ByHashlib
    except ImportError:
        print("Cannot import SHA256")
def hash_160(public_key):
    if getNewSHA256 is None or getNewRIPEMD160 is None:
        return ''
    h1 = getNewSHA256(public_key).digest()
    h2 = getNewRIPEMD160(h1).digest()
    return h2

def public_key_to_bc_address(public_key):
    h160 = hash_160(public_key)
    return hash_160_to_bc_address(h160)
def hash_160_to_bc_address(h160):
    if getNewSHA256 is None:
        return ''
    vh160 = "\x00" + h160  # \x00 is version 0
    h3 = getNewSHA256(getNewSHA256(vh160).digest()).digest()
    addr = vh160 + h3[0:4]
    return b58encode(addr)
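
# Editorial note (not in the original): the full address pipeline is
# b58encode(version_byte + h160 + sha256(sha256(version_byte + h160))[0:4]).
# For instance, hash_160_to_bc_address('\x00' * 20) should give the well-known
# all-zeros version-0 address '1111111111111111111114oLvT2' (stated from
# general Bitcoin lore; verify before relying on it).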
def bc_address_to_hash_160(addr):
    bytes = b58decode(addr, 25)
    return bytes[1:21]
if __name__ == '__main__':
    x = '005cc87f4a3fdfe3a2346b6953267ca867282630d3f9b78e64'.decode('hex_codec')
    encoded = b58encode(x)
    print encoded, '19TbMSWwHvnxAKy12iNm3KdbGfzfaMFViT'
    print b58decode(encoded, len(x)).encode('hex_codec'), x.encode('hex_codec')
| Unthinkingbit/bitcointools | base58.py | Python | mit | 3,506 |