repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
zzqcn/wireshark | tools/wireshark_gen.py | Python | gpl-2.0 | 100,934 | 0.003616 | # -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton (frank.singleton@ericsson.com)
#
# | Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at h | ttps://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a dissector for Wireshark.
#
#
# Strategy (sneaky but ...)
#
# problem: I dont know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
"""Wireshark IDL compiler back-end."""
from __future__ import print_function
import collections
import tempfile
from omniidl import idlast, idltype, idlutil, output
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, improve, still get some collisions.add variable delegator function ]
# For example, mutlidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
class wireshark_gen_C:
# Some string constants for our templates
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_" # loop index
c_i_lim = "guint32 u_octet4_loop_" # loop limit
c_u_disc = "guint32 disc_u_" # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_" # signed int union discriminant variable name (other cases, except Enum)
def __init__(self, st, protocol_name, dissector_name, description, debug=False, aggressive=False):
self.DEBUG = debug
self.AGGRESSIVE = aggressive
self.st = output.Stream(tempfile.TemporaryFile(mode="w"), 4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
def genCode(self, oplist, atlist, enlist, stlist, unlist): # operation, attribute, enums, struct and union lists
"""Main entry point, controls sequence of generated code."""
# sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.genHelpers(oplist, stlist, unlist)
self.genExceptionHelpers(oplist)
self.genAttributeHelpers(atlist)
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genWrsCopyright()
self.genGPL()
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist, atlist, enlist, stlist, unlist)
if len(atlist) > 0:
self.genAtList(atlist) # string constant declares for Attributes
if len(enlist) > 0:
self.genEnList(enlist) # string constant declares for Enums
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if len(atlist) > 0:
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist, stlist, unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
#self.dumpvars() # debug
self.genModelines()
def genHeader(self):
"""Generate Standard Wireshark Header Comments"""
self.st.out(self.template_Header, dissector_name=self.dissname)
if self.DEBUG:
print("//XXX genHeader")
def genWrsCopyright(self):
if self.DEBUG:
print("//XXX genWrsCopyright")
self.st.out(self.template_wireshark_copyright)
def genModelines(self):
if self.DEBUG:
print("//XXX genModelines")
self.st.out(self.template_Modelines)
def genGPL(self):
if self.DEBUG:
print("//XXX genGPL")
self.st.out(self.template_GPL)
def genIncludes(self):
if self.DEBUG:
print("//XXX genIncludes")
self.st.out(self.template_Includes)
def genOpDeclares(self, op):
"""" Generate hf variables for operation filters
in: opnode ( an operation node)
"""
if self.DEBUG:
print("//XXX genOpDeclares")
print("//XXX return type = ", op.returnT |
conikuvat/shoottikala | shoottikala/__init__.py | Python | mit | 61 | 0 | default_app | _config = 'shoottikala.apps.ShoottikalaApp | Config'
|
micahflee/securedrop | securedrop/journalist_app/col.py | Python | agpl-3.0 | 3,437 | 0 | # -*- coding: utf-8 -*-
from flask import (Blueprint, redirect, url_for, render_template, flash,
request, abort, send_file, current_app)
from flask_babel import gettext
from | sqlalchemy.orm.exc import NoResultFound
import crypto_util
import store
from db import db_session, Submission
from journalist_app.decorators import login_required
from | journalist_app.forms import ReplyForm
from journalist_app.utils import (make_star_true, make_star_false, get_source,
delete_collection, col_download_unread,
col_download_all, col_star, col_un_star,
col_delete)
def make_blueprint(config):
view = Blueprint('col', __name__)
@view.route('/add_star/<filesystem_id>', methods=('POST',))
@login_required
def add_star(filesystem_id):
make_star_true(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route("/remove_star/<filesystem_id>", methods=('POST',))
@login_required
def remove_star(filesystem_id):
make_star_false(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route('/<filesystem_id>')
@login_required
def col(filesystem_id):
form = ReplyForm()
source = get_source(filesystem_id)
source.has_key = crypto_util.getkey(filesystem_id)
return render_template("col.html", filesystem_id=filesystem_id,
source=source, form=form)
@view.route('/delete/<filesystem_id>', methods=('POST',))
@login_required
def delete_single(filesystem_id):
"""deleting a single collection from its /col page"""
source = get_source(filesystem_id)
delete_collection(filesystem_id)
flash(gettext("{source_name}'s collection deleted")
.format(source_name=source.journalist_designation),
"notification")
return redirect(url_for('main.index'))
@view.route('/process', methods=('POST',))
@login_required
def process():
actions = {'download-unread': col_download_unread,
'download-all': col_download_all, 'star': col_star,
'un-star': col_un_star, 'delete': col_delete}
if 'cols_selected' not in request.form:
flash(gettext('No collections selected.'), 'error')
return redirect(url_for('main.index'))
# getlist is cgi.FieldStorage.getlist
cols_selected = request.form.getlist('cols_selected')
action = request.form['action']
if action not in actions:
return abort(500)
method = actions[action]
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
@login_required
def download_single_submission(filesystem_id, fn):
"""Sends a client the contents of a single submission."""
if '..' in fn or fn.startswith('/'):
abort(404)
try:
Submission.query.filter(
Submission.filename == fn).one().downloaded = True
db_session.commit()
except NoResultFound as e:
current_app.logger.error(
"Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(store.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
return view
|
thorwhalen/ut | ml/feature_extraction/sequential_var_sets.py | Python | mit | 5,117 | 0.000782 | import itertools
import re
class PVar:
p = re.compile('^(.+)-(\d+)$|^(.+)$')
def __init__(self, var: str, i: int = 0):
self.var = var
self.i = i
def _tuple_for_ordering(self):
return (self.i, self.var)
def __eq__(self, other):
return self._tuple_for_ordering().__eq__(other._tuple_for_ordering())
# return self.var == other.var and self.i == other.i
def __lt__(self, other):
return self._tuple_for_ordering().__lt__(other._tuple_for_ordering())
def __le__(self, other):
return self._tuple_for_ordering().__le__(other._tuple_for_ordering())
def __gt__(self, other):
return self._tuple_for_ordering().__gt__(other._tuple_for_ordering())
def __ge__(self, other):
return self._tuple_for_ordering().__ge__(other._tuple_for_ordering())
@classmethod
def from_(cls, x):
if isinstance(x, cls):
return cls(x.var, x.i) # make a copy of the Key object
elif isinstance(x, tuple):
return cls(*x) # assume it's a (var, i) tuple
elif isinstance(x, str):
return cls.from_str(x)
else:
return cls(*x.__iter__())
def __iter__(self):
return self.var, self.i
def __repr__(self):
return f"{self.__class__.__name__}('{self.var}', {self.i})"
def __hash__(self):
return hash(self.__repr__())
def __str__(self):
if self.i == 0:
return f"{self.var}"
else:
return f"{self.var}{self.i}"
@classmethod
def from_str(cls, s):
s = s.strip()
m = cls.p.match(s)
g = m.groups()
if g[-1] is None:
return cls(var=g[0], i=-int(g[1]))
else:
return cls(var=g[-1])
def __getitem__(self, i):
return PVar(self.var, self.i + i)
def __add__(self, i):
return PVar(self.var, self.i + i)
def __sub__(self, i):
return PVar(self.var, self.i - i)
def __mul__(self, other):
return VarSet([self, other])
class VarSet:
def __init__(self, *varset):
if len(varset) == 1 and isinstance(varset[0], (tuple, list, VarSet)):
varset = varset[0]
varset = list(map(PVar.from_, varset))
self.varset = sorted(varset)
self.min_abs_i = abs(min(x.i for x in self)) # TODO: Not enough: Need to check on upper bound of sliding win
@property
def varset_strs(self):
return list(map(str, self.varset))
def __mul__(self, other):
if isinstance(other, PVar):
return VarSet(self.varset + [other])
else:
return VarSet(self.varset + other)
def __eq__(self, other):
if len(self.varset) != len(other.varset):
return False
else:
for k, kk in zip(self.varset, other.varset):
if k != kk:
return False
return True
def __iter__(self):
return iter(self.varset)
def __repr__(self):
s = ", ".join(map(lambda x: "'{}'".format(x), self.varset))
return f"{self.__class__.__name__}({s})"
def __str__(self):
return "(" + ", ".join(map(str, self.varset)) + ")"
def __hash__(self):
return hash(self.__repr__())
def __getitem__(self, i):
varset = [k[i] for k in self.varset]
return VarSet(varset)
class VarSetFactory:
@staticmethod
def single_dim_markovs(varnames):
return map(lambda v: VarSet([(v, -1), (v, 0)]), varnames)
@staticmethod
def pairs(varnames):
return map(VarSet, itertools.combinations(map(PVar, varnames), 2))
@staticmethod
| def tuples(varnames, tuple_size: int = 2):
return map(VarSet, itertools.combinations(map(PVar, varnames), tuple_size))
@staticmethod
def markov_pairs(varnames):
return map(lambda v: VarSet(PVar(v[0], -1), PVa | r(v[1], 0)), itertools.product(varnames, varnames))
@staticmethod
def from_str(s):
return VarSet(*map(PVar.from_str, list(s[1:-1].split(','))))
# @classmethod
# def pairs(cls, vars):
# return list(itertools.combinations(vars, 2))
def extract_kps(df, kps):
# keep only elements of kps that have columns for them
cols = set(df.columns)
_kps = list()
for k in kps:
if k.var in cols:
_kps.append(k)
for i in range(len(df)):
if i >= kps.min_abs_i:
yield tuple(df[k.var].iloc[i + k.i] for k in _kps)
class DfData:
def __init__(self, df):
self.df = df
def __getitem__(self, k):
if isinstance(k, PVar):
return self.df[k.var].iloc[k.i]
elif isinstance(k, VarSet):
return tuple(map(self.__getitem__, k))
def extract_with_key_pattern_sets(self, kps_list):
for i in range(len(self.df)):
for kps in kps_list:
if i >= kps.min_abs_i:
yield kps, self[kps[i]]
def extract_kps(self, kps):
for i in range(len(self.df)):
if i >= kps.min_abs_i:
yield self[kps[i]]
|
rasata/ansible | test/units/vars/test_variable_manager.py | Python | gpl-3.0 | 10,563 | 0.002083 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from six import iteritems
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from units.mock.loader import DictDataLoader
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
v = VariableManager()
vars = v.get_vars(loader=fake_loader, use_cache=False)
if 'omit' in vars:
del vars['omit']
if 'vars' in vars:
del vars['vars']
if 'ansible_version' in vars:
del vars['ansible_version']
self.assertEqual(vars, dict(playbook_dir='.'))
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.extra_vars = extra_vars
vars = v.get_vars(loader=fake_loader, use_cache=False)
for (key, val) in iteritems(extra_vars):
self.assertEqual(vars.get(key), val)
self.assertIsNot(v.extra_vars, extra_vars)
def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
""",
"other_path/host_vars/hostname1.yml": """
foo: bam
baa: bat
""",
})
v = VariableManager()
v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
v.add_host_vars_file("other_path/host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], [dict(foo="bar"), dict(foo="bam", baa="bat")])
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
mock_host.get_group_vars.return_value = dict()
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bam")
self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("baa"), "bat")
def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/all.yml": """
foo: bar
""",
"group_vars/somegroup.yml": """
bam: baz
""",
"other_path/group_vars/somegroup.yml": """
baa: bat
"""
})
v = VariableManager()
v.add_group_vars_file("group_vars/all.yml", loader=fake_loader)
v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
v.add_group_vars_file("other_path/group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["all"], [dict(foo="bar")])
self.assertEqual(v._group_vars_files["somegroup"], [dict(bam="baz"), dict(baa="bat")])
mock_group = MagicMock()
mock_group.name = "somegroup"
mock_group.get_ancestors.return_value = ()
mock_group.get_vars.return_value = dict()
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = (mock_group,)
mock_host.get_group_vars.return_value = dict()
vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False)
self.assertEqual(vars.get("foo"), "bar")
self.assertEqual(vars.get("baa"), "bat")
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
def test_variable_manager_precedence(self):
'''
Tests complex variations and combinations of get_vars( | ) with different
objects to modify the context under which variables are merged.
'''
v = VariableManager()
v._fact_cache = defaultdict(dict)
fake_loader = DictDataLoader({
# inventory1
'/etc/ansible/inventory1': """
[group2:children]
group1
[group1]
host1 host_var=host_var_from_inventory_host1
[group1:vars]
| group_var = group_var_from_inventory_group1
[group2:vars]
group_var = group_var_from_inventory_group2
""",
# role defaults_only1
'/etc/ansible/roles/defaults_only1/defaults/main.yml': """
default_var: "default_var_from_defaults_only1"
host_var: "host_var_from_defaults_only1"
group_var: "group_var_from_defaults_only1"
group_var_all: "group_var_all_from_defaults_only1"
extra_var: "extra_var_from_defaults_only1"
""",
'/etc/ansible/roles/defaults_only1/tasks/main.yml': """
- debug: msg="here i am"
""",
# role defaults_only2
'/etc/ansible/roles/defaults_only2/defaults/main.yml': """
default_var: "default_var_from_defaults_only2"
host_var: "host_var_from_defaults_only2"
group_var: "group_var_from_defaults_only2"
group_var_all: "group_var_all_from_defaults_only2"
extra_var: "extra_var_from_defaults_only2"
""",
})
inv1 = Inventory(loader=fake_loader, variable_manager=v, host_list='/etc/ansible/inventory1')
inv1.set_playbook_basedir('./')
play1 = Play.load(dict(
hosts=['all'],
roles=['defaults_only1', 'defaults_only2'],
), loader=fake_loader, variable_manager=v)
# first we assert that the defaults as viewed as a whole are the merged results
# of the defaults from each role, with the last role defined "winning" when
# there is a variable naming conflict
res = v.ge |
deepnn/coffee | covfefe/frameworks/torch/objectives.py | Python | mit | 1,703 | 0.014093 | from __future__ import absolute_import
from __future__ import print_function
import torch.nn as nn
def l1_loss(size_ave=True):
return nn.L1Loss(size_average=size_ave)
def mse_loss(size_ave=True):
return nn.MSELoss(size_average=size_ave)
def ce_loss(loss_weight=None, size_ave=True):
return nn.CrossEntropyLoss(weight=loss_weight,size_average=size_ave)
def log_loss(loss_weight=None, size_ave=True, dim=2):
if dim == 1:
return nn.NLLLoss(weight=loss_weight,size_average=size_ave)
elif dim == 2:
return nn.NLLLoss2d(weight=loss_weight,size_average=size_ave)
def kldiv_loss(loss_weight=None, size_ave=True):
return nn.KLDivLoss(weight=loss_weight,size_average=size_ave)
def bce_loss(loss_weight=None, size_ave=True):
return nn.BCELoss(weight=loss_weight,size_average=size_ave)
def mr_loss(margin=0, size_ave=True):
return nn.MarginRankingLoss(margin=margin,size_average=size_ave)
def he_loss(size_ave=True):
return nn.HingeEmbeddingLoss(size_average=size_ave)
def mlm_loss(size_ave=True):
return nn.MultiLabelMarginLoss(si | ze_average=size_ave)
def smoothl1_loss(size_ave=True):
return nn.SmoothL1Loss(size_average=size_ave)
def sm_loss(size_ave=True):
return nn.SoftMarginLoss(size_average=size_ave)
def mlsm_loss(loss_weight=None, size_ave=True):
return nn.MultiLabelSoftMarginLoss(weight=loss_weight,size_average=size_ave)
def cosem_loss(margin=0, size_ave=True):
return nn.CosineEmbeddingLoss(margin=margin, size_average=size_ave)
def mm_loss(p=1, margin=1, loss_weight=None, size | _ave=True):
return nn.MultiMarginLoss(p=p, margin=margin,
weight=loss_weight,size_average=size_ave)
|
IIITS/iiits.ac.in | iiits/migrations/0026_auto_20160426_1232.py | Python | mit | 1,265 | 0.003162 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-26 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('iiits', '0025_auto_20160425_1937'),
]
operations = [
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.Char | Field(max_length=100)),
('photo', models.ImageField | (upload_to='iiits/static/iiits/images/staff')),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='StaffDesignation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='staff',
name='designation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iiits.StaffDesignation'),
),
]
|
mviitanen/marsmod | mcp/runtime/getchangedsrc.py | Python | gpl-2.0 | 1,420 | 0.002817 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 02:10:23 2011
@author: IxxI
@version: v1.0
"""
import sys
import logging
from optparse import OptionParser
from commands import Commands, CLIENT, SERVER
from mcp import getchangedsrc_side
def main():
parser = OptionParser(version='MCP %s' % Commands.fullversion())
parser.add_option('--client', dest='only_client', action='store_true', help='only process client', default=False)
parser.add_option('--server', dest='only_server', action='store_true', help='only process server', default=False)
parser.add_option('-c', '--config', dest='config | ', help='additional configuration file')
options, _ = parser.parse_args()
getchangedsrc(options.config, options.only_client, options.only_server)
def getchangedsrc(co | nffile, only_client, only_server):
try:
commands = Commands(conffile)
# client or server
process_client = True
process_server = True
if only_client and not only_server:
process_server = False
if only_server and not only_client:
process_client = False
if process_client:
getchangedsrc_side(commands, CLIENT)
if process_server:
getchangedsrc_side(commands, SERVER)
except Exception: # pylint: disable-msg=W0703
logging.exception('FATAL ERROR')
sys.exit(1)
if __name__ == '__main__':
main()
|
goll/flask_app | provisioning/files/wsgi.py | Python | mit | 109 | 0 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from | dr import app
if __name | __ == '__main__':
app.run()
|
RRCKI/pilot | HPC/HPCJob.py | Python | apache-2.0 | 1,933 | 0.005691 |
import argparse
import logging
import os
import sys
from mpi4py import MPI
from pandayoda.yodacore import Yoda
from pandayoda.yodaexe import Droid
import logging
logging.basicConfig(level=logging.DEBUG)
def main(globalWorkDir, localWorkDir):
comm = | MPI.COMM_WORLD
mpirank = comm.Get_rank()
# Create separate working directory for each rank
| from os.path import abspath as _abspath, join as _join
curdir = _abspath (localWorkDir)
wkdirname = "rank_%s" % str(mpirank)
wkdir = _abspath (_join(curdir,wkdirname))
if not os.path.exists(wkdir):
os.makedirs (wkdir)
os.chdir (wkdir)
if mpirank==0:
yoda = Yoda.Yoda(globalWorkDir, localWorkDir)
yoda.run()
else:
droid = Droid.Droid(globalWorkDir, localWorkDir)
droid.run()
if __name__ == "__main__":
usage = """
usage: %(prog)s <command> [options] [args]
Commands:
help <command> Output help for one of the commands below
"""
oparser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]), add_help=True)
oparser.add_argument('--globalWorkingDir', dest="globalWorkingDir", default=None, help="Global share working directory")
oparser.add_argument('--localWorkingDir', dest="localWorkingDir", default=None, help="Local working directory. if it's not set, it will use global working directory")
if len(sys.argv) == 1:
oparser.print_help()
sys.exit(-1)
args = oparser.parse_args(sys.argv[1:])
if args.globalWorkingDir is None:
print "Global working directory is needed."
oparser.print_help()
sys.exit(-1)
if args.localWorkingDir is None:
args.localWorkingDir = args.globalWorkingDir
try:
main(args.globalWorkingDir, args.localWorkingDir)
print "HPCJob-Yoda success"
sys.exit(0)
except Exception as e:
print "HPCJob-Yoda failed"
print(e)
sys.exit(1)
|
saurabh6790/frappe | frappe/desk/form/save.py | Python | mit | 1,676 | 0.03043 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.desk.form.load import run_onload
@frappe.whitelist()
def savedocs(doc, action):
"""save / submit / update doclist"""
try:
doc = frappe.get_doc(json.loads(doc))
set_local_name(doc)
# action
doc.docstatus = {"Save":0, "Submit": 1, "Update": 1, "Cancel": 2}[action]
if doc.docstatus==1:
doc.submit()
else:
doc.save()
# update recent documents
run_onload(doc)
send_updated_docs(doc)
frappe.msgprint(frappe._("Saved"), indicator='green', alert=True)
except Exception:
frappe.errprint(frappe.utils.get_traceback())
raise
@frappe.whitelist()
def cancel(doctype=None, name=None, workflow_state_fieldname=None, workflow_state=No | ne):
"""cancel a doclist"""
try:
doc = frappe.get_doc(doctype, name)
if workflow_state_fieldname and workflow_state:
doc.set(workflow_state_field | name, workflow_state)
doc.cancel()
send_updated_docs(doc)
frappe.msgprint(frappe._("Cancelled"), indicator='red', alert=True)
except Exception:
frappe.errprint(frappe.utils.get_traceback())
raise
def send_updated_docs(doc):
from .load import get_docinfo
get_docinfo(doc)
d = doc.as_dict()
if hasattr(doc, 'localname'):
d["localname"] = doc.localname
frappe.response.docs.append(d)
def set_local_name(doc):
def _set_local_name(d):
if doc.get('__islocal') or d.get('__islocal'):
d.localname = d.name
d.name = None
_set_local_name(doc)
for child in doc.get_all_children():
_set_local_name(child)
if doc.get("__newname"):
doc.name = doc.get("__newname")
|
onespacemedia/cms-redirects | redirects/migrations/0002_auto_20160805_1654.py | Python | mit | 907 | 0.002205 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration | ):
dependencies = [
('redirects', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='redirect',
name= | 'regular_expression',
field=models.BooleanField(default=False, help_text=b"This will allow using regular expressions to match and replace patterns in URLs. See the <a href='https://docs.python.org/2/library/re.html' target='_blank'>Python regular expression documentation for details."),
),
migrations.AddField(
model_name='redirect',
name='test_path',
field=models.CharField(help_text=b'You will need to specify a test path to ensure your regular expression is valid.', max_length=200, null=True, blank=True),
),
]
|
denisenkom/django-sqlserver | tests/select_related_onetoone/tests.py | Python | mit | 11,118 | 0.001979 | from __future__ import unicode_literals
import django
from django.core.exceptions import FieldError
from django.test import SimpleTestCase, TestCase
from .models import (
AdvancedUserStat, Child1, Child2, Child3, Child4, Image, LinkedList,
Parent1, Parent2, Product, StatDetails, User, UserProfile, UserStat,
UserStatResult,
)
class ReverseSelectRelatedTestCase(TestCase):
def setUp(self):
user = User.objects.create(username="test")
UserProfile.objects.create(user=user, state="KS", city="Lawrence")
results = UserStatResult.objects.create(results='first results')
userstat = UserStat.objects.create(user=user, posts=150,
results=results)
StatDetails.objects.create(base_stats=userstat, comments=259)
user2 = User.objects.create(username="bob")
results2 = UserStatResult.objects.create(results='moar results')
advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5,
results=results2)
StatDetails.objects.create(base_stats=advstat, comments=250)
p1 = Parent1(name1="Only Parent1")
p1.save()
c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1)
c1.save()
p2 = Parent2(name2="Child2 Parent2")
p2.save()
c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2)
c2.save()
def test_basic(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
def test_follow_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results").get(username="test")
self.assertEqual(u.userstat.posts, 150)
self.assertEqual(u.userstat.results.results, 'first results')
def test_follow_two(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile", "userstat").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
self.assertEqual(u.userstat.posts, 150)
def test_follow_two_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test")
self.assertEqual(u.userstat.results.results, 'first results')
self.assertEqual(u.userstat.statdetails.comments, 259)
def test_forward_and_back(self):
with self.assertNumQueries(1):
stat = UserStat.objects.select_related("user__userprofile").get(user__username="test")
self.assertEqual(stat.user.userprofile.state, 'KS')
self.assertEqual(stat.user.userstat.posts, 150)
def test_back_and_forward(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat").get(username="test")
self.assertEqual(u.userstat.user.username, 'test')
def test_not_followed_by_default(self):
with self.assertNumQueries(2):
u = User.objects.select_related().get(username="test")
self.assertEqual(u.userstat.posts, 150)
def test_follow_from_child_class(self):
with self.assertNumQueries(1):
stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200)
self.assertEqual(stat.statdetails.comments, 250)
self.assertEqual(st | at.user.username, 'bob')
    def test_follow_inheritance(self):
        """select_related() can follow down to an inherited child model."""
        with self.assertNumQueries(1):
            stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200)
            self.assertEqual(stat.advanceduserstat.posts, 200)
            self.assertEqual(stat.user.username, 'bob')
        # The child's own user link was not part of the select_related, so
        # following it costs exactly one additional query.
        with self.assertNumQueries(1):
            self.assertEqual(stat.advanceduserstat.user.username, 'bob')
def test_nullab | le_relation(self):
im = Image.objects.create(name="imag1")
p1 = Product.objects.create(name="Django Plushie", image=im)
p2 = Product.objects.create(name="Talking Django Plushie")
with self.assertNumQueries(1):
result = sorted(Product.objects.select_related("image"), key=lambda x: x.name)
self.assertEqual([p.name for p in result], ["Django Plushie", "Talking Django Plushie"])
self.assertEqual(p1.image, im)
# Check for ticket #13839
self.assertIsNone(p2.image)
    def test_missing_reverse(self):
        """
        Ticket #13839: select_related() should NOT cache None
        for missing objects on a reverse 1-1 relation.
        """
        with self.assertNumQueries(1):
            user = User.objects.select_related('userprofile').get(username='bob')
        # "bob" has no profile; the attribute must raise, not return None.
        with self.assertRaises(UserProfile.DoesNotExist):
            user.userprofile
    def test_nullable_missing_reverse(self):
        """
        Ticket #13839: select_related() should NOT cache None
        for missing objects on a reverse 0-1 relation.
        """
        Image.objects.create(name="imag1")
        with self.assertNumQueries(1):
            image = Image.objects.select_related('product').get()
        # No product references the image; accessing it must raise.
        with self.assertRaises(Product.DoesNotExist):
            image.product
def test_parent_only(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Only Parent1")
with self.assertNumQueries(0):
with self.assertRaises(Child1.DoesNotExist):
p.child1
def test_multiple_subclass(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Child1 Parent1")
self.assertEqual(p.child1.name2, 'Child1 Parent2')
def test_onetoone_with_subclass(self):
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2').get(name2="Child2 Parent2")
self.assertEqual(p.child2.name1, 'Child2 Parent1')
    def test_onetoone_with_two_subclasses(self):
        """Chained select_related() through two one-to-one subclass levels."""
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2")
            self.assertEqual(p.child2.name1, 'Child2 Parent1')
            # The grandchild level does not exist for this row.
            with self.assertRaises(Child3.DoesNotExist):
                p.child2.child3
        # Add a row that does have the grandchild level and re-query.
        p3 = Parent2(name2="Child3 Parent2")
        p3.save()
        c2 = Child3(name1="Child3 Parent1", parent2=p3, value=2, value3=3)
        c2.save()
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2")
            self.assertEqual(p.child2.name1, 'Child3 Parent1')
            self.assertEqual(p.child2.child3.value3, 3)
            self.assertEqual(p.child2.child3.value, p.child2.value)
            self.assertEqual(p.child2.name1, p.child2.child3.name1)
    def test_multiinheritance_two_subclasses(self):
        """select_related() through a multi-inheritance chain (Child1/Child4)."""
        with self.assertNumQueries(1):
            p = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1")
            self.assertEqual(p.child1.name2, 'Child1 Parent2')
            self.assertEqual(p.child1.name1, p.name1)
            # No Child4 row exists yet for this chain.
            with self.assertRaises(Child4.DoesNotExist):
                p.child1.child4
        Child4(name1='n1', name2='n2', value=1, value4=4).save()
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2")
            self.assertEqual(p.name2, 'n2')
            self.assertEqual(p.child1.name1, 'n1')
            self.assertEqual(p.child1.name2, p.name2)
            self.assertEqual(p.child1.value, 1)
            self.assertEqual(p.child1.child4.name1, p.child1.name1)
            self.assertEqual(p.child1.child4.name2, p.child1.name2)
            self.assertEqual(p.child1.child4.value, p.child1.value)
            self.assertEqual(p.child1.child4.value4, 4)
def test_inheritance_deferred(self):
if django.VERSION < (1, 10, 0):
self.skipTest('does not work on older |
wschoenell/chimera_imported_googlecode | src/chimera/core/tests/test_log.py | Python | gpl-2.0 | 1,086 | 0.007366 |
from chimera.core.chimeraobject import ChimeraObject
from chimera.core.manager import Manager
from chimera.core.exceptions import ChimeraException
from nose.tools import assert_raises
import chimera.core.log
import logging
log = logging.getLogger("chimera.test_log")
class TestLog (object):
def test_log (self):
class Simple (ChimeraObject):
def __init__ (self):
ChimeraObject.__init__(self)
def answer (self):
try:
raise ChimeraException("I'm an Exception, sorry.")
except ChimeraException:
| self.log.exception("from except: wow, exception caught.")
raise ChimeraException("I'm a new Exception, sorry again")
manager = Manager()
manager.addClass(Simple, "simple")
simple = ma | nager.getProxy(Simple)
try:
simple.answer()
except ChimeraException, e:
assert e.cause != None
log.exception("wow, something wrong")
manager.shutdown()
|
google-research/tiny-differentiable-simulator | python/examples/whole_body_control/torque_stance_leg_controller.py | Python | apache-2.0 | 10,138 | 0.011442 | # Lint as: python3
"""A torque based stance controller framework."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
# Select the MPC backend: the C++ OSQP solver, or a learned PyTorch surrogate.
use_cpp_mpc = True

import numpy as np
import os
import glob
import random
import matplotlib.pyplot as plt

import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, default="mpc_20201028-164829_mpc_inputs.npy")
parser.add_argument("--output", type=str, default="mpc_20201028-164829_torques.npy")
args = parser.parse_args()

if use_cpp_mpc:
  # Use the C++ convex-MPC solver (mpc_osqp) shipped with motion_imitation.
  try:
    import mpc_osqp as convex_mpc # pytype: disable=import-error
  except: #pylint: disable=W0702
    print("You need to install motion_imitation")
    print("Either run python3 setup.py install --user in this repo")
    print("or use pip3 install motion_imitation --user")
    sys.exit()
else:
  # Fall back to a pre-trained feed-forward network approximating the MPC,
  # evaluated on CPU in double precision.
  import torch
  model = torch.load("mpc_ffn_model.pt", map_location='cpu')
  model.to('cpu')
  model.double()
  model.eval()
  input_file = args.input
  output_file = args.output
  loss_fn = torch.nn.MSELoss()
  loss_arr = []
from typing import Any, Sequence, Tuple
import numpy as np
import pybullet as p # pytype: disable=import-error
try:
import gait_generator as gait_generator_lib
import leg_controller
except: #pylint: disable=W0702
print("You need to install motion_imitation")
print("Either run python3 setup.py install --user in this repo")
print("or use pip3 install motion_imitation --user")
sys.exit()
_FORCE_DIMENSION = 3  # one 3-D contact-force vector per foot
# The QP weights in the convex MPC formulation. See the MIT paper for details:
# https://ieeexplore.ieee.org/document/8594448/
# Intuitively, this is the weights of each state dimension when tracking a
# desired CoM trajectory. The full CoM state is represented by
# (roll_pitch_yaw, position, angular_velocity, velocity, gravity_place_holder).
# _MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0.5, 0.5, 0.2, 0.2, 0.2, 0.1, 0)
# This worked well for in-place stepping in the real robot.
# _MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0., 0., 0.2, 1., 1., 0., 0)
_MPC_WEIGHTS = (5, 5, 0.2, 0, 0, 10, 0., 0., 1., 1., 1., 0., 0)
# Look-ahead horizon: 10 steps of 25 ms each (0.25 s total).
_PLANNING_HORIZON_STEPS = 10
_PLANNING_TIMESTEP = 0.025
class TorqueStanceLegController(leg_controller.LegController):
"""A torque based stance leg controller framework.
Takes in high level parameters like walking speed and turning speed, and
generates necessary the torques for stance legs.
"""
  def __init__(
      self,
      robot: Any,
      gait_generator: Any,
      state_estimator: Any,
      desired_speed: Tuple[float, float] = (0, 0),
      desired_twisting_speed: float = 0,
      desired_body_height: float = 0.45,
      body_mass: float = 220 / 9.8,
      body_inertia: Tuple[float, float, float, float, float, float, float,
                          float, float] = (0.07335, 0, 0, 0, 0.25068, 0, 0, 0,
                                           0.25447),
      num_legs: int = 4,
      friction_coeffs: Sequence[float] = (0.45, 0.45, 0.45, 0.45),
  ):
    """Initializes the class.

    Tracks the desired position/velocity of the robot by computing proper joint
    torques using MPC module.

    Args:
      robot: A robot instance.
      gait_generator: Used to query the locomotion phase and leg states.
      state_estimator: Estimate the robot states (e.g. CoM velocity).
      desired_speed: desired CoM speed in x-y plane.
      desired_twisting_speed: desired CoM rotating speed in z direction.
      desired_body_height: The standing height of the robot.
      body_mass: The total mass of the robot.
      body_inertia: The inertia matrix in the body principle frame. We assume
        the body principle coordinate frame has x-forward and z-up.
      num_legs: The number of legs used for force planning.
      friction_coeffs: The friction coeffs on the contact surfaces.
    """
    # Logs accumulated by get_action() for later dumping/analysis.
    self.torques=[]
    self.mpc_inputs=[]
    self._robot = robot
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self.desired_speed = desired_speed
    self.desired_twisting_speed = desired_twisting_speed

    self._desired_body_height = desired_body_height
    self._body_mass = body_mass
    self._num_legs = num_legs
    self._friction_coeffs = np.array(friction_coeffs)
    body_inertia_list = list(body_inertia)
    weights_list = list(_MPC_WEIGHTS)
    # NOTE(review): alpha is presumably the QP regularization weight on the
    # contact forces -- confirm against the ConvexMpc C++ signature.
    alpha = 1e-5
    self._cpp_mpc = convex_mpc.ConvexMpc(
        body_mass,
        body_inertia_list,
        self._num_legs,
        _PLANNING_HORIZON_STEPS,
        _PLANNING_TIMESTEP,
        weights_list,
        alpha,
        convex_mpc.QPOASES
    )
  def reset(self, current_time):
    """Interface hook; this controller keeps no state that needs resetting."""
    del current_time
  def update(self, current_time):
    """Interface hook; nothing to refresh between control steps."""
    del current_time
def get_action(self):
"""Computes the torque for stance legs."""
desired_com_position = np.array((0., 0., self._desired_body_height),
dtype=np.float64)
desired_com_velocity = np.array(
(self.desired_speed[0], self.desired_speed[1], 0.), dtype=np.float64)
desired_com_roll_pitch_yaw = np.array((0., 0., 0.), dtype=np.float64)
desired_com_angular_velocity = np.array(
(0., 0., self.desired_twisting_speed), dtype=np.float64)
foot_contact_state = np.array(
[(leg_state in (gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.EARLY_CONTACT))
for leg_state in self._gait_generator.desired_leg_state],
dtype=np.int32)
# We use the body yaw aligned world frame for MPC computation.
com_roll_pitch_yaw = np.array(self._robot.GetBaseRollPitchYaw(),
dtype=np.float64)
com_roll_pitch_yaw[2] = 0
#predicted_contact_forces=[0]*self._num_legs*_FORCE_DIMENSION
# print("Com Vel: {}".format(self._state_estimator.com_velocity_body_frame))
# print("Com RPY: {}".format(self._robot.GetBaseRollPitchYawRate()))
# print("Com RPY Rate: {}".format(self._robot.GetBaseRollPitchYawRate()))
#p.submitProfileTiming("predicted_contact_forces")
mpc_input = []
com_vel = np.asarray(self._state_estimator.com_velocity_body_frame,dtype=np.float64)
com_roll_pitch_yaw = np.array(com_roll_pitch_yaw, dtype=np.float64)
com_angular_velocity = np.asarray(self._robot.GetBaseRollPitchYawRate(),dtype=np.float64)
foot_positions_base_frame = np.array(self._robot.GetFootPositionsInBaseFrame().flatten(),dtype=np.float64)
mpc_input.append(com_vel.flatten())
mpc_input.append(com_roll_pitch_yaw.flatten())
mpc_input.append(com_angular_velocity.flatten())
mpc_input.append(foot_contact_state.flatten())
mpc_input.append(foot_positions_base_frame.flatten())
mpc_input.append(self._friction_coeffs.flatten())
mpc_input.append(desired_com_position.flatten())
mpc_input.append(desired_com_velocity.flatten())
mpc_input.append(desired_com_roll_pitch_yaw.flatten())
mpc_input.append(desired_com_angular_velocity.flatten())
mpc_input = np.array(mpc_input).flatten()
self.mpc_inputs.append(mpc_input)
if use_cpp_mpc:
#print("cpp_mpc.compute")
#p.submitProfileTiming("cpp_mpc.compute")
predicted_contact_forces = self._cpp_mpc.compute_contact_forces(
[0], #com_position
com_vel, #com_velocity
com_roll_pitch_yaw , #com_roll_pitch_yaw
# Angular velocity in the yaw aligned world frame is actually different
# from rpy rate. We use it here as a simple approximation.
com_angular_velocity, #com_angular_velocity
foot_contact_state, #foot_contact_states
foot_positions_base_frame, | #foot_positions_base_frame
self._friction_coeffs, #foot_friction_coeffs
desired_com_position, #desired_com_position
desired_com_velocity, #desired_com_velocity
desired_com | _roll_pitch_yaw, #desired_com_roll_pitch_yaw
|
DLR-SC/prov2bigchaindb | prov2bigchaindb/version.py | Python | apache-2.0 | 48 | 0 | __version__ | = '0.4.1'
__short_version__ = ' | 0.4'
|
intuition-io/insights | insights/sources/live/currencies.py | Python | apache-2.0 | 1,793 | 0.000558 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
Insights Forex live source
--------------------------
:copyright (c) 2014 Xavier Bruhiere
:license: Apache 2.0, see LICENSE for more details.
'''
import time
import pandas as pd
import dna.logging
import intuition.data.forex as forex
log = dna.logging.logger(__name__)
class Forex(object):
    '''
    At each event datetime of the provided index, fetches live forex data
    from TrueFX, retrying until quotes for all requested pairs are in.
    '''
    def __init__(self, pairs, properties):
        # Seconds to wait between polls when quotes are incomplete.
        # (Fixed extraction artifact: "properties. | get" -> "properties.get".)
        self._wait_retry = properties.get('retry', 10)
        self.forex = forex.TrueFX(pairs=pairs)
        self.forex.connect()

    def get_data(self, sids):
        """Block until TrueFX returns quotes covering all requested sids."""
        while True:
            rates = self.forex.query_rates()
            if len(rates.keys()) >= len(sids):
                log.debug('Data available for {}'.format(rates.keys()))
                break
            log.debug('Incomplete data ({}/{}), retrying in {}s'.format(
                len(rates.keys()), len(sids), self._wait_retry))
            time.sleep(self._wait_retry)
            # Reconnect in case the TrueFX session expired while waiting.
            debug_feedback = self.forex.connect()
            log.info('New Truefx connection: {}'.format(debug_feedback))
        return rates

    @property
    def mapping(self):
        """Field mapping: output column -> (converter, TrueFX source field)."""
        return {
            'dt': (lambda x: x, 'dt'),
            #TODO Here conversion (weird result for now)
            # Or: (lambda x: pd.tslib.i8_to_pydt(x + '000000'), 'trade_time'),
            'trade_time': (lambda x: pd.datetime.utcfromtimestamp(
                float(x[:-3])), 'timeStamp'),
            'sid': (lambda x: x, 'sid'),
            'price': (float, 'bid'),
            'ask': (float, 'ask'),
            'high': (float, 'high'),
            'low': (float, 'low'),
            'volume': (lambda x: 10000, 'bid')
        }
|
PXke/invenio | invenio/legacy/bibauthorid/templates.py | Python | gpl-2.0 | 91,185 | 0.007052 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibauthorid HTML templates"""
# pylint: disable=W0105
# pylint: disable=C0301
from flask import url_for
#from cgi import escape
#from urllib import quote
#
import invenio.legacy.bibauthorid.config as bconfig
from invenio.config import CFG_SITE_LANG
from invenio.config import CFG_SITE_URL
from invenio.config import CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
from invenio.modules.formatter import format_record
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibauthorid.config import EXTERNAL_SYSTEMS_LIST
from invenio.legacy.bibauthorid.webapi import get_person_redirect_link, get_canonical_id_from_person_id, get_person_names_from_id
from invenio.legacy.bibauthorid.webapi import get_personiID_external_ids
from invenio.legacy.bibauthorid.frontinterface import get_uid_from_personid
from invenio.legacy.bibauthorid.frontinterface import get_bibrefrec_name_string
from invenio.legacy.bibauthorid.frontinterface import get_canonical_id_from_personid
from invenio.base.i18n import gettext_set_language, wash_language
from invenio.legacy.webuser import get_email
from invenio.utils.html import escape_html
#from invenio.utils.text import encode_for_xml
from flask import session
class Template:
"""Templating functions used by aid"""
    def __init__(self, language=CFG_SITE_LANG):
        """Set defaults for all aid template output"""
        self.language = language
        # Bind the translation function for the (washed) language code.
        self._ = gettext_set_language(wash_language(language))
def tmpl_person_detail_layout(self, content):
'''
writes HTML content into the person css container
@param content: HTML content
@type content: string
@return: HTML code
@rtype: string
'''
html = []
h = html.append
h('<div id="aid_person">')
h(content)
h('</div>')
return "\n".join(html)
def tmpl_transaction_box(self, teaser_key, messages, show_close_btn=True):
'''
Creates a notification box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param messages: list of keys to a dict which return the message to display in the box
@type messages: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
transaction_teaser_dict = { 'success': 'Success!',
'failure': 'Failure!' }
transaction_message_dict = { 'confirm_success': '%s transaction%s successfully executed.',
'confirm_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'reject_success': '%s transaction%s successfully executed.',
'reject_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'reset_success': '%s transaction%s successfully executed.',
'reset_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.' }
teaser = self._(transaction_teaser_dict[teaser_key])
html = []
h = html.append
for key in transaction_message_dict.keys():
same_kind = [mes for mes in messages if mes == key]
trans_no = len(same_kind)
if trans_no == 0:
continue
elif trans_no == 1:
args = [trans_no, '']
else:
args = [trans_no, 's']
color = ''
if teaser_key == 'failure':
color = 'background: #FC2626;'
arg | s.append(CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL)
message = self._(transaction_message_dict[key] % tuple(args))
h('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
h(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' % (color))
h(' <p><sp | an style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
    def tmpl_notification_box(self, teaser_key, message_key, bibrefs, show_close_btn=True):
        '''
        Creates a notification box based on the jQuery UI style

        @param teaser_key: key to a dict which returns the teaser
        @type teaser_key: string
        @param message_key: key to a dict which returns the message to display in the box
        @type message_key: string
        @param bibrefs: bibrefs which are about to be assigned
        @type bibrefs: list of strings
        @param show_close_btn: display close button [x]
        @type show_close_btn: boolean

        @return: HTML code
        @rtype: string
        '''
        notification_teaser_dict = {'info': 'Info!' }
        notification_message_dict = {'attribute_papers': 'You are about to attribute the following paper%s:' }

        teaser = self._(notification_teaser_dict[teaser_key])

        # Pluralize the message when more than one paper is listed.
        arg = ''
        if len(bibrefs) > 1:
            arg = 's'
        message = self._(notification_message_dict[message_key] % (arg) )

        html = []
        h = html.append
        h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
        h('  <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
        h('    <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
        h('    <strong>%s</strong> %s' % (teaser, message))
        h("<ul>")
        for paper in bibrefs:
            # A bibref may be a "ref,recid" pair; only the record id part is
            # passed to format_record.
            if ',' in paper:
                pbibrec = paper.split(',')[1]
            else:
                pbibrec = paper
            h("<li>%s</li>"
                   % (format_record(int(pbibrec), "ha")))
        h("</ul>")

        if show_close_btn:
            h('    <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')

        h('  </div>')
        h('</div>')

        return "\n".join(html)
def tmpl_error_box(self, teaser_key, message_key, show_close_btn=True):
'''
Creates an error box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
error_teaser_dict = {'sorry': 'Sorry.',
'error': 'Error:' }
error_message_dict = {'check_entries': 'Please check your entries.' |
OCA/partner-contact | partner_contact_address_default/tests/test_partner_contact_address_default.py | Python | agpl-3.0 | 1,714 | 0.00175 | # Copyright 2020 Tecnativa - Carlos Dauden
# Copyright 2020 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
class TestPartnerContactAddressDefault(common.TransactionCase):
    """Checks that address_get() honours the partner_*_id default contacts."""

    def setUp(self):
        super().setUp()
        self.Partner = self.env["res.partner"]
        self.partner = self.Partner.create({"name": "Parent partner"})
        self.partner_child_delivery1 = self.Partner.create(
            {
                "name": "Child delivery 1",
                "type": "delivery",
                "parent_id": self.partner.id,
            }
        )
        self.partner_child_delivery2 = self.Partner.create(
            {
                "name": "Child delivery 2",
                "type": "delivery",
                "parent_id": self.partner.id,
            }
        )
        # (Fixed extraction artifact: stray "|" before the closing paren.)
        self.partner_child_invoice = self.Partner.create(
            {"name": "Child invoice", "type": "invoice", "parent_id": self.partner.id}
        )

    def test_contact_address_default(self):
        # Defaults set on the parent: both addresses resolve to the parent
        # itself even though typed children exist.
        self.partner.partner_delivery_id = self.partner
        self.partner.partner_invoice_id = self.partner
        res = self.partner.address_get()
        self.assertEqual(res["delivery"], self.partner.id)
        self.assertEqual(res["invoice"], self.partner.id)
        # Defaults set on a child contact take precedence for that contact.
        # (Fixed extraction artifact: "self.ass | ertEqual".)
        self.partner_child_delivery2.partner_delivery_id = self.partner_child_delivery2
        self.partner_child_delivery2.partner_invoice_id = self.partner_child_delivery2
        res = self.partner_child_delivery2.address_get()
        self.assertEqual(res["delivery"], self.partner_child_delivery2.id)
        self.assertEqual(res["invoice"], self.partner_child_delivery2.id)
|
mulkieran/blivet | blivet/formats/prepboot.py | Python | gpl-2.0 | 3,650 | 0.001644 | # prepboot.py
# Format class for PPC PReP Boot.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
from ..size import Size
from .. import platform
from ..i18n import N_
from . import DeviceFormat, register_device_format
from parted import PARTITION_PREP
import os
import logging
log = logging.getLogger("blivet")
class PPCPRePBoot(DeviceFormat):
    """ PPC PReP Boot device format: a small raw partition that is zeroed
        out at creation time. """
    _type = "prepboot"
    _name = N_("PPC PReP Boot")
    partedFlag = PARTITION_PREP
    _formattable = True                  # can be formatted
    _linuxNative = True                  # for clearpart
    _maxSize = Size("10 MiB")
    _minSize = Size("4 MiB")
    _supported = True

    def __init__(self, **kwargs):
        """
            :keyword device: path to block device node
            :keyword exists: whether this is an existing format
            :type exists: bool

            .. note::

                The 'device' kwarg is required for existing formats. For non-
                existent formats, it is only necessary that the :attr:`device`
                attribute be set before the :meth:`create` method runs. Note
                that you can specify the device at the last moment by specifying
                it via the 'device' kwarg to the :meth:`create` method.
        """
        DeviceFormat.__init__(self, **kwargs)

    def _create(self, **kwargs):
        """ Write the formatting to the specified block device by zeroing
            it out in 1 MiB chunks.

            :keyword device: path to device node
            :type device: str
            :returns: None.

            .. :note::

                If a device node path is passed to this method it will overwrite
                any previously set value of this instance's "device" attribute.
        """
        super(PPCPRePBoot, self)._create(**kwargs)
        # Fix: initialize fd so the finally clause cannot raise NameError
        # (masking the real OSError) when os.open() itself fails.
        fd = None
        try:
            fd = os.open(self.device, os.O_RDWR)
            length = os.lseek(fd, 0, os.SEEK_END)
            os.lseek(fd, 0, os.SEEK_SET)
            buf = '\0' * 1024 * 1024
            while length > 0:
                if length >= len(buf):
                    os.write(fd, buf.encode("utf-8"))
                    length -= len(buf)
                else:
                    # Final partial chunk.
                    buf = '\0' * length
                    os.write(fd, buf.encode("utf-8"))
                    length = 0
        except OSError as e:
            log.error("error zeroing out %s: %s", self.device, e)
        finally:
            if fd is not None:
                os.close(fd)

    @property
    def status(self):
        # A PReP partition has no mounted/active state.
        return False

    @property
    def supported(self):
        # Only meaningful on IBM pSeries (PPC) platforms.
        return super(PPCPRePBoot, self).supported and isinstance(platform.platform, platform.IPSeriesPPC)


register_device_format(PPCPRePBoot)
|
bckwltn/SickRage | sickbeard/processTV.py | Python | gpl-3.0 | 20,557 | 0.006227 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
import stat
import sickbeard
from sickbeard import postProcessor
from sickbeard import db, helpers, exceptions
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard import logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickbeard import common
from sickbeard import failedProcessor
from lib.unrar2 import RarFile, RarInfo
from lib.unrar2.rar_exceptions import *
class ProcessResult:
    """Mutable holder for the outcome of a post-processing run."""

    def __init__(self):
        # Assume success until a step reports otherwise; log starts empty.
        self.result = True
        self.output = ''
def delete_folder(folder, check_empty=True):
    """Safely remove a processed folder.

    Refuses to delete anything that is not a directory, the configured
    TV_DOWNLOAD_DIR itself, or (when check_empty is True) a non-empty
    folder. Returns True when the folder was removed, False otherwise.
    """
    # check if it's a folder
    if not ek.ek(os.path.isdir, folder):
        return False

    # check if it isn't TV_DOWNLOAD_DIR
    if sickbeard.TV_DOWNLOAD_DIR:
        if helpers.real_path(folder) == helpers.real_path(sickbeard.TV_DOWNLOAD_DIR):
            return False

    # check if it's empty folder when wanted checked
    if check_empty:
        check_files = ek.ek(os.listdir, folder)
        if check_files:
            return False

    # try deleting folder
    try:
        logger.log(u"Deleting folder: " + folder)
        shutil.rmtree(folder)
    except (OSError, IOError), e:
        logger.log(u"Warning: unable to delete folder: " + folder + ": " + ex(e), logger.WARNING)
        return False

    return True
def delete_files(processPath, notwantedFiles, result):
    """Delete leftover files from processPath, clearing read-only bits first.

    Does nothing when result.result is already False; progress and errors
    are appended to result.output.
    """
    if not result.result:
        return

    #Delete all file not needed
    for cur_file in notwantedFiles:

        cur_file_path = ek.ek(os.path.join, processPath, cur_file)

        if not ek.ek(os.path.isfile, cur_file_path):
            continue #Prevent error when a notwantedfiles is an associated files

        result.output += logHelper(u"Deleting file " + cur_file, logger.DEBUG)

        #check first the read-only attribute
        file_attribute = ek.ek(os.stat, cur_file_path)[0]
        if (not file_attribute & stat.S_IWRITE):
            # File is read-only, so make it writeable
            result.output += logHelper(u"Changing ReadOnly Flag for file " + cur_file, logger.DEBUG)
            try:
                ek.ek(os.chmod, cur_file_path, stat.S_IWRITE)
            except OSError, e:
                result.output += logHelper(u"Cannot change permissions of " + cur_file_path + ': ' + str(e.strerror),
                                           logger.DEBUG)
        try:
            ek.ek(os.remove, cur_file_path)
        except OSError, e:
            result.output += logHelper(u"Unable to delete file " + cur_file + ': ' + str(e.strerror), logger.DEBUG)
def logHelper(logMessage, logLevel=logger.INFO):
    """Log *logMessage* at *logLevel* and return it with a trailing newline,
    so callers can append it to a ProcessResult output buffer."""
    logger.log(logMessage, logLevel)
    return u"%s\n" % logMessage
def processDir(dirName, nzbName=None, process_method=None, force=False, is_priority=None, failed=False, type="auto"):
"""
Scans through the files in dirName and processes whatever media files it finds
dirName: The folder name to look in
nzbName: The NZB name which resulted in this folder being downloaded
force: True to postprocess already postprocessed files
failed: Boolean for whether or not the download failed
type: Type of postprocessing auto or manual
"""
result = ProcessResult()
result.output += logHelper(u"Processing folder " + dirName, logger.DEBUG)
result.output += logHelper(u"TV_DOWNLOAD_DIR: " + sickbeard.TV_DOWNLOAD_DIR, logger.DEBUG)
# if they passed us a real dir then assume it's the one we want
if ek.ek(os.path.isdir, dirName):
dirName = ek.ek(os.path.realpath, dirName)
# if the client and SickRage are not on the same machine translate the Dir in a network dir
elif sickbeard.TV_DOWNLOAD_DIR and ek.ek(os.path.isdir, sickbeard.TV_DOWNLOAD_DIR) \
and ek.ek(os.path.normpath, dirName) != ek.ek(os.path.normpath, sickbeard.TV_DOWNLOAD_DIR):
dirName = ek.ek(os.path.join, sickbeard.TV_DOWNLOAD_DIR, ek.ek(os.path.abspath, dirName).split(os.path.sep)[-1])
result.output += logHelper(u"Trying to use folder " + dirName, logger.DEBUG)
# if we didn't find a real dir then quit
if not ek.ek(os.path.isdir, dirName):
result.output += logHelper(
u"Unable to figure out what folder to process. If your downloader and SickRage aren't on the same PC make sure you fill out your TV download dir in the config.",
logger.DEBUG)
return result.output
path, dirs, files = get_path_dir_files(dirName, nzbName, type)
SyncFiles = filter(helpers.isSyncFile, files)
# Don't post process if files are still being synced and option is activated
if SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES:
result.output += logHelper(u"Found temporary sync files, skipping post processing", logger.WARNING)
return result.output
result.output += logHelper(u"PostProcessing Path: " + path, logger.DEBUG)
result.output += logHelper(u"PostProcessing Dirs: " + str(dirs), logger.DEBUG)
rarFiles = filter(helpers.isRarFile, files)
rarContent = unRAR(path, rarFiles, force, result)
files += rarContent
videoFiles = filter(helpers.isMediaFile, files)
videoInRar = filter(helpers.isMediaFile, rarContent)
result.output += logHelper(u"PostProcessing Files: " + str(files), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoFiles: " + str(videoFiles), logger.DEBUG)
result.output += logHelper(u"PostProcessing RarContent: " + str(rarContent), logger.DEBUG)
result.output += logHelper(u"PostProcessing VideoInRar: " + str(videoInRar), logger.DEBUG)
# If nzbName is set and there's more than one videofile in the folder, files will be lost (overwritten).
nzbNameOriginal = nzbName
if len(videoFiles) >= 2:
nzbName = None
| if not process_method:
process_method = sickbeard.PROCESS_METHOD
re | sult.result = True
#Don't Link media when the media is extracted from a rar in the same path
if process_method in ('hardlink', 'symlink') and videoInRar:
result.result = process_media(path, videoInRar, nzbName, 'move', force, is_priority, result)
delete_files(path, rarContent, result)
for video in set(videoFiles) - set(videoInRar):
result.result = process_media(path, [video], nzbName, process_method, force, is_priority, result)
else:
for video in videoFiles:
result.result = process_media(path, [video], nzbName, process_method, force, is_priority, result)
#Process Video File in all TV Subdir
for dir in [x for x in dirs if validateDir(path, x, nzbNameOriginal, failed, result)]:
result.result = True
for processPath, processDir, fileList in ek.ek(os.walk, ek.ek(os.path.join, path, dir), topdown=False):
SyncFiles = filter(helpers.isSyncFile, fileList)
# Don't post process if files are still being synced and option is activated
if SyncFiles and sickbeard.POSTPONE_IF_SYNC_FILES:
result.output += logHelper(u"Found temporary sync files, skipping post processing", logger.WARNING)
return result.output
rarFiles = filter(helpers.isRarFile, fileList)
rarContent = unRAR(processPath, rarFiles, force, result)
fileList = set(fileList + rarContent)
videoFiles = filter(helpers.isMediaFile, fileList)
|
zjuela/LapSRN-tensorflow | tensorlayer/files.py | Python | apache-2.0 | 33,051 | 0.003906 | #! /usr/bin/python
# -*- coding: utf8 -*-
import tensorflow as tf
import os
import numpy as np
import re
import sys
import tarfile
import gzip
import zipfile
from . import visualize
from . import nlp
import pickle
from six.moves import urllib
from six.moves import cPickle
from six.moves import zip
from tensorflow.python.platform import gfile
## Load dataset functions
def load_mnist_dataset(shape=(-1,784), path="data/mnist/"):
"""Automatically download MNIST dataset
and return the training, validation and test set with 50000, 10000 and 10000
digit images respectively.
Parameters
----------
shape : tuple
The shape of digit images, defaults to (-1,784)
path : string
Path to download data to, defaults to data/mnist/
Examples
--------
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784))
>>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
"""
# We first define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
def load_mnist_images(path, filename):
filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/')
print(filepath)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(shape)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(path, filename):
filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/')
# Read the labels in Yann LeCun's binary format.
with gzip.open(filepath, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# Download and read the training and test set images and labels.
print("Load or Download MNIST > {}".format(path))
X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as e | xpected in main().
# (It doesn't matter how we do this as long as we can read them again.)
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.a | sarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_val, y_val, X_test, y_test
def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data/cifar10/', plotable=False, second=3):
"""The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with
6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with
10000 images. The test batch contains exactly 1000 randomly-selected images from
each class. The training batches contain the remaining images in random order,
but some training batches may contain more images from one class than another.
Between them, the training batches contain exactly 5000 images from each class.
Parameters
----------
shape : tupe
The shape of digit images: e.g. (-1, 3, 32, 32) , (-1, 32, 32, 3) , (-1, 32*32*3)
plotable : True, False
Whether to plot some image examples.
second : int
If ``plotable`` is True, ``second`` is the display time.
path : string
Path to download data to, defaults to data/cifar10/
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=True)
Notes
------
CIFAR-10 images can only be display without color change under uint8.
>>> X_train = np.asarray(X_train, dtype=np.uint8)
>>> plt.ion()
>>> fig = plt.figure(1232)
>>> count = 1
>>> for row in range(10):
>>> for col in range(10):
>>> a = fig.add_subplot(10, 10, count)
>>> plt.imshow(X_train[count-1], interpolation='nearest')
>>> plt.gca().xaxis.set_major_locator(plt.NullLocator()) # 不显示刻度(tick)
>>> plt.gca().yaxis.set_major_locator(plt.NullLocator())
>>> count = count + 1
>>> plt.draw()
>>> plt.pause(3)
References
----------
- `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`_
- `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`_
- `Code references <https://teratail.com/questions/28932>`_
"""
print("Load or Download cifar10 > {}".format(path))
#Helper function to unpickle the data
def unpickle(file):
fp = open(file, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
filename = 'cifar-10-python.tar.gz'
url = 'https://www.cs.toronto.edu/~kriz/'
#Download and uncompress file
maybe_download_and_extract(filename, path, url, extract=True)
#Unpickle file and fill in data
X_train = None
y_train = []
for i in range(1,6):
data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i)))
if i == 1:
X_train = data_dic['data']
else:
X_train = np.vstack((X_train, data_dic['data']))
y_train += data_dic['labels']
test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch"))
X_test = test_data_dic['data']
y_test = np.array(test_data_dic['labels'])
if shape == (-1, 3, 32, 32):
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
elif shape == (-1, 32, 32, 3):
X_test = X_test.reshape(shape, order='F')
X_train = X_train.reshape(shape, order='F')
X_test = np.transpose(X_test, (0, 2, 1, 3))
X_train = np.transpose(X_train, (0, 2, 1, 3))
else:
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
y_train = np.array(y_train)
if plotable == True:
print('\nCIFAR-10')
import matplotlib.pyplot as plt
fig = plt.figure(1)
print('Shape of a training image: X_train[0]',X_train[0].shape)
plt.ion() # interactive mode
count = 1
for row in range(10):
for col in range(10):
a = fig.add_subplot(10, 10, count)
if shape == (-1, 3, 32, 32):
# plt.imshow(X_train[count-1], interpolation='nearest')
plt.imshow(np.transpose(X_train[count-1], (1, 2, 0)), interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
elif shape == (-1, 32, 32, 3):
plt.imshow(X_train[count-1], interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
else:
raise Exception("Do not support the given 'shape' to plot the image examples")
plt.gca().xaxis.set_major_locator(plt.NullLocator( |
TheBatUNT/thebat | tts1.py | Python | apache-2.0 | 9,411 | 0.025183 | #!/usr/bin/python
from espeak import espeak
import MySQLdb
import serial
import time
import os.path
import math
from espeak import core as espeak_core
timerSeconds = 2 #Time between command repeats itself in seconds
timerProximity = 1
wallElapsed = 0 #starting value for elapsed
objectElapsed = 0
openingBothElapsed = 0
openingLeftElapsed = 0
openingRightElapsed = 0
leftProximityElapsed = 0
rightProximityElapsed = 0
ser = serial.Serial('/dev/rfcomm1',9600,timeout = 5)#wall, right and left sensor rfcomm
ser2 = serial.Serial('/dev/rfcomm0',9600,timeout =5)#object rfcomm
# Call espeak.synth() and wait for utterence to be finished.
# From https://answers.launchpad.net/python-espeak/+question/244655
def say(*args):
done_synth = [False]
def synth_callback(event, pos, length):
if event == espeak_core.event_MSG_TERMINATED:
done_synth[0] = True
espeak.set_SynthCallback(synth_callback)
call_result = espeak.synth(*args)
while call_result and not done_synth[0]:
time.sleep(0.05)
return call_result
say("Turning on Device")
time.sleep(2)
say("The Bat")
time.sleep(1)
say("Indoor navigation and obstacle detection")
# Openning DB connection
# Get user measurement settings
def convchk():
db = MySQLdb.connect("localhost","root","hearmeout","thebat")
# Preparing cursor object using cursor() method
cursor = db.cursor()
sql = "SELECT * FROM currentUser"
try:
#Execute SQL commands
cursor.execute(sql)
results = cursor.fetchall()
flagMeas = results[0][3]
except:
print "Error: Unable to fetch measurement"
db.close
del results
return flagMeas
flagMeas=convchk()
#############NAVIGATION FUNCTION###########################
# Store users current location in DB
def userLoc(sensorData):
print sensorData
lat = float(sensorData.split(" ")[7]) #current Latitude
lon = float(sensorData.split(" ")[8]) #current Longitude
print lat, lon
db = MySQLdb.connect("localhost","root","hearmeout","thebat")
query = "UPDATE currentUser SET Latitude=%f, Longitude=%f"%(lat,lon)
cursor = db.cursor()
try:
cursor.execute(query)
db.commit()
print "Success"
except:
db.rollback()
print "Failure"
db.close()
return;
###########################################################
###############INCHES to FEET function###################
def conv(feetDistance):
feetDistance = float(feetDistance) #make distance a float
feetDistance = feetDistance/12 #converting inches to feet
feetDistance = round(feetDistance,0) #rounding to 0 dec place
feetDistance = int(feetDistance) #Removing ".0" from output
return feetDistance;
#####################################################
###############Inches to Meters##################
def conv2meter(meterDistance):
meterDistance = float(meterDistance) #make distance a float
meterDistance = meterDistance/39.37 #converting to meter
meterDistance= round(meterDistance,1) #rounding to 0 dec place
print meterDistance
return meterDistance;
##############################################
#############Calling meters or feet#######
def callmeas(flagMeas,distanceforconv2):
if flagMeas == 1 : #meters
distance5= conv2meter(distanceforconv2)
return distance5;
elif flagMeas == 0 : #feet
distance6 = conv(distanceforconv2)
return distance6;
def flagmeascheck(flagMeas):
if flagMeas == 1 :
say("Meters")
elif flagMeas == 0 :
say("Feet")
return;
c1=0 #counter for timeout for headphone bluetooth
c2=0 #counter for timeout for belt bluetooth
while 1:
flagMeas=convchk()
try:
serialdata = ser.readline() # storing serial data
except:
say("Headphone Bluetooth is disconnected")
print "headphone Bluetooth is disconnected"
time.sleep(5)
c1=c1+1
if c1==5:
print "Time out on headphone bluetooth, device shutting down"
say("Device is shutting down check Headphone bluetooth and try again")
# os.system("sudo shutdown -h now")
exit()
continue
try:
serialdata2 = ser2.readline() #storing serial data 2
except:
say("Belt Sensor is disconnected")
print "Belt sensor is disconnected"
time.sleep(5)
c2=c2+1
if c2==5:
print "Time out on Belt bluetooth, device shutting down"
say("Device is shutting down check Belt bluetooth and try again")
# os.system("sudo shutdown -h now")
exit()
continue
tempabc = serialdata.split(" ") #length of serialdata
tempabcd = serialdata2.split(" ")
stringlenSerial2 = len(tempabcd)
stringlenSerial = len(tempabc) #setting to
if stringlenSerial == 10:
userLoc(serialdata);
if stringlenSerial != 9:
print "Wait line for headphones"
print serialdata
#time.sleep(2)
ser.flushInput()
ser2.flushInput()
continue
elif stringlenSerial2 !=2 :
print "Wait line for belt"
print serialdata2
#time.sleep(2)
ser.flushInput()
ser2.flushInput()
continue
else:
sensorL = serialdata.split(" ")[2] #Left Head Sensor
sensorR = serialdata.split(" ")[4] #Right Head Sensor
sensor = serialdata.split(" ")[0] #Head Sensor
sensorO = serialdata2.split(" ")[0] #Belt Sensor
gpscheck = serialdata.split(" ")[6] #Gps Sensor
#############OBJECT SENSOR##############################
distanceO = serialdata2.split(" ")[1]
distanceObject = conv(distanceO)
distance16 = callmeas(flagMeas,distanceO)
distance2 = str(distance16)
#############FRONT SENSOR DISTANCE###########################
distance = serialdata.split(" ")[1]
distanceFront = conv(distance)
distance15 = callmeas(flagMeas,distance)
distance1 = str(distance15)#converting distance to string for tts
############LEFT SENSOR DISTANCE#################################
distanceL = serialdata.split(" ")[3]
distanceL = float(distanceL) #make distance a float
#distanceL = distanceL/12 #converting inches to feet
############RIGHT SENSOR DISTANCE#############################
distanceR = serialdata.split(" ")[5]
distanceR = float(distanceR) #make distance a float
#distanceR = distanceR/12 #converting inches to feet
print serialdata #Print Sensor and Distance Data from Arduino
print serialdata2 #Printing object sensor data
if distanceFront < 10 and distanceFront > 0: #between 10-5ft
if wallElapsed == 0:
say("Wall in")
say(distance1)
flagmeascheck(flagMeas)
wallTimerStart = time.time()
wallElapsed = 1
else:
wallElapsed = time.time() - wallTimerStart
if wallElapsed > timerSeconds:
wallElapsed = 0
#####################OBJECT SENSOR##############################################################
if distanceObject < 8 and distanceObject > 0 : #between 8-5ft
if objectElapsed == 0:
say("Object in")
say(distance2)
flagmeascheck(flagMeas)
objectTimerStart = time.time()
objectElapsed = 1
else:
objectElapsed = time.time() - objectTimerStart
if objectElapsed > timerProximity:
objectElapsed = 0
if di | stanceL > 144 and distanceR > 144 :
if openingBothElapsed == 0:
say("Opening o | n the right and Left")
openingBothTimerStart = time.time()
openingBothElapsed = 1
else:
openingBothElapsed = time.time() - openingBothTimerStart
if openingBothElapsed > timerSeconds:
openingBothElapsed = 0
if distanceL > 144 and distanceR < 120 :
if openingLeftElapsed == 0:
say("Opening on the Left")
openingLeftTimerStart = time.time()
openingLeftElapsed = 1
else:
openingLeftElapsed = time.time() - openingLeftTimer |
vinegret/youtube-dl | youtube_dl/extractor/umg.py | Python | unlicense | 3,414 | 0.000879 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_filesize,
parse_iso8601,
)
class UMGDeIE(InfoExtractor):
IE_NAME = 'umg:de'
IE_DESC = 'Universal Music Deutschland'
_VALID_URL = r'https?://(?:www\.)?universal-music\.de/[^/]+/videos/[^/?#]+-(?P<id>\d+)'
_TEST = {
'url': 'https://www.universal-music.de/sido/videos/jedes-wort-ist-gold-wert-457803',
'md5': 'ebd90f48c80dcc82f77251eb1902634f',
'info_dict': {
'id': '457803',
'ext': 'mp4',
'title': 'Jedes Wort ist Gold wert',
'timestamp': 1513591800,
'upload_date': '20171218',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'https://api.universal-music.de/graphql',
video_id, query={
'query': '''{
universalMusic(channel:16) {
video(id:%s) {
headline
formats {
formatId
url
type
width
height
mimeType
fileSize
}
duration
createdDate
}
}
}''' % video_id})['data']['universalMusic']['video']
title = video_data['headline']
hls_url_template = 'http://mediadelivery.universal-music-services.de/vod/mp4:autofill/storage/' + '/'.join(list(video_id)) + '/content/%s/file/playlist.m3u8'
thumbnails = []
formats = []
def add_m3u8_format(format_id):
m3u8_formats = self._extract_m3u8_formats(
hls_url_template % format_id, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal='False')
if m3u8_formats and m3u8_formats[0].get('height'):
formats.extend(m3u8_formats)
for f in video_data.get('formats', []):
f_url = f.get('url')
mime_type = f.get('mimeType')
if not f_url or mime_type == 'application/mxf':
continue
fmt = {
'url': f_url,
'width': in | t_or_none(f.get('width')),
'h | eight': int_or_none(f.get('height')),
'filesize': parse_filesize(f.get('fileSize')),
}
f_type = f.get('type')
if f_type == 'Image':
thumbnails.append(fmt)
elif f_type == 'Video':
format_id = f.get('formatId')
if format_id:
fmt['format_id'] = format_id
if mime_type == 'video/mp4':
add_m3u8_format(format_id)
urlh = self._request_webpage(f_url, video_id, fatal=False)
if urlh:
first_byte = urlh.read(1)
if first_byte not in (b'F', b'\x00'):
continue
formats.append(fmt)
if not formats:
for format_id in (867, 836, 940):
add_m3u8_format(format_id)
self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr'))
return {
'id': video_id,
'title': title,
'duration': int_or_none(video_data.get('duration')),
'timestamp': parse_iso8601(video_data.get('createdDate'), ' '),
'thumbnails': thumbnails,
'formats': formats,
}
|
hlange/LogSoCR | .waf/waflib/Errors.py | Python | agpl-3.0 | 1,682 | 0.032105 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010-2016 (ita)
"""
Exceptions used in the Waf code
"""
import traceback, sys
class WafError(Exception):
"""Base class for all Waf errors"""
def __init__(self, msg='', ex=None):
"""
:param msg: error message
:type msg: string
:param ex: exception causing this error (optional)
:type ex: exception
"""
self.msg = msg
assert not isinstance(msg, Exception)
self.stack = []
if ex:
if not msg:
self.msg = str(ex)
if isinstance(ex, WafError):
self.stack = ex.stack
else:
self.stack = traceback.extract_tb(sys.exc_info()[2])
self.stack += traceback.extract_stack()[:-1]
self.verbose_msg = ''.join(traceback.format_list(self.stack))
def __str__(self):
return str(self.msg)
class BuildError(WafError):
"""Error raised during the build and install phases"""
def __init__(self, error_tasks=[]):
"""
:param error_tasks: tasks that could not complete normally
:type error_tasks: list of task objects
"""
self.tasks = error_tasks
WafError.__init__(self, self.format_error())
def format_error(self):
"""Forma | ts the error messages from the tasks that failed"""
lst = ['Build failed']
for tsk in self.tasks:
txt | = tsk.format_error()
if txt: lst.append(txt)
return '\n'.join(lst)
class ConfigurationError(WafError):
"""Configuration exception raised in particular by :py:meth:`waflib.Context.Context.fatal`"""
pass
class TaskRescan(WafError):
"""Task-specific exception type signalling required signature recalculations"""
pass
class TaskNotReady(WafError):
"""Task-specific exception type signalling that task signatures cannot be computed"""
pass
|
lene/tavsiye | test_recommender.py | Python | gpl-3.0 | 6,800 | 0.000735 | from compare_sets import jaccard_coefficient, similarity_matrix, similar_users, recommendations
from alternative_methods import asymmetric_similarity, minhash_similarity, minhashed
from read_file import read_file
from minhash import minhash
import unittest
from functools import reduce
__author__ = 'lene'
class TestRecommender(unittest.TestCase):
def test_jaccard(self):
self.assertEqual(jaccard_coefficient({'a', 'b'}, {'b', 'a'}), 1.)
self.assertEqual(jaccard_coefficient({'a', 'b'}, {'c', 'd'}), 0.)
self.assertAlmostEqual(jaccard_coefficient({'a', 'b'}, {'a', 'c'}), 1. / 3.)
def test_read_file(self):
csv = read_file('testdata.csv')
self.assertIsInstance(csv, dict)
self.assertEqual(len(csv), 5)
self.assertDictEqual(
csv,
{1: {12, 99, 32}, 2: {32, 77, 54, 66}, 3: {99, 42, 12, 32}, 4: {77, 66, 47}, 5: {65}}
)
def test_similarity_matrix_basic(self):
self.assertDictEqual(
similarity_matrix({1: {'a'}, 2: {'a'}}),
{1: {1: 1.0, 2: 1.0}, 2: {1: 1.0, 2: 1.0}}
)
self.assertDictEqual(
similarity_matrix({1: {'a'}, 2: {'b'}}),
{1: {1: 1.0, 2: 0.0}, 2: {1: 0.0, 2: 1.0}}
)
def test_similarity_matrix_elements_equal_to_themselves(self):
larger_list = {i: {i} for i in range(100)}
larger_list_matrix = similarity_matrix(larger_list)
self.assertEqual(len(larger_list_matrix), len(larger_list))
for i in range(len(larger_list)):
self.assertEqual(larger_list_matrix[i][i], 1.)
def test_similarity_matrix_with_testdata(self):
self.assertDictEqual(
similarity_matrix(read_file('testdata.csv')), {
1: {1: 1.0, 2: 0.16666666666666666, 3: 0.75, 4: 0.0, 5: 0.0},
2: {1: 0.16666666666666666, 2: 1.0, 3: 0.14285714285714285, 4: 0.4, 5: 0.0},
3: {1: 0.75, 2: 0.14285714285714285, 3: 1.0, 4: 0.0, 5: 0.0},
4: {1: 0.0, 2: 0.4, 3: 0.0, 4: 1.0, 5: 0.0},
5: | {1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 1.0}
}
)
def test_similar_users(self):
similarity = similarity_matrix({1: {'a'}, 2: {'a'}})
sel | f.assertEqual(similar_users(1, similarity, 0.2), [2])
self.assertEqual(similar_users(2, similarity, 0.2), [1])
self.assertEqual(similar_users(1, similarity, 1.0), [2])
similarity = similarity_matrix({1: {'a'}, 2: {'b'}})
self.assertEqual(similar_users(1, similarity, 0.2), [])
similarity = similarity_matrix(read_file('testdata.csv'))
self.assertEqual(similar_users(1, similarity, 0.2), [3])
self.assertEqual(similar_users(2, similarity, 0.15), [1, 4])
def test_recommendations(self):
sets = {1: {'a'}, 2: {'a', 'b'}}
similarity = similarity_matrix(sets)
self.assertEqual(recommendations(1, sets, similarity, 0.4), {'b'})
self.assertEqual(recommendations(2, sets, similarity, 0.4), set())
def test_recommendations_with_testdata(self):
sets = read_file('testdata.csv')
similarity = similarity_matrix(sets)
self.assertEqual(recommendations(1, sets, similarity, 0.75), {42})
self.assertFalse(recommendations(3, sets, similarity, 0.75))
self.assertEqual(
recommendations(1, sets, similarity, 0.15),
(sets[2] | sets[3]) - sets[1]
)
def test_recommendations_with_zero_cutoff_returns_all_other_products(self):
sets = read_file('testdata.csv')
similarity = similarity_matrix(sets)
for i in sets.keys():
self.assertEqual(
recommendations(i, sets, similarity, 0),
reduce(lambda a, b: a | b, sets.values(), set()) - sets[i]
)
def test_asymmetric_similarity(self):
self.assertEqual(asymmetric_similarity({'a'}, {'a', 'b'}), 1)
self.assertEqual(asymmetric_similarity({'a', 'b'}, {'a'}), 0.5)
sets = {1: {'a'}, 2: {'a', 'b'}}
similarity = similarity_matrix(sets, asymmetric_similarity)
self.assertDictEqual(
similarity, {1: {1: 1.0, 2: 1.0}, 2: {1: 0.5, 2: 1.0}}
)
def test_asymmetric_similarity_returns_superset_of_jaccard(self):
sets = read_file('testdata.csv')
similarity1 = similarity_matrix(sets)
similarity2 = similarity_matrix(sets, asymmetric_similarity)
for i in sets.keys():
self.assertTrue(
recommendations(i, sets, similarity1, 0.25).issubset(
recommendations(i, sets, similarity2, 0.25)
)
)
def test_minhashed_bounded_by_supplied_length(self):
self.assertEqual(minhashed({1}), {1})
self.assertEqual(minhashed({1}, 2), {1})
bigger_set = {i for i in range(100)}
self.assertLessEqual(len(minhashed(bigger_set, 10)), 10)
def test_minhash_similarity_succeeds_in_obvious_cases(self):
self.assertEqual(minhash_similarity({1, 2}, {3, 4}), 0.)
self.assertEqual(minhash_similarity({1, 2}, {2, 1}), 1.)
def test_minhash_with_strings(self):
self.assertEqual(
minhash(
[
("haoyuan", ["ak420", "ipad", "girlfriend"]),
("fenfen", ["ak46", "bayaji", "genjiu"])
],
5, 0
),
[
('haoyuan', ['ipad', 'girlfriend', 'ak420', 'ak420', 'ipad']),
('fenfen', ['ak46', 'bayaji', 'ak46', 'bayaji', 'bayaji'])
]
)
def test_minhash_with_ints(self):
self.assertEqual(
minhash(
[
(1, [12, 99, 32]),
(2, [32, 77, 54, 66]),
(3, [99, 42, 12, 32]),
(4, [77, 66, 47]),
(5, [65])
],
10, 0
),
[
(1, [99, 32, 12, 12, 99, 12, 99, 12, 99, 99]),
(2, [32, 66, 77, 77, 32, 66, 54, 54, 66, 32]),
(3, [32, 42, 32, 42, 99, 32, 32, 99, 12, 32]),
(4, [66, 47, 47, 66, 77, 66, 47, 47, 77, 47]),
(5, [65, 65, 65, 65, 65, 65, 65, 65, 65, 65])
]
)
def test_minhash_with_testdata(self):
sets = read_file('testdata.csv')
similarity = similarity_matrix(sets, minhash_similarity)
self.assertEqual(recommendations(1, sets, similarity, 0.75), {42})
self.assertFalse(recommendations(3, sets, similarity, 0.75))
self.assertEqual(
recommendations(1, sets, similarity, 0.15),
(sets[2] | sets[3]) - sets[1]
)
if __name__ == '__main__':
unittest.main()
|
aquametalabs/django-snailtracker | django_snailtracker/utils.py | Python | bsd-3-clause | 965 | 0.001036 | import logging
from django.db.models.signals import post_init, post_save, post_delete
from django_snailtracker.models import (snailtracker_post_init_hook,
snailtracker_post_save_hook, snailtracker_post_delete_hook)
from django_snailtracker.he | lpers import snailtracker_enabled
from django_snailtracker.sites import snailtracker_site
logger = logging.getLogger(__name__)
def register(obj_def):
if snailtracker_enabled():
if obj_def._meta.db_table not in snailtracker_site.registry:
| logger.debug('Registering %s' % obj_def._meta.db_table)
post_init.connect(snailtracker_post_init_hook, sender=obj_def,)
post_save.connect(snailtracker_post_save_hook, sender=obj_def,)
post_delete.connect(snailtracker_post_delete_hook, sender=obj_def,)
snailtracker_site.registry[obj_def._meta.db_table] = True
else:
logger.debug('%s already registered' % obj_def._meta.db_table)
|
azumimuo/family-xbmc-addon | plugin.video.salts/salts_lib/kodi.py | Python | gpl-2.0 | 4,540 | 0.007489 | """
SALTS XBMC Addon
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re
addon = xbmcaddon.Addon()
get_setting = addon.getSetting
show_settings = addon.openSettings
def get_path():
return addon.getAddonInfo('path').decode('utf-8')
def get_profile():
return addon.getAddonInfo('profile').decode('utf-8')
def translate_ | path(path):
return xbmc.translatePath(path).decode('utf-8')
def set_setting(id, value):
if not isinstance(value, basestring): value = str(value)
addon.setSetting(id, value)
def get_version():
return addon.getAddonInfo('version')
def get_id():
return addon.getAddonInfo('id')
def get_name():
return addon.getAddonInfo('name')
def get_plugin_url(queries):
try:
query = urllib.u | rlencode(queries)
except UnicodeEncodeError:
for k in queries:
if isinstance(queries[k], unicode):
queries[k] = queries[k].encode('utf-8')
query = urllib.urlencode(queries)
return sys.argv[0] + '?' + query
def end_of_directory(cache_to_disc=True):
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
def set_content(content):
xbmcplugin.setContent(int(sys.argv[1]), content)
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if menu_items is None: menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
if is_playable is None:
playable = 'false' if is_folder else 'true'
else:
playable = 'true' if is_playable else 'false'
liz_url = get_plugin_url(queries)
if fanart: list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, list_item, isFolder=is_folder, totalItems=total_items)
def parse_query(query):
q = {'mode': 'main'}
if query.startswith('?'): query = query[1:]
queries = urlparse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
else:
q[key] = queries[key]
return q
def notify(header=None, msg='', duration=2000, sound=None):
if header is None: header = get_name()
if sound is None: sound = get_setting('mute_notifications') == 'false'
icon_path = os.path.join(get_path(), 'icon.png')
try:
xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
except:
builtin = "XBMC.Notification(%s,%s, %s, %s)" % (header, msg, duration, icon_path)
xbmc.executebuiltin(builtin)
def get_current_view():
skinPath = translate_path('special://skin/')
xml = os.path.join(skinPath, 'addon.xml')
f = xbmcvfs.File(xml)
read = f.read()
f.close()
try: src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
except: src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
src = os.path.join(skinPath, src, 'MyVideoNav.xml')
f = xbmcvfs.File(src)
read = f.read()
f.close()
match = re.search('<views>([^<]+)', read, re.DOTALL)
if match:
views = match.group(1)
for view in views.split(','):
if xbmc.getInfoLabel('Control.GetLabel(%s)' % (view)): return view
|
yokose-ks/edx-platform | common/lib/logsettings.py | Python | agpl-3.0 | 5,212 | 0 | import os
import platform
import sys
from logging.handlers import SysLogHandler
# Log levels accepted for the local and console handlers.
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']


def get_logger_config(log_dir,
                      logging_env="no_env",
                      tracking_filename="tracking.log",
                      edx_filename="edx.log",
                      dev_env=False,
                      syslog_addr=None,
                      debug=False,
                      local_loglevel='INFO',
                      console_loglevel=None,
                      service_variant=None):
    """
    Return the appropriate logging config dictionary. You should assign the
    result of this to the LOGGING var in your settings. The reason it's done
    this way instead of registering directly is because I didn't want to worry
    about resetting the logging state if this is called multiple times when
    settings are extended.

    If dev_env is set to true logging will not be done via local rsyslogd,
    instead, tracking and application logs will be dropped in log_dir.

    "tracking_filename" and "edx_filename" are ignored unless dev_env
    is set to true since otherwise logging is handled by rsyslogd.
    """
    # Revert to INFO if an invalid string is passed in
    if local_loglevel not in LOG_LEVELS:
        local_loglevel = 'INFO'

    if console_loglevel is None or console_loglevel not in LOG_LEVELS:
        console_loglevel = 'DEBUG' if debug else 'INFO'

    if service_variant is None:
        # default to a blank string so that if SERVICE_VARIANT is not
        # set we will not log to a sub directory
        service_variant = ''

    # Short hostname (e.g. "web-1" from "web-1.example.com") for syslog lines.
    hostname = platform.node().split(".")[0]
    syslog_format = ("[service_variant={service_variant}]"
                     "[%(name)s][env:{logging_env}] %(levelname)s "
                     "[{hostname}  %(process)d] [%(filename)s:%(lineno)d] "
                     "- %(message)s").format(service_variant=service_variant,
                                             logging_env=logging_env,
                                             hostname=hostname)

    # In debug mode we do not ship logs to a remote syslog collector.
    handlers = ['console', 'local'] if debug else ['console',
                                                   'syslogger-remote', 'local']

    logger_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s %(levelname)s %(process)d '
                          '[%(name)s] %(filename)s:%(lineno)d - %(message)s',
            },
            'syslog_format': {'format': syslog_format},
            'raw': {'format': '%(message)s'},
        },
        'handlers': {
            'console': {
                'level': console_loglevel,
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
                'stream': sys.stderr,
            },
            'syslogger-remote': {
                'level': 'INFO',
                'class': 'logging.handlers.SysLogHandler',
                'address': syslog_addr,
                'formatter': 'syslog_format',
            },
            'newrelic': {
                'level': 'ERROR',
                'class': 'lms.lib.newrelic_logging.NewRelicHandler',
                'formatter': 'raw',
            }
        },
        'loggers': {
            'tracking': {
                'handlers': ['tracking'],
                'level': 'DEBUG',
                'propagate': False,
            },
            '': {
                'handlers': handlers,
                'level': 'DEBUG',
                'propagate': False
            },
        }
    }

    if dev_env:
        # Development: drop both logs into plain rotating files in log_dir.
        tracking_file_loc = os.path.join(log_dir, tracking_filename)
        edx_file_loc = os.path.join(log_dir, edx_filename)
        logger_config['handlers'].update({
            'local': {
                'class': 'logging.handlers.RotatingFileHandler',
                'level': local_loglevel,
                'formatter': 'standard',
                'filename': edx_file_loc,
                'maxBytes': 1024 * 1024 * 2,
                'backupCount': 5,
            },
            'tracking': {
                'level': 'DEBUG',
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': tracking_file_loc,
                'formatter': 'raw',
                'maxBytes': 1024 * 1024 * 2,
                'backupCount': 5,
            },
        })
    else:
        # for production environments we will only
        # log INFO and up
        logger_config['loggers']['']['level'] = 'INFO'
        logger_config['handlers'].update({
            'local': {
                'level': local_loglevel,
                'class': 'logging.handlers.SysLogHandler',
                # send to the local rsyslogd daemon socket
                'address': '/dev/log',
                'formatter': 'syslog_format',
                'facility': SysLogHandler.LOG_LOCAL0,
            },
            'tracking': {
                'level': 'DEBUG',
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'facility': SysLogHandler.LOG_LOCAL1,
                'formatter': 'raw',
            },
        })

    return logger_config
|
Glorfindelrb/pyBPMN20engine | HumanInteraction/models.py | Python | mit | 3,637 | 0.005784 | # -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Roland Bettinelli
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
BPMN Package - HumanInteraction
'''
from Core.Foundation.models import BaseElement
from Activities.models import Task
from Process.models import Performer
# from Core.Common.models import FlowNode, FlowElementsContainer
from Core.Common.fonctions import residual_args
class ManualTask(Task):
    """BPMN Manual Task element."""

    def __init__(self, id, **kwargs):
        """Build a ManualTask, delegating shared setup to Task."""
        super(ManualTask, self).__init__(id, **kwargs)
        # Only the most-derived class reports unconsumed keyword arguments.
        if type(self).__name__ == 'ManualTask':
            residual_args(self.__init__, **kwargs)
class UserTask(Task):
    '''
    A User Task is a typical “workflow” Task where a human performer performs the Task with the assistance of a
    software application. The lifecycle of the Task is managed by a software component (called task manager) and is
    typically executed in the context of a Process.
    '''
    def __init__(self, id , implementation='##unspecified', **kwargs):
        '''
        implementation:str (default='##unspecified')
            This attribute specifies the technology that will be used to implement the User Task.
            Valid values are "##unspecified" for leaving the implementation technology open, "##WebService"
            for the Web service technology or a URI identifying any other technology or coordination protocol.
        renderings:Rendering list
            This attributes acts as a hook which allows BPMN adopters to specify task rendering attributes
            by using the BPMN Extension mechanism.
        '''
        super(UserTask, self).__init__(id, **kwargs)
        self.implementation = implementation
        self.renderings = kwargs.pop('renderings', [])
        # instance (runtime) attributes, populated during task execution
        self.actualOwner = None
        self.taskPriority = None
        # only the most-derived class reports unconsumed keyword arguments
        if self.__class__.__name__ == 'UserTask':
            residual_args(self.__init__, **kwargs)
class HumanPerformer(Performer):
    """BPMN Human Performer element."""

    def __init__(self, id, **kwargs):
        """Build a HumanPerformer, delegating shared setup to Performer."""
        super(HumanPerformer, self).__init__(id, **kwargs)
        # Only the most-derived class reports unconsumed keyword arguments.
        if type(self).__name__ == 'HumanPerformer':
            residual_args(self.__init__, **kwargs)
class PotentialOwner(HumanPerformer):
    """BPMN Potential Owner element."""

    def __init__(self, id, **kwargs):
        """Build a PotentialOwner, delegating shared setup to HumanPerformer."""
        super(PotentialOwner, self).__init__(id, **kwargs)
        # Only the most-derived class reports unconsumed keyword arguments.
        if type(self).__name__ == 'PotentialOwner':
            residual_args(self.__init__, **kwargs)
JuliBakagianni/CEF-ELRC | metashare/repository/editor/manual_admin_registration.py | Python | bsd-3-clause | 11,138 | 0.004669 | '''
This file contains the manually chosen admin forms, as needed for an easy-to-use
editor.
'''
from django.contrib import admin
from django.conf import settings
from metashare.repository.editor import admin_site as editor_site
from metashare.repository.editor.resource_editor import ResourceModelAdmin, \
LicenceModelAdmin
from metashare.repository.editor.superadmin import SchemaModelAdmin
from metashare.repository.models import resourceInfoType_model, \
identificatio | nInfoType_model, metadataInfoType_model, \
communicationInfoType_model, validationInfoType_model, \
relationInfoType_model, foreseenUseInfoType_model, \
corpusMediaTypeType_model, corpusTextInfoType_model, \
corpusVideoInfoType_model, textNumericalFormatInfoType_model, \
videoClassificationInfoType_model, imageClassificationInfoType_model, \
participantInfoType_model, corpusAudioInfoType_model, \
corpusImageInfoType_model, corpusT | extNumericalInfoType_model, \
corpusTextNgramInfoType_model, languageDescriptionInfoType_model, \
languageDescriptionTextInfoType_model, actualUseInfoType_model, \
languageDescriptionVideoInfoType_model, \
languageDescriptionImageInfoType_model, \
lexicalConceptualResourceInfoType_model, \
lexicalConceptualResourceTextInfoType_model, \
lexicalConceptualResourceAudioInfoType_model, \
lexicalConceptualResourceVideoInfoType_model, \
lexicalConceptualResourceImageInfoType_model, toolServiceInfoType_model, \
licenceInfoType_model, personInfoType_model, projectInfoType_model, \
documentInfoType_model, organizationInfoType_model, \
documentUnstructuredString_model
from metashare.repository.editor.related_mixin import RelatedAdminMixin
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from django.utils.decorators import method_decorator
from django.contrib.admin.util import unquote
from django.core.exceptions import PermissionDenied
from django.utils.html import escape
from django.utils.encoding import force_unicode
from django.http import Http404
from django.utils.safestring import mark_safe
from django.contrib.admin import helpers
from django.utils.translation import ugettext as _
from metashare.repository.editor.related_objects import AdminRelatedInfo
csrf_protect_m = method_decorator(csrf_protect)
# Custom admin classes
class CorpusTextInfoAdmin(SchemaModelAdmin):
    """Editor admin for text corpus info; hides the back-reference field."""
    hidden_fields = ('back_to_corpusmediatypetype_model', )
    show_tabbed_fieldsets = True
class CorpusVideoInfoAdmin(SchemaModelAdmin):
    """Editor admin for video corpus info; hides the back-reference field."""
    hidden_fields = ('back_to_corpusmediatypetype_model', )
    show_tabbed_fieldsets = True
class GenericTabbedAdmin(SchemaModelAdmin):
    """Generic editor admin that renders its fieldsets as tabs."""
    show_tabbed_fieldsets = True
class LexicalConceptualResourceInfoAdmin(SchemaModelAdmin):
    """Editor admin for lexical/conceptual resources; media type is read-only."""
    readonly_fields = ('lexicalConceptualResourceMediaType', )
    show_tabbed_fieldsets = True
class LanguageDescriptionInfoAdmin(SchemaModelAdmin):
    """Editor admin for language descriptions; media type is read-only."""
    readonly_fields = ('languageDescriptionMediaType', )
    show_tabbed_fieldsets = True
class CorpusAudioModelAdmin(SchemaModelAdmin):
    """Editor admin for audio corpus info, rendered with tabbed fieldsets."""
    show_tabbed_fieldsets = True
class PersonModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    """Editor admin for persons, listing the resources related to each."""
    # Internal bookkeeping fields are not user-editable.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')
    def instance_data(self, obj):
        # List-column callable: render the person via its unicode representation.
        return obj.__unicode__()
    instance_data.short_description = _('Person')
class OrganizationModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    """Editor admin for organizations, listing the resources related to each."""
    # Internal bookkeeping fields are not user-editable.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')
    def instance_data(self, obj):
        # List-column callable: render the organization via its unicode representation.
        return obj.__unicode__()
    instance_data.short_description = _('Organization')
class ProjectModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    """Editor admin for projects, listing the resources related to each."""
    # Internal bookkeeping fields are not user-editable.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')
    def instance_data(self, obj):
        # List-column callable: render the project via its unicode representation.
        return obj.__unicode__()
    instance_data.short_description = _('Project')
class DocumentModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    """Editor admin for documents, listing the resources related to each."""
    # Internal bookkeeping fields are not user-editable.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')
    def instance_data(self, obj):
        # List-column callable: render the document via its unicode representation.
        return obj.__unicode__()
    instance_data.short_description = _('Document')
class DocumentUnstructuredStringModelAdmin(admin.ModelAdmin, RelatedAdminMixin):
    def response_change(self, request, obj):
        '''
        Response sent after a successful submission of a change form.

        We customize this to allow closing edit popups in the same way
        as response_add deals with add popups.
        '''
        # One-to-many popup: close it, notifying the caller widget if given.
        if '_popup_o2m' in request.REQUEST:
            caller = None
            if '_caller' in request.REQUEST:
                caller = request.REQUEST['_caller']
            return self.edit_response_close_popup_magic_o2m(obj, caller)
        if '_popup' in request.REQUEST:
            # "Save and continue editing" keeps the popup open.
            if request.POST.has_key("_continue"):
                return self.save_and_continue_in_popup(obj, request)
            return self.edit_response_close_popup_magic(obj)
        else:
            # Not a popup: fall back to the standard admin behaviour.
            return super(DocumentUnstructuredStringModelAdmin, self).response_change(request, obj)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, extra_context=None):
"""
The 'change' admin view for this model.
This follows closely the base implementation from Django 1.3's
django.contrib.admin.options.ModelAdmin,
with the explicitly marked modifications.
"""
# pylint: disable-msg=C0103
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
#### begin modification ####
# make sure that the user has a full session length time for the current
# edit activity
request.session.set_expiry(settings.SESSION_COOKIE_AGE)
#### end modification ####
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url='../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
if form_validated:
#### begin modification ####
self.save_model(request, new_object, form, change=True)
#### end modification ####
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
#### begin modification ####
media = self.media or []
#### end modification ####
inline_admin_formsets = []
#### begin modification ####
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.prepopulated_fields, self.get_readonly_fields(request, obj),
model_admin=self)
media = media + adminForm.media
#### end modification ####
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': "_popup" in request.REQUEST or \
"_popup_o2m" in request.REQUEST,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
|
ujenmr/ansible | lib/ansible/plugins/doc_fragments/aws_credentials.py | Python | gpl-3.0 | 1,170 | 0.001709 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    # Standard documentation fragment for plugins that talk to AWS:
    # credential, session-token and region options with their env fallbacks.
    DOCUMENTATION = r'''
options:
  aws_profile:
    description: The AWS profile
    type: str
    aliases: [ boto_profile ]
    env:
      - name: AWS_PROFILE
      - name: AWS_DEFAULT_PROFILE
  aws_access_key:
    description: The AWS access key to use.
    type: str
    env:
      - name: AWS_ACCESS_KEY_ID
      - name: AWS_ACCESS_KEY
      - name: EC2_ACCESS_KEY
  aws_secret_key:
    description: The AWS secret key that corresponds to the access key.
    type: str
    env:
      - name: AWS_SECRET_ACCESS_KEY
      - name: AWS_SECRET_KEY
      - name: EC2_SECRET_KEY
  aws_security_token:
    description: The AWS security token if using temporary access and secret keys.
    type: str
    env:
      - name: AWS_SECURITY_TOKEN
      - name: AWS_SESSION_TOKEN
      - name: EC2_SECURITY_TOKEN
  region:
    description: The region for which to create the connection.
    type: str
    env:
      - name: AWS_REGION
      - name: EC2_REGION
'''
|
dyninc/dyn-python | dyn/mm/session.py | Python | bsd-3-clause | 3,590 | 0 | # -*- coding: utf-8 -*-
"""This module implements an interface to a DynECT REST Session. It provides
easy access to all other functionality within the dynect library via
methods that return various types of DynECT objects which will provide their
own respective functionality.
"""
import locale
# API Libs
from dyn.core import SessionEngine
from dyn.compat import urlencode, pathname2url, json, prepare_for_loads
from dyn.mm.errors import (EmailKeyError, EmailInvalidArgumentError,
EmailObjectError)
__author__ = 'jnappi'
class MMSession(SessionEngine):
    """Base object representing a Message Management API Session"""
    __metakey__ = 'a577c742-6dce-49ae-9b1f-dce6477fa646'
    _valid_methods = ('GET', 'POST')
    uri_root = '/rest/json'

    def __init__(self, apikey, host='emailapi.dynect.net', port=443, ssl=True,
                 proxy_host=None, proxy_port=None, proxy_user=None,
                 proxy_pass=None):
        """Initialize a Dynect Rest Session object and store the provided
        credentials

        :param host: DynECT API server address
        :param port: Port to connect to DynECT API server
        :param ssl: Enable SSL
        :param apikey: your unique Email API key
        :param proxy_host: A proxy host to utilize
        :param proxy_port: The port that the proxy is served on
        :param proxy_user: A username to connect to the proxy with if required
        :param proxy_pass: A password to connect to the proxy with if required
        """
        super(MMSession, self).__init__(host, port, ssl, proxy_host,
                                        proxy_port,
                                        proxy_user, proxy_pass)
        self.apikey = apikey
        self.content_type = 'application/x-www-form-urlencoded'
        self._conn = None
        # fall back to UTF-8 when the locale cannot be determined
        self._encoding = locale.getdefaultlocale()[-1] or 'UTF-8'
        self.connect()

    def _prepare_arguments(self, args, method, uri):
        """Prepare MM arguments which need to be packaged differently depending
        on the specified HTTP method
        """
        args, content, uri = super(MMSession, self)._prepare_arguments(args,
                                                                       method,
                                                                       uri)
        # every call must carry the API key
        if 'apikey' not in args:
            args['apikey'] = self.apikey
        if method == 'GET':
            # GET requests carry the arguments in the query string instead
            # of the body; quote the path unless it is already escaped
            if '%' not in uri:
                uri = pathname2url(uri)
            uri = '?'.join([uri, urlencode(args)])
            return {}, '{}', uri
        return args, urlencode(args), uri

    def _handle_response(self, response, uri, method, raw_args, final):
        """Handle the processing of the API's response"""
        body = response.read()
        ret_val = json.loads(prepare_for_loads(body, self._encoding))
        return self._process_response(ret_val['response'], method, final)

    def _process_response(self, response, method, final=False):
        """Process an API response for failure, incomplete, or success and
        throw any appropriate errors

        :param response: the JSON response from the request being processed
        :raises EmailKeyError: on status 451
        :raises EmailInvalidArgumentError: on status 452
        :raises EmailObjectError: on status 453
        """
        status = response['status']
        reason = response['message']
        self.logger.debug(status)
        if status == 200:
            return response['data']
        elif status == 451:
            raise EmailKeyError(reason)
        elif status == 452:
            raise EmailInvalidArgumentError(reason)
        elif status == 453:
            raise EmailObjectError(reason)
        # NOTE(review): any other status falls through and returns None --
        # presumably unreachable for this API; confirm before tightening.
|
nagyistoce/devide | modules/writers/stlWRT.py | Python | bsd-3-clause | 2,674 | 0.006358 | # $Id$
from module_base import ModuleBase
from module_mixins import FilenameViewModuleMixin
import module_utils
import vtk
class stlWRT(FilenameViewModuleMixin, ModuleBase):
    """DeVIDE writer module that saves vtkPolyData input to an STL file.

    The input is cleaned and triangulated (STL requires triangles) before
    being handed to the vtkSTLWriter.
    """

    def __init__(self, module_manager):
        # call parent constructor
        ModuleBase.__init__(self, module_manager)
        # need to make sure that we're all happy triangles and stuff
        self._cleaner = vtk.vtkCleanPolyData()
        self._tf = vtk.vtkTriangleFilter()
        self._tf.SetInput(self._cleaner.GetOutput())
        self._writer = vtk.vtkSTLWriter()
        self._writer.SetInput(self._tf.GetOutput())
        # sorry about this, but the files get REALLY big if we write them
        # in ASCII - I'll make this a gui option later.
        #self._writer.SetFileTypeToBinary()

        # following is the standard way of connecting up the devide progress
        # callback to VTK objects; do this for every object in the pipeline.
        for textobj in (('Cleaning data', self._cleaner),
                        ('Converting to triangles', self._tf),
                        ('Writing STL data', self._writer)):
            module_utils.setup_vtk_object_progress(self, textobj[1],
                                                   textobj[0])

        # ctor for this specific mixin
        FilenameViewModuleMixin.__init__(
            self,
            'Select a filename',
            'STL data (*.stl)|*.stl|All files (*)|*',
            {'vtkSTLWriter': self._writer},
            fileOpen=False)

        # set up some defaults
        self._config.filename = ''
        self.sync_module_logic_with_config()

    def close(self):
        # we should disconnect all inputs
        self.set_input(0, None)
        del self._writer
        FilenameViewModuleMixin.close(self)

    def get_input_descriptions(self):
        return ('vtkPolyData',)

    def set_input(self, idx, input_stream):
        self._cleaner.SetInput(input_stream)

    def get_output_descriptions(self):
        # writer module: exposes no outputs
        return ()

    def get_output(self, idx):
        # should never be called since there are no outputs
        raise Exception

    def logic_to_config(self):
        filename = self._writer.GetFileName()
        if filename is None:
            filename = ''
        self._config.filename = filename

    def config_to_logic(self):
        self._writer.SetFileName(self._config.filename)

    def view_to_config(self):
        self._config.filename = self._getViewFrameFilename()

    def config_to_view(self):
        self._setViewFrameFilename(self._config.filename)

    def execute_module(self):
        # only write when a filename has actually been configured
        if len(self._writer.GetFileName()):
            self._writer.Write()
|
LuckyGeck/dedalus | worker/engine.py | Python | mit | 5,052 | 0.002375 | import os
import traceback
from threading import Thread, Event
from common.models.task import TaskInfo
from common.models.state import TaskState
from worker.backend import WorkerBackend
from worker.executor import ExecutionEnded, Executors
from worker.resource import Resources
class TaskExecution(Thread):
    """Background thread that prepares a task's resources and then runs it.

    State and execution statistics are persisted through the backend so that
    other components can observe progress.
    """

    def __init__(self, task_id: str, backend: WorkerBackend,
                 resources: Resources, executors: Executors) -> None:
        super().__init__()
        self.task_id = task_id
        self.backend = backend
        task_info = self.backend.read_task_info(task_id)
        # Materialize resource handlers and the executor from the task structure.
        self.resources = [resources.construct_resource(_) for _ in task_info.structure.resources]
        self.executor = executors.construct_executor(self.task_id, task_info.structure.executor)
        # Set when a user requests that this task be stopped.
        self.user_stop = Event()

    def get_task_state(self):
        """Return the task's current persisted state."""
        return self.backend.read_task_state(self.task_id)

    def set_task_state(self, state: str):
        """Persist a new state string for this task."""
        self.backend.write_task_state(self.task_id, state)

    def run(self):
        # Thread entry point: prepare resources, then execute unless stopped.
        if self.prepare() and not self.user_stop.is_set():
            self.execute_task()

    def prepare(self):
        """Ensure all task resources are available; return True on success."""
        task_info = self.backend.read_task_info(self.task_id)
        task_info.exec_stats.start_preparation()
        self.backend.write_task_info(self.task_id, task_info)
        prep_error = None
        for resource in self.resources:
            if self.user_stop.is_set():
                break
            try:
                resource.ensure()
            except Exception as ex:
                print(ex)
                prep_error = str(ex)
                break
        # Re-read before updating: the record may have changed meanwhile.
        task_info = self.backend.read_task_info(self.task_id)
        task_info.exec_stats.finish_preparation(
            success=prep_error is None,
            prep_msg=prep_error,
            is_initiated_by_user=self.user_stop.is_set()
        )
        self.backend.write_task_info(self.task_id, task_info)
        return prep_error is None

    def execute_task(self):
        """Run the executor, streaming its stdout/stderr to log files."""
        task_info = self.backend.read_task_info(self.task_id)
        task_info.exec_stats.start_execution()
        self.backend.write_task_info(self.task_id, task_info)
        return_code = None
        try:
            it = self.executor.start()
            with open(os.path.join(self.executor.work_dir, 'stdout.log'), 'a') as out_file:
                with open(os.path.join(self.executor.work_dir, 'stderr.log'), 'a') as err_file:
                    for stdout, stderr in it:
                        if stdout is not None:
                            print(stdout, file=out_file)
                        if stderr is not None:
                            print(stderr, file=err_file)
        except ExecutionEnded as ex:
            # Normal termination path: the executor reports its return code.
            print('Execution ended! RetCode:', ex.retcode)
            return_code = ex.retcode
        except Exception as ex:
            print('Exception during task execution! Error: {}'.format(str(ex)))
            with open(os.path.join(self.executor.work_dir, 'stderr.log'), 'a') as err_file:
                print(traceback.format_exc(), file=err_file)
            return_code = -1
        # NOTE(review): task_info was read before execution started; any
        # concurrent backend updates are overwritten here -- confirm intended.
        task_info.exec_stats.finish_execution(retcode=return_code,
                                              is_initiated_by_user=self.user_stop.is_set())
        self.backend.write_task_info(self.task_id, task_info)

    def set_state(self, target_state: str) -> TaskState:
        """Request a state transition; returns the previous state object."""
        state = self.backend.read_task_state(self.task_id)
        old_state_name = state.name
        old_state = state.change_state(new_state=target_state, force=False)  # check for validness of state change
        if old_state_name != target_state:
            if old_state_name == TaskState.idle and target_state == TaskState.preparing:
                # idle -> preparing starts the worker thread
                self.start()
            elif target_state == TaskState.stopped:
                self.user_stop.set()
                self.executor.kill()
        return old_state
class Engine:
    """Coordinates task lifecycle: creation, state changes, execution threads."""
    def __init__(self, backend: WorkerBackend, resources: Resources, executors: Executors) -> None:
        # task_id -> TaskExecution thread, for tasks that have been started.
        self.tasks = dict()
        self.backend = backend
        self.resources = resources
        self.executors = executors
    def create_idle_task(self, task_id: str, task_struct: dict):
        """Persist a new task record; tasks start in the idle state."""
        return self.backend.write_task_info(task_id, TaskInfo.create({
            'task_id': task_id,
            'structure': task_struct
        }))
    def set_task_state(self, task_id: str, state: str) -> str:
        """Transition a task to ``state``; return the previous state's name."""
        if task_id not in self.tasks:
            task_info = self.backend.read_task_info(task_id)
            old_state = task_info.exec_stats.state.change_state(state)  # check state transition validity
            if state != TaskState.preparing:  # only 'preparing' spawns an execution thread
                self.backend.write_task_info(task_id, task_info)
                return old_state.name
            # TODO: remove non-running tasks from self.tasks
            self.tasks[task_id] = TaskExecution(task_id, self.backend, self.resources, self.executors)
        return self.tasks[task_id].set_state(state).name
|
jmckib/soundcurl | setup.py | Python | apache-2.0 | 490 | 0.002041 | f | rom setuptools import setup
setup(
name='soundcurl',
version='0.1.0',
description='A command line utility for downloading songs from SoundCloud.',
author='Jeremy McKibben-Sanders',
author_email='jmckib2+soundcurl@gmail.com',
url='https://github.com/jmckib/soundcurl',
package_dir={'': 'src'},
py_modules=['soundcurl'],
entry_points={'console_scripts': ['soundcurl = soundcurl:main']},
instal | l_requires=['beautifulsoup4==4.2.1', 'mutagen==1.21'],
)
|
thaim/ansible | lib/ansible/module_utils/hetzner.py | Python | mit | 4,484 | 0.002453 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Felix Fontein <felix@fontein.de>, 2019
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# Argument spec shared by all Hetzner Robot modules: every module needs
# the webservice credentials.
HETZNER_DEFAULT_ARGUMENT_SPEC = dict(
    hetzner_user=dict(type='str', required=True),
    hetzner_password=dict(type='str', required=True, no_log=True),
)
# The API endpoint is fixed.
BASE_URL = "https://robot-ws.your-server.de"
def fetch_url_json(module, url, method='GET', timeout=10, data=None, headers=None, accept_errors=None):
    '''
    Make general request to Hetzner's JSON robot API.

    Returns a tuple ``(result, error_code)``.  ``error_code`` is ``None``
    unless the API returned an error whose code appears in ``accept_errors``;
    any other API error, or an unreadable/undecodable body, fails the module.
    '''
    # fetch_url() picks up basic-auth credentials from these module params.
    module.params['url_username'] = module.params['hetzner_user']
    module.params['url_password'] = module.params['hetzner_password']
    resp, info = fetch_url(module, url, method=method, timeout=timeout, data=data, headers=headers)
    try:
        content = resp.read()
    except AttributeError:
        # On HTTP errors fetch_url() returns resp=None; the body (if any)
        # is stashed inside the info dict instead.
        content = info.pop('body', None)
    if not content:
        module.fail_json(msg='Cannot retrieve content from {0}'.format(url))
    try:
        result = module.from_json(content.decode('utf8'))
        if 'error' in result:
            # Tolerated error codes are passed back to the caller.
            if accept_errors:
                if result['error']['code'] in accept_errors:
                    return result, result['error']['code']
            module.fail_json(msg='Request failed: {0} {1} ({2})'.format(
                result['error']['status'],
                result['error']['code'],
                result['error']['message']
            ))
        return result, None
    except ValueError:
        module.fail_json(msg='Cannot decode content retrieved from {0}'.format(url))
# #####################################################################################
# ## FAILOVER IP ######################################################################
def get_failover_record(module, ip):
    '''
    Get information record of failover IP.

    See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
    '''
    record_url = "{0}/failover/{1}".format(BASE_URL, ip)
    result, dummy = fetch_url_json(module, record_url)
    if 'failover' not in result:
        module.fail_json(msg='Cannot interpret result: {0}'.format(result))
    return result['failover']
def get_failover(module, ip):
    '''
    Get current routing target of failover IP.

    The value ``None`` represents unrouted.

    See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
    '''
    record = get_failover_record(module, ip)
    return record['active_server_ip']
def set_failover(module, ip, value, timeout=180):
    '''
    Set current routing target of failover IP.

    Return a pair ``(value, changed)``. The value ``None`` for ``value`` represents unrouted.

    See https://robot.your-server.de/doc/webservice/en.html#post-failover-failover-ip
    and https://robot.your-server.de/doc/webservice/en.html#delete-failover-failover-ip
    '''
    url = "{0}/failover/{1}".format(BASE_URL, ip)
    if value is None:
        # Unroute the IP; an already-unrouted IP is not an error.
        result, error = fetch_url_json(
            module,
            url,
            method='DELETE',
            timeout=timeout,
            accept_errors=['FAILOVER_ALREADY_ROUTED']
        )
    else:
        # Route the IP to the given server address.
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        data = dict(
            active_server_ip=value,
        )
        result, error = fetch_url_json(
            module,
            url,
            method='POST',
            timeout=timeout,
            data=urlencode(data),
            headers=headers,
            accept_errors=['FAILOVER_ALREADY_ROUTED']
        )
    if error is not None:
        # Already routed to the requested target: report "no change".
        return value, False
    else:
        return result['failover']['active_server_ip'], True
def get_failover_state(value):
    '''
    Create result dictionary for failover IP's value.

    The value ``None`` represents unrouted.
    '''
    if value:
        state = 'routed'
    else:
        state = 'unrouted'
    return {'value': value, 'state': state}
|
SaturdayNeighborhoodHealthClinic/osler | referral/models.py | Python | gpl-3.0 | 6,199 | 0 | """Data models for referral system."""
from __future__ import unicode_literals
from builtins import map
from django.db import models
from django.core.urlresolvers import reverse
from pttrack.models import (ReferralType, ReferralLocation, Note,
ContactMethod, CompletableMixin,)
from followup.models import ContactResult, NoAptReason, NoShowReason
class Referral(Note):
    """A record of a particular patient's referral to a particular center."""

    STATUS_SUCCESSFUL = 'S'
    STATUS_PENDING = 'P'
    STATUS_UNSUCCESSFUL = 'U'

    # Status if there are no referrals of a specific type
    # Used in aggregate_referral_status
    NO_REFERRALS_CURRENTLY = "No referrals currently"

    REFERRAL_STATUSES = (
        (STATUS_SUCCESSFUL, 'Successful'),
        (STATUS_PENDING, 'Pending'),
        (STATUS_UNSUCCESSFUL, 'Unsuccessful'),
    )

    location = models.ManyToManyField(ReferralLocation)
    comments = models.TextField(blank=True)
    status = models.CharField(
        max_length=50, choices=REFERRAL_STATUSES, default=STATUS_PENDING)
    kind = models.ForeignKey(
        ReferralType,
        help_text="The kind of care the patient should recieve at the "
                  "referral location.")

    def __str__(self):
        """Provides string to display on front end for referral.

        For FQHC referrals, returns referral kind and date.
        For non-FQHC referrals, returns referral location and date.
        """
        formatted_date = self.written_datetime.strftime("%D")
        if self.kind.is_fqhc:
            return "%s referral on %s" % (self.kind, formatted_date)
        else:
            location_names = [loc.name for loc in self.location.all()]
            # BUG FIX: separator was " ," (space-comma), yielding "a ,b".
            locations = ", ".join(location_names)
            return "Referral to %s on %s" % (locations, formatted_date)

    @staticmethod
    def aggregate_referral_status(referrals):
        """Summarize a queryset of referrals into one display status string."""
        referral_status_output = ""
        if referrals:
            all_successful = all(referral.status == Referral.STATUS_SUCCESSFUL
                                 for referral in referrals)
            if all_successful:
                referral_status_output = (dict(Referral.REFERRAL_STATUSES)
                                          [Referral.STATUS_SUCCESSFUL])
            else:
                # Determine referral status based on the last FQHC referral
                referral_status_output = (dict(Referral.REFERRAL_STATUSES)
                                          [referrals.last().status])
        else:
            referral_status_output = Referral.NO_REFERRALS_CURRENTLY
        return referral_status_output
class FollowupRequest(Note, CompletableMixin):
    """An action item asking someone to follow up on a patient's referral."""

    referral = models.ForeignKey(Referral)
    contact_instructions = models.TextField()

    MARK_DONE_URL_NAME = 'new-patient-contact'
    ADMIN_URL_NAME = ''

    def class_name(self):
        """Concrete class name, used for template dispatch."""
        return type(self).__name__

    def short_name(self):
        """Short human-readable label for this action item type."""
        return "Referral"

    def summary(self):
        """One-line summary shown on action item listings."""
        return self.contact_instructions

    def mark_done_url(self):
        """URL of the form used to resolve this followup request."""
        url_args = (self.referral.patient.id, self.referral.id, self.id)
        return reverse(self.MARK_DONE_URL_NAME, args=url_args)

    def admin_url(self):
        """URL of this object's change page in the Django admin."""
        return reverse('admin:referral_followuprequest_change', args=(self.id,))

    def __str__(self):
        due = self.due_date.strftime("%D")
        return 'Followup with %s on %s about %s' % (self.patient, due, self.referral)
class PatientContact(Note):
    """Record of one attempt to contact a patient about a referral."""

    followup_request = models.ForeignKey(FollowupRequest)
    referral = models.ForeignKey(Referral)

    contact_method = models.ForeignKey(
        ContactMethod,
        null=False,
        blank=False,
        help_text="What was the method of contact?")

    contact_status = models.ForeignKey(
        ContactResult,
        blank=False,
        null=False,
        help_text="Did you make contact with the patient about this referral?")

    PTSHOW_YES = "Y"
    PTSHOW_NO = "N"
    PTSHOW_OPTS = [(PTSHOW_YES, "Yes"),
                   (PTSHOW_NO, "No")]

    has_appointment = models.CharField(
        choices=PTSHOW_OPTS,
        blank=True, max_length=1,
        verbose_name="Appointment scheduled?",
        help_text="Did the patient make an appointment?")

    no_apt_reason = models.ForeignKey(
        NoAptReason,
        blank=True,
        null=True,
        verbose_name="No appointment reason",
        help_text="If the patient didn't make an appointment, why not?")

    appointment_location = models.ManyToManyField(
        ReferralLocation,
        blank=True,
        help_text="Where did the patient make an appointment?")

    pt_showed = models.CharField(
        max_length=1,
        choices=PTSHOW_OPTS,
        blank=True,
        null=True,
        verbose_name="Appointment attended?",
        help_text="Did the patient show up to the appointment?")

    no_show_reason = models.ForeignKey(
        NoShowReason,
        blank=True,
        null=True,
        help_text="If the patient didn't go to the appointment, why not?")

    def short_text(self):
        """Return a short text description of this followup and what happened.

        Used on the patient chart view as the text in the list of followups.
        """
        # ", " join (the original " ," rendered "A ,B"); also restored the
        # missing space before "but has not yet gone."
        locations = ", ".join(map(str, self.appointment_location.all()))
        if self.pt_showed == self.PTSHOW_YES:
            return "Patient went to appointment at " + locations + "."
        if self.has_appointment == self.PTSHOW_YES:
            return ("Patient made appointment at " + locations +
                    " but has not yet gone.")
        if self.contact_status.patient_reached:
            return ("Successfully contacted patient but the "
                    "patient has not made an appointment yet.")
        return "Did not successfully contact patient"
|
bdusell/romaji-cpp | test/run_test.py | Python | mit | 1,881 | 0.00638 | #!/usr/bin/env python
import sys, re, subprocess
def usage():
    # Print command-line usage, substituting this script's name.
    print '''\
Usage: %s <program name> <test file>
''' % sys.argv[0]
def escape(s, code):
    # Wrap *s* in an ANSI SGR color sequence for *code*, then reset.
    return '\033[' + str(code) + 'm' + s + '\033[0m'
def red(s):
    # ANSI bright red (SGR code 91) — used for FAIL markers.
    return escape(s, 91)
def green(s):
    # ANSI bright green (SGR code 92) — used for PASS markers.
    return escape(s, 92)
def main():
    # Run <program name> once per "<input> <expected output>" line of the
    # test file, printing a colored PASS/FAIL line per case and a summary.
    try:
        prog_name, fin_name = sys.argv[1:]
    except ValueError:
        usage()
        sys.exit(1)
    # each test line is "<input> <expected output>"
    pat = re.compile('^(.+) (.+)$')
    # program output is compared without its trailing newline
    out_pat = re.compile('^(.*)\n$')
    failures = 0
    passes = 0
    def test_pass(s):
        print green('PASS') + ': ' + s
    def test_fail(s):
        print red('FAIL') + ': ' + s
    with open(fin_name, 'r') as fin:
        for line in filter(None, fin):
            m = pat.match(line)
            if m:
                text_in, expected_out = m.groups()
                # run the program under test; stderr is merged into stdout
                p = subprocess.Popen([prog_name, text_in], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                rc = p.wait()
                raw_out = p.stdout.read()
                m = out_pat.match(raw_out)
                actual_out = m and m.group(1)
                if rc != 0:
                    # nonzero exit counts as a failure regardless of output
                    test_fail('%s => %s: [%d] %s' % (text_in, expected_out, rc, | raw_out.rstrip()))
| failures += 1
                elif expected_out == actual_out:
                    test_pass('%s: %s == %s' % (text_in, expected_out, actual_out))
                    passes += 1
                else:
                    test_fail('%s: %s != %s' % (text_in, expected_out, actual_out))
                    failures += 1
    print '%d/%d passed' % (passes, passes + failures)
    if failures:
        print red('FAILURE')
    else:
        print green('SUCCESS')
if __name__ == '__main__':
    try:
        main()
    except IOError, e:
        # Ignore broken pipe (e.g. piping into head); errno 32 == EPIPE.
        if e.errno != 32:
            raise e
|
ilveroluca/pydoop | pydoop/test_support.py | Python | apache-2.0 | 3,704 | 0 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Miscellaneous utilities for testing.
"""
import re
import sys
import os
import tempfile
from pydoop.hdfs import default_is_local
def inject_code(new_code, target_code):
    """
    Inject new_code into target_code, before the first import.

    NOTE: this is just a hack to make examples work out-of-the-box, in
    the general case it can fail in several ways.
    """
    nl = os.linesep
    body = nl.join(new_code.strip().splitlines())
    block = "%s#--AUTO-INJECTED--%s%s%s#-----------------%s" % (
        nl, nl, body, nl, nl)
    # insert right after the newline preceding the first "import";
    # fall back to position 0 when there is no import or no newline
    insert_at = max(target_code.find("import"), 0)
    if insert_at:
        insert_at = target_code.rfind(nl, 0, insert_at) + 1
    return target_code[:insert_at] + block + target_code[insert_at:]
def add_sys_path(target_code):
    """Inject a snippet replicating the current sys.path into target_code."""
    snippet = "import sys" + os.linesep + ("sys.path = %r" % (sys.path,))
    return inject_code(snippet, target_code)
def parse_mr_output(output, vtype=str):
    """Parse "key value" lines into a dict, converting values with vtype.

    Blank lines are skipped (the original only skipped whitespace-only
    lines, so a completely empty line raised "bad output format").
    Raises ValueError for lines that are not exactly two fields or whose
    value does not convert.
    """
    d = {}
    for line in output.splitlines():
        if not line.strip():
            continue
        try:
            k, v = line.split()
            v = vtype(v)
        except (ValueError, TypeError):
            raise ValueError("bad output format")
        d[k] = v
    return d
def compare_counts(c1, c2):
    # Compare two {word: count} dicts; returns a message describing the
    # first mismatch found, or falls through (None, falsy) when equal.
    if len(c1) != len(c2):
        print len(c1), len(c2)
        re | turn "number of keys differs"
    keys = sorted(c1)
    if sorted(c2) != keys:
        return "key lists are different"
    for k in keys:
        if c1[k] != c2[k]:
            return "values are different for key %r (%r != %r)" % (
                k, c1[k], c2[k]
| )
class LocalWordCount(object):
    """Pure-Python word counter used as a reference for MapReduce runs."""

    def __init__(self, input_path, min_occurrence=0):
        # input_path may be a file or a directory of files.
        # Words seen fewer than min_occurrence times are dropped (0 = keep all).
        self.input_path = input_path
        self.min_occurrence = min_occurrence
        self.__expected_output = None

    @property
    def expected_output(self):
        """Lazily computed {word: count} mapping for input_path."""
        if self.__expected_output is None:
            self.__expected_output = self.run()
        return self.__expected_output

    def run(self):
        """Count words in input_path (skipping dotfiles in directories)."""
        wc = {}
        if os.path.isdir(self.input_path):
            for fn in os.listdir(self.input_path):
                if fn[0] == ".":
                    continue
                self._wordcount_file(wc, fn, self.input_path)
        else:
            self._wordcount_file(wc, self.input_path)
        if self.min_occurrence:
            # dict.items() works on both Python 2 and 3; the original
            # iteritems() is Python-2-only and breaks under Python 3.
            wc = dict(t for t in wc.items() if t[1] >= self.min_occurrence)
        return wc

    def _wordcount_file(self, wc, fn, path=None):
        # Accumulate counts for one file into wc (mutated in place).
        with open(os.path.join(path, fn) if path else fn) as f:
            for line in f:
                # non-alphanumeric runs act as word separators
                words = re.sub('[^0-9a-zA-Z]+', ' ', line).split()
                for w in words:
                    wc[w] = wc.get(w, 0) + 1

    def check(self, output):
        """Compare MapReduce output text against the expected counts."""
        res = compare_counts(
            parse_mr_output(output, vtype=int), self.expected_output
        )
        if res:
            return "ERROR: %s" % res
        else:
            return "OK."
def get_wd_prefix(base="pydoop_"):
    """Return the working-dir prefix.

    When HDFS is simulated on the local filesystem, the prefix lives under
    the system temp dir; otherwise ``base`` is used as-is.  The original
    ignored ``base`` in the local case and hard-coded "pydoop_".
    """
    if default_is_local():
        return os.path.join(tempfile.gettempdir(), base)
    else:
        return base
|
byDimasik/Magic_Ping | client.py | Python | gpl-3.0 | 2,642 | 0.002027 | import socket
import argparse
import sys
import magic_ping
import os
import settings
import signal
import logging
import struct
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG, filename=u'client.log')
# Handle CTRL+C: print and log a stop notice, then exit cleanly.
def signal_handler(signal, frame):
    # NOTE: the "signal" parameter shadows the signal module (unused here).
    print("\nSTOP CLIENT.")
    logging.info("STOP CLIENT.")
    exit(0)
# Command-line argument parser.
def create_cmd_parser():
    """Build the CLI parser: -f/--file (rb), -a/--address, -c/--cypher."""
    parser = argparse.ArgumentParser()
    arg_specs = (
        (('-f', '--file'), dict(required=True, type=argparse.FileType(mode='rb'))),
        (('-a', '--address'), dict(required=True)),
        (('-c', '--cypher'), dict(action='store_const', const=True)),
    )
    for names, options in arg_specs:
        parser.add_argument(*names, **options)
    return parser
signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':
    # Send a file over ICMP echo packets ("magic ping"), optionally
    # XOR-encrypted; packet 1 carries the cypher flag + file name.
    p = create_cmd_parser()
    arguments = p.parse_args(sys.argv[1:])
    file = arguments.file
    file_name = file.name
    file_size = os.stat(file_name).st_size
    address = arguments.address
    ID = 1
    # raw ICMP socket (requires root privileges)
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    packet_number = 1
    data = file_name.encode()
    if arguments.cypher:
        data = struct.pack('b', 1) + data
    else:
        data = struct.pack('b', 0) + data
    logging.debug("Start sending file to %s" % address)
    magic_ping.send_ping(s, address, ID, data, packet_number)
    print('start sending')
    already_sent = 0  # number of bytes sent so far
    while True:
        data = file.read(settings.DATA_SIZE)
        if arguments.cypher:
            data = [a ^ b for (a, b) in zip(data, settings.KEY)]  # encrypt by XOR-ing with the key
            data = bytes(data)
        if not data:
            break
        already_sent += len(data)
        packet_number += 1
        magic_ping.send_ping(s, address, ID, data, packet_number)
        logging.info('Отправлено: %.2f %%' % (already_sent / file_size * 100))
        print('Отправлено: %.2f %%' % (already_sent / file_size * 100))
    # packet_number=0 marks end-of-transfer
    magic_ping.sen | d_ping(s, address, ID, bytes(0), packet_number=0)
    logging.debug("Packets sent: %d" % packet_number)
    print("send:", packet_number)
    file.close()
    client_address, packet_number, checksum = magic_ping.receive_ping(s, ID, {})  # verify the transfer by comparing checksums
    if checksum and settings.md5_checksum(file_name) != che | cksum.decode():
        logging.warning("Файл передался с ошибками!!!")
        print("Файл передался с ошибками!!!")
    s.close()
|
ganmk/python-prctice | py-日志.py | Python | mit | 411 | 0 | import logging
from logging.handlers import TimedRotatingFileHandler

# Configure the root logger to write to ./test.log, rotating at midnight
# and keeping 2 old files; filenames get a YYYYMMDD suffix.
# (Reconstructed: stray " | " artifacts had split "file_name" and the
# "init successful" literal, which was a syntax error.)
log = logging.getLogger()
file_name = "./test.log"
logformatter = logging.Formatter('%(asctime)s [%(levelname)s]|%(message)s')
loghandle = TimedRotatingFileHandler(file_name, 'midnight', 1, 2)
loghandle.setFormatter(logformatter)
loghandle.suffix = '%Y%m%d'
log.addHandler(loghandle)
log.setLevel(logging.DEBUG)
log.debug("init successful")
|
youfoh/webkit-efl | Tools/Scripts/webkitpy/layout_tests/port/qt.py | Python | lgpl-2.1 | 8,302 | 0.00265 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""QtWebKit implementation of the Port interface."""
import glob
import logging
import re
import sys
import os
import webkit
from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.webkit import WebKitPort
from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
_log = logging.getLogger(__name__)
class QtPort(WebKitPort):
ALL_VERSIONS = ['linux', 'win', 'mac']
port_name = "qt"
    def _wk2_port_name(self):
        # Port name used when running under WebKit2 (WebKitTestRunner).
        return "qt-5.0-wk2"
    def _port_flag_for_scripts(self):
        # Flag passed to webkitdirs-based helper scripts to select Qt.
        return "--qt"
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name and port_name != cls.port_name:
return port_name
return port_name + '-' + host.platform.os_name
    # sys_platform exists only for unit testing.
    def __init__(self, host, port_name, **kwargs):
        """Initialize the Qt port; OS/version are derived from port_name."""
        WebKitPort.__init__(self, host, port_name, **kwargs)
        # FIXME: This will allow WebKitPort.baseline_search_path and WebKitPort._skipped_file_search_paths
        # to do the right thing, but doesn't include support for qt-4.8 or qt-arm (seen in LayoutTests/platform) yet.
        self._operating_system = port_name.replace('qt-', '')
        # FIXME: Why is this being set at all?
        self._version = self.operating_system()
def _generate_all_test_configurations(self):
configurations = []
for version in self.ALL_VERSIONS:
for build_type in self.ALL_BUILD_TYPES:
configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
return configurations
    def _build_driver(self):
        # The Qt port builds DRT as part of the main build step,
        # so there is nothing to do here; just report success.
        return True
    def _path_to_driver(self):
        # Path to the test driver binary (DRT/WTR) in the build dir.
        return self._build_path('bin/%s' % self.driver_name())
    def _path_to_image_diff(self):
        # ImageDiff binary used for pixel-test comparisons.
        return self._build_path('bin/ImageDiff')
def _path_to_webcore_library(self):
if self.operating_system() == 'mac':
return self._build_path('lib/QtWebKit.framework/QtWebKit')
else:
return self._build_path('lib/libQtWebKit.so')
def _modules_to_search_for_symbols(self):
# We search in every library to be reliable in th | e case of building with CONFIG+=force_static_libs_as_shared.
if self.operating_system() == 'mac':
frameworks = glob.glob(os.path.join(self._build_path('lib'), '*.framework'))
return [os.path.join(framework, os.path.splitext(os.path.basename(framework))[0]) for fr | amework in frameworks]
else:
suffix = 'dll' if self.operating_system() == 'win' else 'so'
return glob.glob(os.path.join(self._build_path('lib'), 'lib*.' + suffix))
@memoized
def qt_version(self):
version = ''
try:
for line in self._executive.run_command(['qmake', '-v']).split('\n'):
match = re.search('Qt\sversion\s(?P<version>\d\.\d)', line)
if match:
version = match.group('version')
break
except OSError:
version = '4.8'
return version
    def _search_paths(self):
        # Qt port uses same paths for baseline_search_path and _skipped_file_search_paths.
        # Most-specific first:
        #   qt-5.0-wk1 / qt-5.0-wk2  ->  qt-5.0 or qt-4.8
        #   ->  qt-<os> (qt-linux|qt-mac|qt-win)  ->  qt
        search_paths = []
        version = self.qt_version()
        if '5.0' in version:
            if self.get_option('webkit_test_runner'):
                search_paths.append('qt-5.0-wk2')
            else:
                search_paths.append('qt-5.0-wk1')
        if '4.8' in version:
            search_paths.append('qt-4.8')
        elif version:
            # any other detected version is treated as 5.0
            search_paths.append('qt-5.0')
        search_paths.append(self.port_name + '-' + self.host.platform.os_name)
        search_paths.append(self.port_name)
        return search_paths
def default_baseline_search_path(self):
return map(self._webkit_baseline_path, self._search_paths())
def _skipped_file_search_paths(self):
skipped_path = self._search_paths()
if self.get_option('webkit_test_runner') and '5.0' in self.qt_version():
skipped_path.append('wk2')
return skipped_path
    def expectations_files(self):
        # expectations_files() uses the directories listed in _search_paths
        # reversed (least-specific first), e.g. qt -> qt-linux -> qt-4.8.
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
    def setup_environ_for_server(self, server_name=None):
        """Build the server environment: base clean env plus the Qt plugin
        path and a whitelist of QtWebKit debug variables from our env."""
        clean_env = WebKitPort.setup_environ_for_server(self, server_name)
        clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
        self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
        self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG')
        self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING')
        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS')
        return clean_env
    # FIXME: We should find a way to share this implmentation with Gtk,
    # or teach run-launcher how to call run-safari and move this down to WebKitPort.
    def show_results_html_file(self, results_filename):
        # Open the results page in the launcher; "-2" selects WebKit2.
        run_launcher_args = []
        if self.get_option('webkit_test_runner'):
            run_launcher_args.append('-2')
        run_launcher_args.append("file://%s" % results_filename)
        self._run_script("run-launcher", run_launcher_args)
    def operating_system(self):
        # OS name derived from the port name in __init__ (linux/mac/win).
        return self._operating_system
    def check_sys_deps(self, needs_http):
        """Check base deps plus the WEBKIT_TESTFONTS env var Qt requires."""
        result = super(QtPort, self).check_sys_deps(needs_http)
        if not 'WEBKIT_TESTFONTS' in os.environ:
            _log.error('\nThe WEBKIT_TESTFONTS environment variable is not defined or not set properly.')
            _log.error('You must set it before running the tests.')
            _log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts')
            return False
        return result
    def _supports_switching_pixel_tests_per_test(self):
        # Qt's driver can toggle pixel tests per test via command flags.
        return True
def _should_run_as_pixel_test(self, test_input):
return any(test_input.test_name.startswith(directory)
for directory in QtPort._defau |
pylover/timesheet | timesheet/commands/start.py | Python | mit | 1,244 | 0.002412 | # -*- coding: utf-8 -*-
from timesheet.commands import Command
from timesheet.models import Subject, Task, DBSession
from timesheet.commands.completers import subject_completer, task_completer
import argparse
__author__ = 'vahid'
class StartCommand(Command):
    """Command that starts tracking a task, ending any active one first.

    (Reconstructed: stray " | " artifacts had split "argparse.REMAINDER"
    and an assignment in do_job.)
    """
    name = 'start'
    description = 'Starts a task'

    @classmethod
    def add_arguments(cls):
        cls.parser.add_argument('subject', help="Subject to do something about that.").completer = subject_completer
        cls.parser.add_argument('task', nargs=argparse.REMAINDER, metavar='task', default=[], choices=[],
                                help="Task title.").completer = task_completer

    def do_job(self):
        # Only one task may be active at a time; offer to end it first.
        active_task = Task.get_active_task()
        if active_task:
            print('You have an active task: %s' % active_task)
            answer = input("Do you want to terminate the currently active task ([y]/n)? ")
            if not answer or answer.lower() == 'y':
                active_task.end()
            else:
                return
        subject = Subject.ensure(self.args.subject)
        task = Task(title=' '.join(self.args.task))
        subject.tasks.append(task)
        DBSession.commit()
        print('Started task: %s' % task)
|
yangdongsheng/autotest | client/base_sysinfo.py | Python | gpl-2.0 | 15,544 | 0.002059 | import os, shutil, re, glob, subprocess, logging, gzip
from autotest.client.shared import log, software_manager
from autotest.client.shared.settings import settings
from autotest.client import utils
_LOG_INSTALLED_PACKAGES = settings.get_value('CLIENT', 'log_installed_packages',
                                             type=bool, default=False)

# Default commands/files collected at each sysinfo hook point.
# (Reconstructed: the first constant's name was split by stray " | "
# artifacts; usage below confirms _DEFAULT_COMMANDS_TO_LOG_PER_TEST.)
_DEFAULT_COMMANDS_TO_LOG_PER_TEST = []
_DEFAULT_COMMANDS_TO_LOG_PER_BOOT = [
    "lspci -vvn", "gcc --version", "ld --version", "mount", "hostname",
    "uptime",
]
_DEFAULT_COMMANDS_TO_LOG_BEFORE_ITERATION = []
_DEFAULT_COMMANDS_TO_LOG_AFTER_ITERATION = []
_DEFAULT_FILES_TO_LOG_PER_TEST = []
_DEFAULT_FILES_TO_LOG_PER_BOOT = [
    "/proc/pci", "/proc/meminfo", "/proc/slabinfo", "/proc/version",
    "/proc/cpuinfo", "/proc/modules", "/proc/interrupts", "/proc/partitions",
]
_DEFAULT_FILES_TO_LOG_BEFORE_ITERATION = [
    "/proc/schedstat", "/proc/meminfo", "/proc/slabinfo", "/proc/interrupts"
]
_DEFAULT_FILES_TO_LOG_AFTER_ITERATION = [
    "/proc/schedstat", "/proc/meminfo", "/proc/slabinfo", "/proc/interrupts"
]
class loggable(object):
    """ Abstract class for representing all things "loggable" by sysinfo. """

    def __init__(self, logf, log_in_keyval):
        # logf: basename of the log file created under the log dir
        # log_in_keyval: whether the first line is echoed into the keyval
        self.logf = logf
        self.log_in_keyval = log_in_keyval

    def readline(self, logdir):
        """Return the first line of our log in *logdir*, or "" if absent."""
        path = os.path.join(logdir, self.logf)
        return utils.read_one_line(path) if os.path.exists(path) else ""
class logfile(loggable):
    """A loggable that copies an existing file into the log directory."""

    def __init__(self, path, logf=None, log_in_keyval=False):
        super(logfile, self).__init__(logf or os.path.basename(path),
                                      log_in_keyval)
        self.path = path

    def __repr__(self):
        return "sysinfo.logfile(%r, %r, %r)" % (self.path, self.logf,
                                                self.log_in_keyval)

    def __eq__(self, other):
        # Equal iff both are logfiles with the same source and target;
        # any other loggable compares unequal, anything else defers.
        if isinstance(other, logfile):
            return (self.path, self.logf) == (other.path, other.logf)
        if isinstance(other, loggable):
            return False
        return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __hash__(self):
        return hash((self.path, self.logf))

    def run(self, logdir):
        """Copy self.path into logdir; permission errors are logged only."""
        if os.path.exists(self.path):
            try:
                shutil.copyfile(self.path, os.path.join(logdir, self.logf))
            except IOError:
                logging.info("Not logging %s (lack of permissions)",
                             self.path)
class command(loggable):
    """A loggable that runs a shell command and captures its stdout."""

    def __init__(self, cmd, logf=None, log_in_keyval=False, compress_log=False):
        super(command, self).__init__(logf or cmd.replace(" ", "_"),
                                      log_in_keyval)
        self.cmd = cmd
        self._compress_log = compress_log

    def __repr__(self):
        return "sysinfo.command(%r, %r, %r)" % (self.cmd, self.logf,
                                                self.log_in_keyval)

    def __eq__(self, other):
        # Equal iff both are commands with the same cmd and log name.
        if isinstance(other, command):
            return (self.cmd, self.logf) == (other.cmd, other.logf)
        if isinstance(other, loggable):
            return False
        return NotImplemented

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __hash__(self):
        return hash((self.cmd, self.logf))

    def run(self, logdir):
        """Run self.cmd via the shell; stdout goes to logdir/self.logf."""
        env = os.environ.copy()
        env.setdefault("PATH", "/usr/bin:/bin")
        logf_path = os.path.join(logdir, self.logf)
        stdin = open(os.devnull, "r")
        stderr = open(os.devnull, "w")
        stdout = open(logf_path, "w")
        try:
            subprocess.call(self.cmd, stdin=stdin, stdout=stdout,
                            stderr=stderr, shell=True, env=env)
        finally:
            for handle in (stdin, stdout, stderr):
                handle.close()
        if self._compress_log and os.path.exists(logf_path):
            # -9 for best compression; a gzip failure is non-fatal
            utils.run('gzip -9 "%s"' % logf_path, ignore_status=True,
                      verbose=False)
class base_sysinfo(object):
    def __init__(self, job_resultsdir):
        """Build the default sets of loggables for each collection hook."""
        self.sysinfodir = self._get_sysinfodir(job_resultsdir)
        # pull in the post-test logs to collect
        self.test_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_PER_TEST:
            self.test_loggables.add(command(cmd))
        for filename in _DEFAULT_FILES_TO_LOG_PER_TEST:
            self.test_loggables.add(logfile(filename))
        # pull in the EXTRA post-boot logs to collect
        self.boot_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_PER_BOOT:
            self.boot_loggables.add(command(cmd))
        for filename in _DEFAULT_FILES_TO_LOG_PER_BOOT:
            self.boot_loggables.add(logfile(filename))
        # pull in the pre test iteration logs to collect
        self.before_iteration_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_BEFORE_ITERATION:
            self.before_iteration_loggables.add(
                command(cmd, logf=cmd.replace(" ", "_") + '.before'))
        for fname in _DEFAULT_FILES_TO_LOG_BEFORE_ITERATION:
            self.before_iteration_loggables.add(
                logfile(fname, logf=os.path.basename(fname) + '.before'))
        # pull in the post test iteration logs to collect
        self.after_iteration_loggables = set()
        for cmd in _DEFAULT_COMMANDS_TO_LOG_AFTER_ITERATION:
            self.after_iteration_loggables.add(
                command(cmd, logf=cmd.replace(" ", "_") + '.after'))
        for fname in _DEFAULT_FILES_TO_LOG_AFTER_ITERATION:
            self.after_iteration_loggables.add(
                logfile(fname, logf=os.path.basename(fname) + '.after'))
        # add in a couple of extra files and commands we want to grab
        self.test_loggables.add(command("df -mP", logf="df"))
        # We compress the dmesg because it can get large when kernels are
        # configured with a large buffer and some tests trigger OOMs or
        # other large "spam" that fill it up...
        self.test_loggables.add(command("dmesg -c", logf="dmesg",
                                        compress_log=True))
        self.boot_loggables.add(logfile("/proc/cmdline",
                                        log_in_keyval=True))
        # log /proc/mounts but with custom filename since we already
        # log the output of the "mount" command as the filename "mount"
        self.boot_loggables.add(logfile('/proc/mounts', logf='proc_mounts'))
        self.boot_loggables.add(command("uname -a", logf="uname",
                                        log_in_keyval=True))
        self.sm = software_manager.SoftwareManager()
    def __getstate__(self):
        # Drop the (unpicklable) SoftwareManager when pickling.
        ret = dict(self.__dict__)
        ret["sm"] = None
        return ret
    def serialize(self):
        # Export the boot/test loggable sets for later deserialize().
        return {"boot": self.boot_loggables, "test": self.test_loggables}
    def deserialize(self, serialized):
        # Restore the loggable sets produced by serialize().
        self.boot_loggables = serialized["boot"]
        self.test_loggables = serialized["test"]
@staticmethod
def _get_sysinfodir(resultsdir):
sysinfodir = os.path.join(resultsdir, "sysinfo")
if not os.path.exists(sysinfodir):
os.makedirs(sysinfodir)
return sysinfodir
def _get_reboot_count(self):
if not glob.glob(os.path.join(self.sysinfodir, "*")):
return -1
else:
return len(glob.glob(os.path.join(self.sysinfodir, "boot.*")))
    def _get_boot_subdir(self, next=False):
        # Return the per-boot subdir (boot.N) for the current — or, with
        # next=True, the upcoming — boot; the first boot uses sysinfodir
        # itself.  NOTE(review): the parameter name shadows builtin next().
        reboot_count = self._get_reboot_count()
        if next:
            reboot_count += 1
        if reboot_count < 1:
            return self.sysinfodir
        else:
            boot_dir = "boot.%d" % (reboot_count - 1)
            return os.path.join(self.sysinfodir, boot_dir)
def _get_iteration_subdir(self, test, iteration):
iter_dir = "iteration.%d" % iteration
logdir = os.path.join(self._get_sysinfodir(test.outputdir), ite |
michaelBenin/sqlalchemy | test/orm/test_unitofworkv2.py | Python | mit | 55,891 | 0.004938 | from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing.schema import Table, Column
from test.orm import _fixtures
from sqlalchemy import exc
from sqlalchemy.testing import fixtures
from sqlalchemy import Integer, String, ForeignKey, func
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, unitofwork, attributes,\
Session, class_mapper, sync, exc as orm_exc
from sqlalchemy.testing.assertsql import AllOf, CompiledSQL, Or
class AssertsUOW(object):
    # Mixin with helpers to build and inspect a UOWTransaction directly
    # from a Session's pending state, without emitting SQL.
    def _get_test_uow(self, session):
        # Mirror flush() setup: register new/dirty objects for save and
        # deleted objects for delete.
        uow = unitofwork.UOWTransaction(session)
        deleted = set(session._deleted)
        new = set(session._new)
        dirty = set(session._dirty_states).difference(deleted)
        for s in new.union(dirty):
            uow.register_object(s)
        for d in deleted:
            uow.register_object(d, isdelete=True)
        return uow
    def _assert_uow_size(self, session, expected ):
        # Assert how many post-sort flush actions the UOW would execute.
        uow = self._get_test_uow(session)
        postsort_actions = uow._generate_actions()
        print(postsort_actions)
        eq_(len(postsort_actions), expected, postsort_actions)
class UOWTest(_fixtures.FixtureTest,
        testing.AssertsExecutionResults, AssertsUOW):
    # Base for flush-ordering tests; fixture tables exist but no rows
    # are pre-inserted.
    run_inserts = None
class RudimentaryFlushTest(UOWTest):
    def test_one_to_many_save(self):
        # A new User with two new Addresses flushes parent INSERT first,
        # then child INSERTs carrying the freshly assigned user_id.
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)
        mapper(User, users, properties={
            'addresses':relationship(Address),
        })
        mapper(Address, addresses)
        sess = create_session()
        a1, a2 = Address(email_address='a1'), Address(email_address='a2')
        u1 = User(name='u1', addresses=[a1, a2])
        sess.add(u1)
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
                    "INSERT INTO users (name) VALUES (:name)",
                    {'name': 'u1'}
                ),
                CompiledSQL(
                    "INSERT INTO addresses (user_id, email_address) "
                    "VALUES (:user_id, :email_address)",
                    lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
                ),
                CompiledSQL(
                    "INSERT INTO addresses (user_id, email_address) "
                    "VALUES (:user_id, :email_address)",
                    lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
                ),
        )
    def test_one_to_many_delete_all(self):
        # Deleting parent and children together emits child DELETEs
        # before the parent DELETE (FK ordering).
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)
        mapper(User, users, properties={
            'addresses':relationship(Address),
        })
        mapper(Address, addresses)
        sess = create_session()
        a1, a2 = Address(email_address='a1'), Address(email_address='a2')
        u1 = User(name='u1', addresses=[a1, a2])
        sess.add(u1)
        sess.flush()
        sess.delete(u1)
        sess.delete(a1)
        sess.delete(a2)
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
                    "DELETE FROM addresses WHERE addresses.id = :id",
                    [{'id':a1.id},{'id':a2.id}]
                ),
                CompiledSQL(
                    "DELETE FROM users WHERE users.id = :id",
                    {'id':u1.id}
                ),
        )
    def test_one_to_many_delete_parent(self):
        # Deleting only the parent first nulls out each child's FK via
        # UPDATE, then deletes the parent row.
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)
        mapper(User, users, properties={
            'addresses':relationship(Address),
        })
        mapper(Address, addresses)
        sess = create_session()
        a1, a2 = Address(email_address='a | 1'), Address(email_address='a2')
        u1 = User(name='u1', addresses=[a1, a2])
        sess.add(u1)
        sess.flush()
        sess.delete(u1)
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
|                    "UPDATE addresses SET user_id=:user_id WHERE "
                    "addresses.id = :addresses_id",
                    lambda ctx: [{'addresses_id': a1.id, 'user_id': None}]
                ),
                CompiledSQL(
                    "UPDATE addresses SET user_id=:user_id WHERE "
                    "addresses.id = :addresses_id",
                    lambda ctx: [{'addresses_id': a2.id, 'user_id': None}]
                ),
                CompiledSQL(
                    "DELETE FROM users WHERE users.id = :id",
                    {'id':u1.id}
                ),
        )
    def test_many_to_one_save(self):
        # Many-to-one from Address to User: parent INSERT still precedes
        # the child INSERTs referencing it.
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User)
        })
        sess = create_session()
        u1 = User(name='u1')
        a1, a2 = Address(email_address='a1', user=u1), \
                    Address(email_address='a2', user=u1)
        sess.add_all([a1, a2])
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
                    "INSERT INTO users (name) VALUES (:name)",
                    {'name': 'u1'}
                ),
                CompiledSQL(
                    "INSERT INTO addresses (user_id, email_address) "
                    "VALUES (:user_id, :email_address)",
                    lambda ctx: {'email_address': 'a1', 'user_id':u1.id}
                ),
                CompiledSQL(
                    "INSERT INTO addresses (user_id, email_address) "
                    "VALUES (:user_id, :email_address)",
                    lambda ctx: {'email_address': 'a2', 'user_id':u1.id}
                ),
        )
    def test_many_to_one_delete_all(self):
        # Deleting children and the referenced parent: child DELETEs
        # run first, then the parent DELETE.
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user':relationship(User)
        })
        sess = create_session()
        u1 = User(name='u1')
        a1, a2 = Address(email_address='a1', user=u1), \
                    Address(email_address='a2', user=u1)
        sess.add_all([a1, a2])
        sess.flush()
        sess.delete(u1)
        sess.delete(a1)
        sess.delete(a2)
        self.assert_sql_execution(
                testing.db,
                sess.flush,
                CompiledSQL(
                    "DELETE FROM addresses WHERE addresses.id = :id",
                    [{'id':a1.id},{'id':a2.id}]
                ),
                CompiledSQL(
                    "DELETE FROM users WHERE users.id = :id",
                    {'id':u1.id}
                ),
        )
def test_many_to_one_delete_target(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User)
})
sess = create_session()
u1 = User(name='u1')
a1, a2 = Address(email_address='a1', user=u1), \
Address(email_ |
oostende/blackhole-2 | mytest.py | Python | gpl-2.0 | 17,187 | 0.026473 | import sys, os
# Make the zipped enigma python modules importable when present.
if os.path.isfile("/usr/lib/enigma2/python/enigma.zip"):
    sys.path.append("/usr/lib/enigma2/python/enigma.zip")
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
import Tools.RedirectOutput
import enigma
import eConsoleImpl
import eBaseImpl
# Replace the C++ timer/notifier/console bindings with their Python
# wrapper implementations.
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
from traceback import print_exc
profile("SimpleSummary")
from Screens import InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave
config.misc.load_unlinked_userbouquets = ConfigYesNo(default=True)
def setLoadUnlinkedUserbouquets(configElement):
    # Push the config value down into the DVB database backend.
    enigma.eDVBDB.getInstance().setLoadUnlinkedUserbouquets(configElement.value)
config.misc.load_unlinked_userbouquets.addNotifier(setLoadUnlinkedUserbouquets)
enigma.eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
import Components.ParentalControl
Components.ParentalControl.InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_SKIN
InitFallbackFiles()
profile("config.misc")
# Miscellaneous persistent settings and runtime (NoSave) state flags.
config.misc.radiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "radio.mvi"))
config.misc.blackradiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "black.mvi"))
config.misc.useTransponderTime = ConfigYesNo(default=True)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.DeepStandby = NoSave(ConfigYesNo(default=False)) # detect deepstandby
config.misc.RestartUI = ConfigYesNo(default=False) # detect user interface restart
config.misc.prev_wakeup_time = ConfigInteger(default=0)
#config.misc.prev_wakeup_time_type is only valid when wakeup_time is not 0
config.misc.prev_wakeup_time_type = ConfigInteger(default=0)
# 0 = RecordTimer, 1 = ZapTimer, 2 = Plugins, 3 = WakeupTimer
config.misc.epgcache_filename = ConfigText(default = "/hdd/epg.dat")
def setEPGCachePath(configElement):
enigma.eEPGCache.getInstance().setCacheFile(configElement.value)
#demo code for use of standby enter leave callbacks
#def leaveStandby():
# print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configElement):
# print "!!!!!!!!!!!!!!!!!enter standby num", configElement.value
# from Screens.Standby import inStandby
# inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useTransponderTimeChanged(configElement):
enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(configElement.value)
config.misc.useTransponderTime.addNotifier(useTransponderTimeChanged)
profile("Twisted")
try:
import twisted.python.runtime
import e2reactor
e2reactor.install()
from twisted.internet import reactor
def runReactor():
reactor.run(installSignalHandlers=False)
except ImportError:
print "twisted not available"
def runReactor():
enigma.runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.Wizard import wizardManager
from Screens.StartWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
had = dict()
def dump(dir, p = ""):
if isinstance(dir, dict):
for (entry, val) in dir.items():
dump(val, p + "(dict)/" + entry)
if hasattr(dir, "__dict__"):
for name, value in dir.__dict__.items():
if not had.has_key(str(value)):
had[str(value)] = 1
dump(value, p + "/" + str(name))
else:
print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
else:
print p + ":" + str(dir)
# + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
class Session:
def __init__(self, desktop = None, summary_desktop = None, navigation = None):
self.desktop = desktop
self.summary_desktop = summary_desktop
self.nav = navigation
self.delay_timer = enigma.eTimer()
self.delay_timer.callback.append(self.processDelay)
self.current_dialog = None
self.dialog_stack = [ ]
self.summary_stack = [ ]
self.summary = None
self.in_exec = False
self.screen = SessionGlobals(self)
for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
try:
p(reason=0, session=self)
except:
print "Plugin raised exception at WHERE_SESSIONSTART"
import traceback
traceback.print_exc()
def processDelay(self):
callback = self.current_dialog.callback
retval = s | elf.current_dialog.returnValue
if self.current_dialog.isTmp:
self.current_dialog.doClose()
# dump(self.current_dialog)
del self.current_dialog
else:
del self.current_dialog.callback
self.popCurrent()
if callback is not None:
callback(*retval)
def execBegin(self, firs | t=True, do_show = True):
assert not self.in_exec
self.in_exec = True
c = self.current_dialog
# when this is an execbegin after a execend of a "higher" dialog,
# popSummary already did the right thing.
if first:
self.instantiateSummaryDialog(c)
c.saveKeyboardMode()
c.execBegin()
# when execBegin opened a new dialog, don't bother showing the old one.
if c == self.current_dialog and do_show:
c.show()
def execEnd(self, last=True):
assert self.in_exec
self.in_exec = False
self.current_dialog.execEnd()
self.current_dialog.restoreKeyboardMode()
self.current_dialog.hide()
if last and self.summary:
self.current_dialog.removeSummary(self.summary)
self.popSummary()
def instantiateDialog(self, screen, *arguments, **kwargs):
return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
def deleteDialog(self, screen):
screen.hide()
screen.doClose()
def instantiateSummaryDialog(self, screen, **kwargs):
if self.summary_desktop is not None:
self.pushSummary()
summary = screen.createSummary() or SimpleSummary
arguments = (screen,)
self.summary = self.doInstantiateDialog(summary, arguments, kwargs, self.summary_desktop)
self.summary.show()
screen.addSummary(self.summary)
def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
# create dialog
dlg = screen(self, *arguments, **kwargs)
if dlg is None:
return
# read skin data
readSkin(dlg, None, dlg.skinName, desktop)
# create GUI view of this dialog
dlg.setDesktop(desktop)
dlg.applySkin()
return dlg
def pushCurrent(self):
if self.current_dialog is not None:
self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
self.execEnd(last=False)
def popCurrent(self):
if self.dialog_stack:
(self.current_dialog, do_show) = self.dialog_stack.pop()
self.execBegin(first=False, do_show=do_show)
else:
self.current_dialog = None
def execDialog(self, dialog):
self.pushCurrent()
self.current_dialog = dialog
self.current_dialog.isTmp = False
self.current_dialog.callback = None # would ca |
google-code/billreminder | src/lib/scheduler.py | Python | mit | 2,973 | 0.003027 | # -*- coding: utf-8 -*-
import time, datetime
from lib import i18n
SC_ONCE = _("Once")
SC_WEEKLY = _("Weekly")
SC_MONTHLY = _("Monthly")
def time_from_calendar(calendar):
''' Return a time object representing the date. '''
day = calendar[2]
month = calendar[1] + 1
year = calendar[0]
# Create datetime object
ret = datetime.datetime(year, month, day)
# Convert from datetime to time
ret = timestamp_from_datetime(ret)
return ret
def timestamp_from_datetime(date):
''' Convert a datetime object into a time object. '''
if isinstance(date, datetime.datetime):
ret = time.mktime(date.timetuple())
else:
ret = time.time()
return ret
def datetime_from_timestamp(timestamp):
''' Convert a time object into a datetime object. '''
if isinstance(timestamp, float) or isinstance(timestamp, int):
ret = datetime.datetime.fromtimestamp(timestamp)
else:
ret = datetime.datetime.now()
return ret
def get_schedule_timestamp(frequency, date):
''' Return the scheduled date from original date. '''
# Date conversion if needed
if isinstance(date, float) or isinstance(date, int):
date = datetime_from_timestamp(date)
if frequency == SC_WEEKLY:
delta = datetime.timedelta(days=7)
ret = date + delta
elif frequency == SC_MONTHLY:
nextMonth = date.month % 12 + 1
nextMonthYear = date.year + ((date.month) / 12)
ret = datetime.datetime(nextMonthYear, nextMonth, date.day)
else:
ret = date
# Convert to timestamp
ret = timestamp_from_datetime(ret)
return ret
def first_of_month(month, year):
''' Return the timestamp for the first day of the given month. '''
ret = datetime.datetime(year, month, 1, 0, 0, 0)
# Convert to timestamp
ret = timestamp_from_datetime(ret)
return ret
def last_of_month(month, year):
''' Return the timestamp for the last day of the given month. '''
nextMonth = month | % 12 + 1
nextMonthYear = year + ((month) / 12)
goback = datetime.timedelta(seconds=1)
# Create datetime object with a timestamp corresponding the end of day
| nextMonth = datetime.datetime(nextMonthYear, nextMonth, 1, 0, 0, 0)
ret = nextMonth - goback
# Convert to timestamp
ret = timestamp_from_datetime(ret)
return ret
def get_alarm_timestamp(alertDays, alertTime, origDate=None):
''' Calculate alarm timestamp. '''
if not origDate:
origDate = datetime_from_timestamp(origDate)
elif isinstance(origDate, float) or isinstance(origDate, int):
origDate = datetime_from_timestamp(origDate)
alertTime = alertTime.split(':')
delta = datetime.timedelta(days=alertDays)
alertDate = origDate - delta
ret = datetime.datetime(alertDate.year, alertDate.month, alertDate.day, int(alertTime[0]), int(alertTime[1]))
# Convert to timestamp
ret = timestamp_from_datetime(ret)
return ret
|
danille/ClothesAdvisor-server | main/scripts/color.py | Python | apache-2.0 | 354 | 0.002825 | class Color:
def __init__(self, name, hue, saturation, value):
self.name = name
self.hue = hue
self.saturation = saturation
self.value = value
| def __str__(self):
return 'Hue: {0}, Saturation: {1}, Value: {2}'.format(self.hue, self.saturation, self.value)
de | f get_name(self):
return self.name
|
brooksc/bugherd | setup.py | Python | mit | 4,028 | 0.005214 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from code | cs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
with | open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'bugherd',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version = '0.1.dev1',
description = 'Access bugherd.com API',
long_description=long_description,
# The project's main homepage.
url = 'https://github.com/brooksc/bugherd', # use the URL to the github repo
# Author details
author = 'Brooks Cutter',
author_email = 'brooksc@brooksc.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# 'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Bug Tracking',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='bugherd',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['requests'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'sample': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
) |
bzzzz/cython | Cython/Compiler/Symtab.py | Python | apache-2.0 | 80,141 | 0.00594 | #
# Symbol Table
#
import re
from Cython import Utils
from Errors import warning, error, InternalError
from StringEncoding import EncodedString
import Options, Naming
import PyrexTypes
from PyrexTypes import py_object_type, unspecified_type
import TypeSlots
from TypeSlots import \
pyfunction_signature, pymethod_signature, \
get_special_method_signature, get_property_accessor_signature
import ControlFlow
import Code
import __builtin__ as builtins
try:
set
except NameError:
from sets import Set as set
import copy
possible_identifier = re.compile(ur"(?![0-9])\w+$", re.U).match
nice_identifier = re.compile('^[a-zA-Z0-0_]+$').match
iso_c99_keywords = set(
['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
'volatile', 'while',
'_Bool', '_Complex'', _Imaginary', 'inline', 'restrict'])
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
if ((cname[:2] == '__'
and not (cname.startswith(Naming.pyrex_prefix)
or cname == '__weakref__'))
or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
class BufferAux(object):
writable_needed = False
def __init__(self, buffer_info_var, stridevars, shapevars,
suboffsetvars):
self.buffer_info_var = buffer_info_var
self.stridevars = stridevars
self.shapevars = shapevars
self.suboffsetvars = suboffsetvars
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
class Entry(object):
# A symbol table entry in a Scope or ModuleNamespace.
#
# name string Python name of entity
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
# is_cglobal boolean Is a C global variable
# is_pyglobal boolean Is a Python module-level variable
# or class attribute during
# class construction
# is_member boolean Is an assigned class member
# is_pyclass_attr boolean Is a name in a Python class namespace
# is_variable boolean Is a variable
# is_cfunction boolean Is a C function
# is_cmethod boolean Is a C method of an extension type
# is_unbound_cmethod boolean Is an unbound C method of an extension type
# is_anonymous boolean Is a anonymous pyfunction entry
# is_type boolean Is a type definition
# is_cclass boolean Is an extension class
# is_cpp_class boolean Is a C++ class
# is_const boolean Is a constant
# is_property boolean Is a property of an extension type:
# doc_cname string or None C const holding the docstring
# getter_cname string C func for getting property
# setter_cname string C func for setting or deleting property
# is_self_arg boolean Is the "self" arg of an exttype method
# is_arg boolean Is the arg of a method
# is_local boolean Is a local variable
# in_closure boolean Is referenced in an inner scope
# is_readonly boolean Can't be assigned to
# func_cname string C func implementing Python func
# func_modifiers [string] C function modifiers ('inline')
# pos position Source position where declared
# namespace_cname string If is_pyglobal, the C variable
# holding its home namespace
# pymethdef_cname string PyMethodDef structure
# signature Signature Arg & return types for Python func
# init_to_none boolean True if initial value should be None
# as_variable Entry Alternative interpretation of extension
# type name or builtin C function as a variable
# xdecref_cleanup boolean Use Py_XDECREF for error cleanup
# in_cinclude boolean Suppress C declaration code
# enum_values [Entry] For enum types, list of values
# qualified_name string "modname.funcname" or "modname.classname"
# or "modname.classname.funcname"
# is_declared_generic boolean Is declared as PyObject * even though its
# type is an extension type
# as_module None Module scope, if a cimported module
# is_inherited boolean Is an inherited attribute of an extension type
# pystring_cname string C name of Python version of string literal
# is_interned boolean For string const entries, value is interned
# is_identifier boolean For string const entries, value is an identifier
# used boolean
# is_special boolean Is a special method or property accessor
# of an extension type
# defined_in_pxd boolean Is defined in a .pxd file (not just declared)
# api boolean Generate C API for C class or function
# utility_code string Utility code needed when this entry is used
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
# Ideally this should not be necesarry.
# assignments [ExprNode] List of expressions that get assigned to this entry.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
inline_func_in_pxd = False
borrowed = 0
init = ""
visibility = 'private'
is_builtin = 0
is_cglobal = 0
is_pyglobal = 0
is_member = 0
is_pyclass_attr = 0
is_variable = 0
is_cfunction = 0
is_cmethod = 0
is_unbound_cmethod = 0
is_anonymous = 0
is_type = 0
is_cclass = 0
is_cpp_class = 0
is_const = 0
is_property = 0
doc_cname = None
getter_cname = None
setter_cname = None
is_self_arg = 0
is_arg = 0
is_local = 0
in_closure = 0
from_closure = 0
is_declared_generic = 0
is_readonly = 0
func_cna | me = None
func_modifiers = []
doc = None
init_to_none = | 0
as_variable = None
xdecref_cleanup = 0
in_cinclude = 0
as_module = None
is_inherited = 0
pystring_cname = None
is_identifier = 0
is_interned = 0
used = 0
is_special = 0
defined_in_pxd = 0
is_implemented = 0
api = 0
utility_code = None
is_overridable = 0
buffer_aux = None
prev_entry = None
might_overflow = 0
def __init__(self, name, cname, type, pos = None, init = None):
self.name = name
self.cname = cname
self.type = type
self.pos = pos
self.init = init
self.overloaded_alternatives = []
self.assignments = []
def __repr__(self):
return "Entry(name=%s, type=%s)" % (self.name, self.type)
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
error(self.pos, "Previous declaration is here")
def all_alternatives(self):
return [self] + self.overloaded_alternatives
class Scope(object):
# name string Unqualified name
# outer_scope Scope or None Enclosing scope
# entries {string : Entry} Python name to entry, non-types
# const_entries [Entry] Constant entries
# type_entries [Entry] Struct/union/enum/typedef/exttype entries
# sue_entri |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/scatter_profile.py | Python | gpl-2.0 | 556 | 0.010791 | #!/usr/bin/env python |
# -*- noplot -*-
"""
N Classic Base renderer Ext renderer
20 0.22 0.14 0.14
100 0.16 0.14 0.13
1000 0.45 0.26 0.17
10000 3.30 1.31 0.53
50000 19.30 6.53 1.98
"""
from pylab import *
import time
for N in (20,100,1000,10000,50000):
tstart = time.time()
x = 0.9*rand | (N)
y = 0.9*rand(N)
s = 20*rand(N)
scatter(x,y,s)
print '%d symbols in %1.2f s' % (N, time.time()-tstart)
|
FAForever/client | src/fa/factions.py | Python | gpl-3.0 | 1,836 | 0 | import random
from enum import Enum, unique
@unique
class Factions(Enum):
"""
Enum to represent factions. Numbers match up with faction identification
ids from the game.
"""
UEF = 1
AEON = 2
CYBRAN = 3
SERAPHIM = 4
# Shall remain the last element: not a real faction number.
RANDOM = 5
@staticmethod
def get_random_faction():
"""
:return: A random faction, but never RANDOM.
"""
possibilities = list(Factions)
possibilities.pop()
return random.choice(possibilities)
@staticmethod
def set_faction(sub_factions=[]):
if any(sub_factions):
possibilities = []
for faction, selected in zip(list(Factions)[:-1], sub_factions):
if selected:
possibilities.append(faction)
else:
possibilities = list(Factions)[:-1]
retu | rn ran | dom.choice(possibilities)
@staticmethod
def from_name(name):
name = name.lower()
if name == "uef":
return Factions.UEF
elif name == "aeon":
return Factions.AEON
elif name == "cybran":
return Factions.CYBRAN
elif name == "seraphim":
return Factions.SERAPHIM
elif name == "random":
return Factions.RANDOM
raise ValueError("Invalid faction name provided: {}".format(name))
def to_name(self):
if self == Factions.UEF:
return "uef"
elif self == Factions.AEON:
return "aeon"
elif self == Factions.CYBRAN:
return "cybran"
elif self == Factions.SERAPHIM:
return "seraphim"
elif self == Factions.RANDOM:
return "random"
raise ValueError("Invalid faction id provided: {}".format(self))
|
ConPaaS-team/conpaas | conpaas-services/src/conpaas/services/mysql/manager/config.py | Python | bsd-3-clause | 4,854 | 0.006799 | # -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from conpaas.core.log import create_logger
from conpaas.core.node import ServiceNode
E_ARGS_UNEXPECTED = 0
E_CONFIG_READ_FAILED = 1
E_CONFIG_NOT_EXIST = 2
E_UNKNOWN = 3
E_ARGS_MISSING = 4
E_ARGS_INVALID = 5
E_STATE_ERROR = 6
E_STRINGS = ['Unexpected arguments %s',
'Unable to open configuration file: %s',
'Configuration file does not exist: %s',
'Unknown error.',
'Missing argument: %s',
'Invalid argument: %s',
'Service in wrong state'
]
class SQLServiceNode(ServiceNode):
'''
Holds information on service nodes.
:param vm: Service node id
:type vm: array
:param runMySQL: Indicator if service node is running MySQL
:type runMySQL: boolean
'''
isNode=True
isGlb_node=False;
def __init__(self, node):
ServiceNode.__init__(self, node.vmid,
node.ip, node.private_ip,
node.cloud_name, role=node.role)
self.port = 5555
'''String representation of the ServiceNode.
@return: returns service nodes information. Id ip and if mysql is running on this service node.'''
def __repr__(self):
return 'ServiceNode(ip=%s, port=%s)' % (self.ip, self.port)
class GLBServiceNode(ServiceNode):
'''
Holds information on Galera Balancer nodes.
:param vm: Service node id
:type vm: array
:param runMySQL: Indicator if service node is running MySQL
:type runMySQL: boolean
'''
isGlb_node=True
isNode=False
def __init__(self, node):
ServiceNode.__init__(self, node.vmid,
node.ip, node.private_ip,
node.cloud_name, role=node.role)
self.port = 5555
'''String representation of the ServiceNode.
@return: returns service nodes information. Id ip and if mysql is running on this service node.'''
def __repr__(self):
return 'GLBServiceNode(ip=%s, port=%s)' % (self.ip, self.port)
class Configuration(object):
MYSQL_PORT = 3306
GLB_PORT = 3307
# The port on which the agent listens
AGENT_PORT = 5555
'''Galera Load Balancer Nodes'''
glb_service_nodes = {}
serviceNodes = {}
def __init__(self, configuration, logger):
'''Representation of the deployment configuration'''
self.logger = logger
self.mysql_count = 0
self.serviceNodes = {}
self.glb_service_nodes = {}
def getMySQLTuples(self):
'''Returns the list of service nodes as tuples <IP, PORT>.'''
return [[serviceNode.ip, self.MYSQL_PORT]
for serviceNode in self.serviceNodes.values()]
''' Returns the list of IPs of MySQL instances'''
def get_nodes_addr(s | elf):
return [serviceNode.ip for serviceNode in self.serviceNodes.values()]
def get_nodes(self):
""" Returns the list of MySQL nodes."""
return [serviceNode for serviceNode in self.serviceNodes.values()]
def get_glb_nodes(self):
''' Returns the list of GLB nodes'''
return [serviceNode for serviceNode i | n self.glb_service_nodes.values()]
def getMySQLNode(self, id):
if self.serviceNodes.has_key(id):
node = self.serviceNodes[id]
else:
node = self.glb_service_nodes[id]
return node
def addGLBServiceNodes(self, nodes):
'''
Add new GLB Node to the server (configuration).
'''
self.logger.debug('Entering addGLBServiceNodes')
for node in nodes:
self.glb_service_nodes[str(node.id)] = GLBServiceNode(node)
self.logger.debug('Exiting addGLBServiceNodes')
def removeGLBServiceNode(self, id):
'''
Remove GLB Node to the server (configuration).
'''
del self.glb_service_nodes[id]
def remove_glb_nodes(self, nodes):
for node in nodes:
del self.glb_service_nodes[node.id]
def addMySQLServiceNodes(self, nodes):
'''
Add new Service Node to the server (configuration).
'''
for node in nodes:
self.serviceNodes[str(node.id)] = SQLServiceNode(node)
def removeMySQLServiceNode(self, id):
'''
Remove Service Node to the server (configuration).
'''
del self.serviceNodes[id]
def remove_nodes(self, nodes):
for node in nodes:
self.logger.debug('RemoveNodes node.id=%s' % node.id )
self.logger.debug('RemoveNodes node=%s' % node )
self.logger.debug('RemoveNodes self.ServiceNodes=%s' % self.serviceNodes )
if self.serviceNodes.has_key(node.id) :
self.serviceNodes.pop(node.id, None)
else :
self.glb_service_nodes.pop(node.id,None)
|
ZTH1970/alcide | init.d/alcide-conf.py | Python | agpl-3.0 | 143 | 0.006993 | backlog = 2048
debug | = False
workers = 12
loglevel = "info"
secure_scheme_headers = {'X-FORWARDED-PROTOCOL': 'https | ', 'X-FORWARDED-SSL': 'on'}
|
Spotipo/spotipo | unifispot/ext/development.py | Python | agpl-3.0 | 902 | 0 | # coding: utf-8
def configure(app):
if app.config.get('DEBUG_TOOLBAR_ENABLED'):
try:
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
except ImportError:
app.logger.info('flask_debugtoolbar is not in | stalled')
if app.config.get('OPBEAT'):
try:
from opbeat.contrib.flask import Opbeat
Opbeat(
app,
logging=app.config.ge | t('OPBEAT', {}).get('LOGGING', False)
)
app.logger.info('opbeat configured!!!')
except ImportError:
app.logger.info('opbeat is not installed')
if app.config.get('SENTRY_ENABLED', False):
try:
from raven.contrib.flask import Sentry
app.sentry = Sentry(app)
except ImportError:
app.logger.info('sentry, raven is not installed')
|
tody411/InverseToon | inversetoon/core/project_normal_3d.py | Python | mit | 2,601 | 0.003076 |
# -*- coding: utf-8 -*-
## @package inversetoon.core.project_normal_3d
#
# inversetoon.core.project_normal_3d utility package.
# @author tody
# @date 2015/08/12
import numpy as np
from inversetoon.np.norm import normalizeVectors
from inversetoon.core.smoothing import smoothing
def projectTangent3D(tangents_2D, normals):
num_points = len(tangents_2D)
tangents_3D = np.zeros((num_points, 3))
tangents_3D[:, 0] = tangents_2D[:, 0]
tangents_3D[:, 1] = tangents_2D[:, 1]
for i in range(num_points):
tangent = tangents_3D[i]
normal = normals[i]
tangents_3D[i, 2] = - (normal[0] * tangent[0] + normal[1] * tangent[1]) / normal[2]
# tangents_3D = normalizeVectors(tangents_3D)
return tangents_3D
def smoothing3DVectors(vectors_3D, parameters):
vectors_3D = smoothing(vectors_3D, parameters, smooth=0.01)
vectors_3D = normalizeVectors(vectors_3D)
return vectors_3D
def smoothingTangentZ(tangents_3D, parameters):
tangents_3D[:, 2] = smoothing(tangents_3D[:, 2], parameters, smooth=1.0)
return tangents_3 | D
def projectNormals(L, I, tangents_3D, normals):
normals_smooth = np.array(normals)
num_points = len(normals)
| for i in range(num_points):
T = tangents_3D[i]
N = normals[i]
NdL = np.dot(N, L)
dN_L = (I - NdL) * L
NdT = np.dot(N, T)
dN_T = - NdT * T
normals_smooth[i] = N + dN_L + dN_T
normals_smooth = normalizeVectors(normals_smooth)
return normals_smooth
def preserveEndPoints(N_st, N_ed, normals_3D, sigma=0.05):
params = np.linspace(0.0, 1.0, len(normals_3D))
w_st = np.exp( - params * params / (sigma * sigma))
params_inv = 1.0 - params
w_ed = np.exp( - params_inv * params_inv / (sigma * sigma))
for i in range(3):
normals_3D[:, i] = (1.0 - w_st) * normals_3D[:, i] + w_st * N_st[i]
normals_3D[:, i] = (1.0 - w_ed) * normals_3D[:, i] + w_ed * N_ed[i]
def projectIteration(L, I, tangents_3D, normals_3D, parameters, num_iterations=3):
N_st = normals_3D[0]
N_ed = normals_3D[-1]
I = 0.5 * (np.dot(N_st, L) + np.dot(N_ed, L))
for iter in xrange(num_iterations):
tangents_3D = projectTangent3D(tangents_3D, normals_3D)
tangents_3D = smoothingTangentZ(tangents_3D, parameters)
tangents_3D = smoothing3DVectors(tangents_3D, parameters)
normals_3D = projectNormals(L, I, tangents_3D, normals_3D)
preserveEndPoints(N_st, N_ed, normals_3D)
normals_3D = smoothing3DVectors(normals_3D, parameters)
return normals_3D
|
mdaif/LDAP-Manager | ldap_manager/settings.py | Python | gpl-2.0 | 2,724 | 0.000367 | """
Django settings for ldap_manager project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wc%+3s9cv8w(egt8is*(!m9xysp&ubs-*7#^!3ulm1ff*t$9j)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gui'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ldap_manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': | [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_pr | ocessors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ldap_manager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
SESSION_COOKIE_HTTPONLY = False |
chintal/tendril-fs-server | setup.py | Python | mit | 1,119 | 0.000894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptool | s import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_fi | le.read()
requirements = [
'twisted',
'fs',
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='tendril-server-fs',
version='0.2.9',
description="XML-RPC Filesystem Server using Twisted and Pyfilesystems for Tendril",
long_description=readme,
author="Chintalagiri Shashank",
author_email='shashank@chintal.in',
url='https://github.com/chintal/tendril-fs-server',
packages=[
'fs_server',
],
package_dir={'fs_server': 'fs_server'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='tendril',
classifiers=[
'Development Status :: 4 - Beta',
"License :: OSI Approved :: MIT License",
'Natural Language :: English',
'Programming Language :: Python',
],
test_suite='tests',
tests_require=test_requirements
)
|
lr292358/connectomics | run.py | Python | bsd-2-clause | 17,790 | 0.0181 | import cPickle
import gzip
import time
import os
import sys
import cPickle as pickle
import gc
import numpy as np
from time import sleep
import auc
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from theano.ifelse import ifelse
import theano.printing
from collections import OrderedDict
from logisticRegression import LogisticRegression
from layers import DropoutHiddenLayer, HiddenLayer2d, HiddenLayer, ConvolutionalHiddenSoftmax, ConvolutionalLayer
import warnings
warnings.filterwarnings('ignore')
# Hyper-parameters and data-layout constants for the training run.
L = 330  # temporal window length fed to the conv net (frames per sample)
n_epochs = 20
Q = 14
NUM_TRAIN = 1200000 #(use multiplicity of 50'000)
MINIREAD = 1  # read only every MINIREAD-th portion of the data (speed knob)
batch_size= 1000
############
############ uncomment these lines below to verify if the code runs correctly, execution around 15 times faster
############
# n_epochs = 4
# Q = 4
# NUM_TRAIN = 250000
# MINIREAD = 4
# batch_size= 1000
THREAD = 20  # activity threshold used by read() to select "active" frames
span = 1
NN = 1000  # number of neurons / columns per frame in the time-series files
POOL = 10  # pooling factor of the last convolutional layer
Knormal = 1794 * 100 / MINIREAD  # line budget for read() at normal size
learning_rate0 = 0.2;
def ReLU(x):
    """Rectified linear unit: elementwise max(0, x) on a Theano tensor."""
    return T.maximum(0.0, x)
def read(s, sn ,sp, Kile):
    """Read one dataset (time series, optional network, positions).

    s  -- time-series CSV name (one frame per line, NN columns; presumably
          fluorescence values -- TODO confirm against the data format);
    sn -- network CSV name with "a,b,w" triples, or None;
    sp -- neuron-position CSV name ("x,y" per line);
    Kile -- approximate number of time-series lines to read before stopping.
    Returns (Tprim, C, P): an (NN+2) x ile float32 matrix of selected
    frame deltas (row 1000 = per-frame total activity, row 1001 = 1s),
    the NN x NN connectivity matrix, and the position list.
    """
    P=[]; lenn = []; nott = 0
    _nps = []
    _s = []
    with open(path+sp) as ff:
        for line in ff:
            x,y = line.split(',')
            P.append([float(x),float(y)])
    print "opening"
    with open(path+s) as f:
        rlast = []; cnt = 0; arrayprev = []; Ti = []; ile = 0
        for line in f:
            if cnt % 17940 == 0:
                print str(cnt/1794), "% ",
            if cnt != 0:
                pos = 0; r = []; rr2 = np.zeros(NN); rr = np.zeros(NN); rp = []
                for x in line.split(','):
                    x_f = float(x)
                    rp.append(x_f)
                # rr = delta of the current frame against the previous one.
                for x in rp:
                    val2 = x - arrayprev[pos]
                    rr[pos] = val2 # to sum
                    pos+=1
                nps = np.sum(rr)
                _w = [_x for _x in rr if _x >= 0.2]
                _wn = len(_w)
                # 'nott' tracks the length of the current run of active
                # frames (total delta >= THREAD); only such runs are kept.
                if nps < THREAD :
                    if nott > 0:
                        lenn.append(nott);
                        ile+=nott
                        nott = 0
                    else:
                        nott -=1
                else:
                    if nott <= 0:
                        nott = 1
                    else:
                        nott += 1
                pos+=1
                if nott >= 1:
                    Ti.append(rr)
                    _nps.append(nps)
                    # NOTE(review): both branches append 1, so _s is
                    # constant -- a run-start marker (e.g. 0/1) may have
                    # been intended.
                    if nott==1:
                        _s.append(1)
                    else:
                        _s.append(1)
                arrayprev = rp
            else:
                arrayprev = [float(x) for x in line.split(',')]
            if cnt > Kile + 10:
                break
            cnt+=1
    # Rows are aliased here, but np.asarray copies them into a real matrix.
    C = [[0]*len(rr)]*len(rr)
    C = np.asarray(C)
    print "\n\n selected frames number = ", ile, "\n\n"
    if sn != None:
        with open(path+sn) as ff:
            for line in ff:
                a,b,w = line.split(',')
                a = int(a); b = int(b); w = int(w)
                if w==1:
                    C[a-1][b-1] = 1;
    print "trans..."
    # Transpose the kept frames into (feature, time) layout with two
    # extra rows: total activity (row 1000) and the _s markers (row 1001).
    Tprim = np.empty((len(rr)+2, ile), np.float32) ##############
    for j in range(len(rr)):
        a = []
        for i in range(ile):
            Tprim[j][i] = Ti[i][j]
    for i in range(ile):
        Tprim[1000][i] = _nps[i]
    for i in range(ile):
        Tprim[1001][i] = _s[i]
    gc.collect()
    print "AVG SPLIT LEN: ", np.mean(lenn)
    return Tprim, C, P
def learnAndPredict(Ti, C, TOList):
rng = np.random.RandomState(SEED)
learning_rate = learning_rate0
print np.mean(Ti[1000,:])
aminW = np.amin(Ti[:1000,:])
amaxW = np.amax(Ti[:1000,:])
Ti[:1000,:] = (Ti[:1000,:] - aminW) / (amaxW - aminW)
astdW = np.std(Ti[:1000,:])
ameanW = np.mean(Ti[:1000,:])
Ti[:1000,:] = (Ti[:1000,:] - ameanW) / astdW
| aminacW = np.amin(Ti[1000,:])
amaxacW = np.amax(Ti[1000,:])
print aminW, amaxW, ami | nacW, amaxacW
Ti[1000,:] = (Ti[1000,:] - aminacW) / (amaxacW - aminacW)
astdacW = np.std(Ti[1000,:])
ameanacW = np.mean(Ti[1000,:])
Ti[1000,:] = (Ti[1000,:] - ameanacW) / astdacW
ile__ = len(TOList)
ileList = np.zeros(ile__)
for titer in range(len(TOList)):
print np.mean(TOList[titer][1000,:])
TOList[titer][:1000,:] = (TOList[titer][:1000,:] - aminW)/(amaxW - aminW)
TOList[titer][:1000,:] = (TOList[titer][:1000,:] - ameanW)/astdW
TOList[titer][1000,:] = (TOList[titer][1000,:] - aminacW)/(amaxacW - aminacW)
TOList[titer][1000,:] = (TOList[titer][1000,:] - ameanacW)/astdacW
_, ileList[titer] = TOList[titer].shape
_, ile = Ti.shape
N = NN
data = []; yyy = []; need = 1; BYL = {}; j= 0; dwa = 0; ONES = []; ZEROS = []
for i in range(NN):
for j in range(NN):
if i!= j:
if C[i][j]==1:
ONES.append((i,j))
else:
ZEROS.append((i,j))
Nones = len(ONES)
rng.shuffle(ONES)
Nzeros = len(ZEROS)
print Nones
print Nzeros
Needed = NUM_TRAIN/2
onesPerPair = Needed / Nones + 1
onesIter = 0
jj = 0
while jj < NUM_TRAIN:
if jj%300000 == 0:
print jj/300000,
need = 1 - need
if need == 1:
pairNo = onesIter % Nones
ppp = onesIter / Nones
s,t = ONES[pairNo]
shift = rng.randint(0, ile - L)
onesIter += 1
if need == 0:
zer = rng.randint(Nzeros)
s,t = ZEROS[zer]
del ZEROS[zer]
Nzeros -= 1
shift = rng.randint(0, ile - L)
x = np.hstack(( Ti[s][shift:shift+L], Ti[t][shift:shift+L], Ti[1000][shift:shift+L]))
y = C[s][t]
data.append(x); yyy.append(y)
jj+=1
data = np.array(data, dtype=theano.config.floatX)
is_train = np.array( ([0]*96 + [1,1,2,2]) * (NUM_TRAIN / 100))
yyy = np.array(yyy)
train_set_x0, train_set_y0 = np.array(data[is_train==0]), yyy[is_train==0]
test_set_x, test_set_y = np.array(data[is_train==1]), yyy[is_train==1]
valid_set_x, valid_set_y = np.array(data[is_train==2]), yyy[is_train==2]
n_train_batches = len(train_set_y0) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
n_test_batches = len(test_set_y) / batch_size
epoch = T.scalar()
index = T.lscalar()
x = T.matrix('x')
inone2 = T.matrix('inone2')
y = T.ivector('y')
print '... building the model'
#-------- my layers -------------------
#---------------------
layer0_input = x.reshape((batch_size, 1, 3, L))
Cx = 5
layer0 = ConvolutionalLayer(rng, input=layer0_input,
image_shape=(batch_size, 1, 3, L),
filter_shape=(nkerns[0], 1, 2, Cx), poolsize=(1, 1), fac = 0)
ONE = (3 - 2 + 1) / 1
L2 = (L - Cx + 1) / 1
#---------------------
Cx2 = 5
layer1 = ConvolutionalLayer(rng, input=layer0.output,
image_shape=(batch_size, nkerns[0], ONE, L2),
filter_shape=(nkerns[1], nkerns[0], 2, Cx2), poolsize=(1, 1), activation=ReLU, fac = 0)
ONE = (ONE - 2 + 1) /1
L3 = (L2 - Cx2 + 1) /1
#---------------------
Cx3 = 1
layer1b = ConvolutionalLayer(rng, input=layer1.output,
image_shape=(batch_size, nkerns[1], ONE, L3),
filter_shape=(nkerns[2], nkerns[1], 1, Cx3), poolsize=(1, POOL), activation=ReLU, fac = 0)
ONE = (ONE - 1 + 1) /1
L4 = (L3 - Cx3 + 1) /POOL
REGx = 100
#---------------------
layer2_input = layer1b.output.flatten(2)
print layer2_input.shape
use_b = False
layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[2]*L4 , n_out=REGx, activation=T.tanh,
use_bias = use_b)
layer3 = LogisticRegression(input=layer2.output, n_in=REGx, n_out=2)
cost = layer3.negative_log_likelihood(y)
out_x2 = theano.shared(np.asarray(np.zeros((N,L)), dtype=theano. |
FBSLikan/Cetico-TCC | thirdparty/illuminants/sourcecode/trainingSVM.py | Python | gpl-3.0 | 12,867 | 0.042745 | import numpy as np
import sys
import os
from sklearn import svm
from sklearn.externals import joblib
from sklearn import preprocessing
from sklearn.grid_search import GridSearchCV
import getSpaceChannelName as sc
def readTrainingTestFiles(outfile):
    """Parse a LibSVM-style training/test file.

    Each line is expected to look like ``label idx1:val1 idx2:val2 \\n``
    -- note the trailing space before the newline: the last two characters
    of every line are stripped, matching the writer's format.  Labels and
    feature values are returned as strings, exactly as they appear.

    :param outfile: path of the file to read.
    :return: tuple ``(features, labels)`` where ``features`` is a list of
        lists of value strings and ``labels`` is a list of label strings.
    """
    features = []
    labels = []
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original opened, seeked and closed by hand).
    with open(outfile, 'rt') as ofid:
        for line in ofid:
            # Drop the trailing " \n" pair, then tokenize on single spaces.
            tokens = line[:-2].split(" ")
            # First token is the label; the rest are "index:value" pairs.
            labels.append(tokens[0])
            features.append([pair.split(":")[1] for pair in tokens[1:]])
    return (features, labels)
def svmTraining(dataset,descriptor,space,channel, illuminant="IIC", trainingFolds=[1,2,3,4,5]):
    """Grid-search an RBF SVM for one descriptor/illuminant/space/channel
    combination and persist the fitted model under ../models/.

    The mutable default 'trainingFolds' is only iterated, never mutated,
    so it is harmless here.  NOTE(review): this module uses the removed
    sklearn.grid_search and sklearn.externals.joblib APIs -- it targets
    an old scikit-learn; confirm the pinned version before porting.
    """
    nameSpace,nameChannel = sc.getSpaceChannelName(space,channel)
    tt = descriptor.upper()
    # Loading Training Data
    fd=""
    for i in trainingFolds:
        fd = fd+str(i)+"-"
    fd = fd[:-1]
    outfile = "../training-test-files/" + tt + "-" + illuminant + "-" + nameSpace + "-" + nameChannel + "/" + dataset + "-SVM-training-folds-" + fd
    ft,lb = readTrainingTestFiles(outfile)
    trainingMatrixF = np.array(ft)
    trainingMatrixL = np.array(lb)
    #Calculate the weight of each class
    sp = lb.count('1')
    sn = lb.count('-1')
    # NOTE(review): under Python 2 sp/(sp+sn) is integer division, so
    # weightFake collapses to 0 (and weightNormal to 2) unless sn == 0;
    # a float cast is probably intended.
    weightFake = 2*(sp/(sp+sn))
    weightNormal = 2-weightFake
    #Scale Train Features
    #trainingMatrixFScaled = preprocessing.s | cale(trainingMatrixF)
    #Scale features between [-1,1]
    max_abs_scaler = preprocessing.MaxAbsScaler()
    trainingMatrixFScaled = max_abs_scaler.fit_transform(trainingMatrixF)
    # Make grid search for best set of parameters
    #Cs = np.logspace(-6, -1, 10)
    #svc = svm.SVC(kernel='rbf',class_weight={'1':weightNormal,'- | 1':weightFake})
    svc = svm.SVC()
    #clf = GridSearchCV(svc,dict(C=Cs),n_jobs=-1,param_grid={'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']})
    # NOTE(review): np.arange(0.0001, 0.001, 0.001) yields a single value,
    # so only gamma=0.0001 is actually searched.
    clf = GridSearchCV(svc,n_jobs=-1,param_grid={'C': list(range(1,1000,10)), 'gamma': np.arange(0.0001, 0.001,0.001), 'kernel': ['rbf'], 'class_weight':[{'1':weightNormal,'-1':weightFake}]})
    clf.fit(trainingMatrixFScaled, trainingMatrixL)
    npath = "../models/" + tt + "-" + illuminant + "-" + nameSpace + "-" + nameChannel + "/"
    if not os.path.exists(npath):
        os.makedirs(npath)
    modelName = npath + "model-" + dataset + "-" + tt + "-" + illuminant + "-" + nameSpace + "-" + nameChannel + ".pkl"
    joblib.dump(clf,modelName)
def main():
if len(sys.argv) < 3:
print("Provide correct number of arguments - svmTraining.py <dataset> <descriptorName> <training set - default \"1,2,3,4,5\">")
return
else:
try:
tt = sys.argv[2].upper()
dtst = sys.argv[1]
if (len(sys.argv) == 3):
if(tt == "ACC"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using ACC Color Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"acc",4,3,"IIC")
svmTraining(dtst,"acc",1,3,"IIC")
svmTraining(dtst,"acc",2,3,"IIC")
svmTraining(dtst,"acc",4,3,"GGE")
svmTraining(dtst,"acc",1,3,"GGE")
svmTraining(dtst,"acc",2,3,"GGE")
elif(tt == "BIC"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using BIC Color Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"bic",4,3,"IIC")
svmTraining(dtst,"bic",1,3,"IIC")
svmTraining(dtst,"bic",2,3,"IIC")
svmTraining(dtst,"bic",4,3,"GGE")
svmTraining(dtst,"bic",1,3,"GGE")
svmTraining(dtst,"bic",2,3,"GGE")
elif(tt == "CCV"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using CCV Color Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"ccv",4,3,"IIC")
svmTraining(dtst,"ccv",1,3,"IIC")
svmTraining(dtst,"ccv",2,3,"IIC")
svmTraining(dtst,"ccv",4,3,"GGE")
svmTraining(dtst,"ccv",1,3,"GGE")
svmTraining(dtst,"ccv",2,3,"GGE")
elif(tt == "LCH"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using LCH Color Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"lch",4,3,"IIC")
svmTraining(dtst,"lch",1,3,"IIC")
svmTraining(dtst,"lch",2,3,"IIC")
svmTraining(dtst,"lch",4,3,"GGE")
svmTraining(dtst,"lch",1,3,"GGE")
svmTraining(dtst,"lch",2,3,"GGE")
elif(tt == "SASI"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using SASI Texture Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"sasi",4,2,"IIC")
svmTraining(dtst,"sasi",0,0,"IIC")
svmTraining(dtst,"sasi",2,2,"IIC")
svmTraining(dtst,"sasi",4,2,"GGE")
svmTraining(dtst,"sasi",0,0,"GGE")
svmTraining(dtst,"sasi",2,2,"GGE")
elif(tt == "LAS"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using LAS Texture Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"las",4,2,"IIC")
svmTraining(dtst,"las",0,0,"IIC")
svmTraining(dtst,"las",2,2,"IIC")
svmTraining(dtst,"las",4,2,"GGE")
svmTraining(dtst,"las",0,0,"GGE")
svmTraining(dtst,"las",2,2,"GGE")
elif(tt == "UNSER"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using UNSER Texture Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"unser",4,2,"IIC")
svmTraining(dtst,"unser",0,0,"IIC")
svmTraining(dtst,"unser",2,2,"IIC")
svmTraining(dtst,"unser",4,2,"GGE")
svmTraining(dtst,"unser",0,0,"GGE")
svmTraining(dtst,"unser",2,2,"GGE")
elif(tt == "EOAC"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using EOAC Shape Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"eoac",4,2,"IIC")
svmTraining(dtst,"eoac",0,0,"IIC")
svmTraining(dtst,"eoac",2,2,"IIC")
svmTraining(dtst,"eoac",4,2,"GGE")
svmTraining(dtst,"eoac",0,0,"GGE")
svmTraining(dtst,"eoac",2,2,"GGE")
elif(tt == "SPYTEC"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using SPYTEC Shape Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"spytec",4,2,"IIC")
svmTraining(dtst,"spytec",0,0,"IIC")
svmTraining(dtst,"spytec",2,2,"IIC")
svmTraining(dtst,"spytec",4,2,"GGE")
svmTraining(dtst,"spytec",0,0,"GGE")
svmTraining(dtst,"spytec",2,2,"GGE")
else:
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>>> Image Descriptor %s not available! <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n " %tt)
elif(len(sys.argv) == 4):
tmp = sys.argv[3].split(",")
if(tt == "ACC"):
print("\n\n>>>>>>>>>>>>>>>>>>>>>>>>> Training an SVM Classifier Using ACC Color Descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n\n")
svmTraining(dtst,"acc",4,3,"IIC",tmp)
svmTraining(dtst,"acc",1,3,"IIC",tmp)
svmTraining(dtst,"acc",2,3,"IIC",tmp)
svmTraining(dtst,"acc",4,3,"GGE",tmp)
svmTra |
halbbob/dff | modules/viewer/hexedit/hexView.py | Python | gpl-2.0 | 7,468 | 0.00549 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009 ArxSys
#
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Jeremy Mounier <jmo@digital-forensic.org>
#
from modules.viewer.hexedit.hexItem import *
from modules.viewer.hexedit.offsetItem import *
from modules.viewer.hexedit.asciiItem import *
from modules.viewer.hexedit.scrollbar import hexScrollBar
from PyQt4.QtCore import Qt, QLineF
from PyQt4.QtGui import QGraphicsView, QKeySequence, QHBoxLayout, QWidget, QFont, QGraphicsScene, QGraphicsLineItem, QGraphicsTextItem
class wHex(QWidget):
    """Composite hex-editor widget: a graphics view plus a custom
    scrollbar, owning the hex/offset/ascii items and both cursors."""
    def __init__(self, parent):
        QWidget.__init__(self)
        self.init(parent)
        self.initShape()
        self.initMode()
    def init(self, parent):
        # 'parent' is the hex-editor controller, not the Qt parent widget.
        self.heditor = parent
    def initShape(self):
        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.view = hexView(self)
        self.scroll = hexScrollBar(self)
        #Init Items
        self.hexitem = hexItem(self)
        self.offsetitem = offsetItem(self)
        self.asciitem = asciiItem(self)
        self.hexcursor = hexCursor(self)
        self.asciicursor = asciiCursor(self)
        self.view.setItems()
        self.view.setCursors()
        self.hbox.addWidget(self.view)
        self.hbox.addWidget(self.scroll)
        self.setLayout(self.hbox)
    #Set Long File Mode
    def initMode(self):
        # "Long file mode": when the file has more lines than a signed
        # 32-bit int can address, offsets are mapped proportionally onto
        # the scrollbar range instead of one scrollbar value per line.
        self.lfmod = False
        self.maxint = 2147483647
        self.lines = self.heditor.filesize / self.heditor.bytesPerLine
        if self.isInt(self.lines):
            self.scroll.max = self.lines - 1
        else:
            self.lfmod = True
            self.scroll.max = self.maxint - 1
        self.scroll.setValues()
    def offsetToValue(self, offset):
        # Map a byte offset to a scrollbar value (proportional in long
        # file mode, one value per line otherwise).
        if self.isLFMOD():
            # print (self.maxint * offset) / self.heditor.filesize
            return ((self.maxint * offset) / self.heditor.filesize)
        else:
            return (offset / self.heditor.bytesPerLine)
    def isLFMOD(self):
        return self.lfmod
    def isInt(self, val):
        # NOTE(review): the Python 2 form 'except ValueError, TypeError'
        # catches only ValueError and binds it to the name TypeError; the
        # trailing 'else' clause is unreachable because the 'try' body
        # always returns.
        try:
            res = int(val)
            if res < 2147483647:
                return True
            else:
                return False
        except ValueError, TypeError:
            return False
        else:
            return False
class hexView(QGraphicsView):
    def __init__(self, parent):
        # 'parent' is the wHex container; it is also used as the Qt
        # parent of this QGraphicsView.
        QGraphicsView.__init__(self, None, parent)
        self.init(parent)
        self.initShape()
    def init(self, parent):
        """Store back-references, create the scene, configure the view."""
        self.whex = parent
        self.heditor = self.whex.heditor
        #Init scene
        self.__scene = QGraphicsScene(self)
        self.setScene(self.__scene)
        #Get heditor stuff
        # The built-in vertical scrollbar is disabled: scrolling is driven
        # by the external hexScrollBar instead.
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setAlignment(Qt.AlignLeft)
    def setItems(self):
        """Add the offset/hex/ascii text items owned by wHex to the scene."""
        self.__scene.addItem(self.whex.offsetitem)
        self.__scene.addItem(self.whex.hexitem)
        self.__scene.addItem(self.whex.asciitem)
    def initShape(self):
        """Draw the column headers and the two vertical separator lines."""
        self.initHeads()
        #Line decoration
        offsetLine = QGraphicsLineItem(QLineF(90, 0, 90, 700))
        asciiLine = QGraphicsLineItem(QLineF(480, 0, 480, 700))
        #Add to scene
        self.__scene.addItem(offsetLine)
        self.__scene.addItem(asciiLine)
    def setCursors(self):
        """Add the hex and ascii cursor items owned by wHex to the scene."""
        self.__scene.addItem(self.whex.hexcursor)
        self.__scene.addItem(self.whex.asciicursor)
    def initHeads(self):
        """Create and place the Offset / hex-column / Ascii header labels.

        NOTE(review): QColor is used below but does not appear in the
        visible PyQt4.QtGui import list at the top of this file -- confirm
        it is imported elsewhere.
        """
        self.offHead = QGraphicsTextItem()
        self.hexHead = QGraphicsTextItem()
        self.asciiHead = QGraphicsTextItem()
        #Set Color
        self.offHead.setDefaultTextColor(QColor(Qt.red))
        self.hexHead.setDefaultTextColor(QColor(Qt.black))
        self.asciiHead.setDefaultTextColor(QColor(Qt.darkCyan))
        #Create Font
        self.font = QFont("Gothic")
        self.font.setFixedPitch(1)
        self.font.setBold(False)
        self.font.setPixelSize(14)
        #Set Font
        self.offHead.setFont(self.font)
        self.hexHead.setFont(self.font)
        self.asciiHead.setFont(self.font)
        #Set Text
        self.offHead.setPlainText("Offset")
        self.hexHead.setPlainText("0 1 2 3 4 5 6 7 8 9 A B C D E F")
        self.asciiHead.setPlainText("Ascii")
        #Position
        self.offHead.setPos(20, 0)
        self.hexHead.setPos(95, 0)
        self.asciiHead.setPos(520, 0)
        #Add to scene
        self.__scene.addItem(self.offHead)
        self.__scene.addItem(self.hexHead)
        self.__scene.addItem(self.asciiHead)
        headLine = QGraphicsLineItem(QLineF(0, 20, 615, 20))
        self.__scene.addItem(headLine)
    def move(self, step, way):
        """Scroll the view by *step* lines: way 0 = up, way 1 = down.

        Clamps at the start of the file (offset 0) and near the end, and
        keeps the external scrollbar in sync -- proportionally when the
        widget is in long-file mode, one value per line otherwise.
        """
        #step: line = 1 * bytesPerLine, page = pagesize, wheel = 3 * bytesPerLine
        offset = self.heditor.currentOffset
        if way == 0:
            #UP
            if (offset - (step * self.heditor.bytesPerLine)) >= 0:
                self.heditor.readOffset(offset - (step * self.heditor.bytesPerLine))
                if self.whex.isLFMOD():
                    self.whex.scroll.setValue(self.whex.offsetToValue(offset - step * (self.heditor.bytesPerLine)))
                else:
                    self.whex.scroll.setValue(self.whex.scroll.value() - step)
            else:
                # Would scroll past the beginning: clamp to offset 0.
                self.heditor.readOffset(0)
                self.whex.scroll.setValue(0)
        elif way == 1:
            #Down
            if (offset + (step * self.heditor.bytesPerLine)) <= (self.heditor.filesize - (step * self.heditor.bytesPerLine)):
                self.heditor.readOffset(offset + (step * self.heditor.bytesPerLine))
                if self.whex.isLFMOD():
                    self.whex.scroll.setValue(self.whex.offsetToValue(offset + step * (self.heditor.bytesPerLine)))
                else:
                    self.whex.scroll.setValue(self.whex.scroll.value() + step)
            else:
                # Would scroll past the end: show the last few lines.
                self.heditor.readOffset(self.heditor.filesize - 5 * (self.heditor.bytesPerLine))
                self.whex.scroll.setValue(self.whex.scroll.max)
####################################
# Navigation Operations #
####################################
def wheelEvent(self, event):
offset = self.heditor.currentOffset
if event.delta() > 0:
self.move(3, 0)
else:
self.move(3, 1)
def keyPressEvent(self, keyEvent):
# off = self.heditor.currentOffset
if keyEvent.matches(QKeySequence.MoveToNextPage):
self.move(self.heditor.pageSize / self.heditor.bytesPerLine, 1)
elif keyEvent.matches(QKeySequence.MoveToPreviousPage):
self.m | ove(self.heditor.pageSize / self.heditor.bytesPerLine, 0)
elif keyEvent.matches(QKeySequence.MoveToNextWord):
print "Next Word"
elif keyEvent.matches(QKeySequence.MoveToPreviousWord):
print "Pre | vious word"
elif keyEvent.matches(QKeySequence.MoveToNextLine):
print "Next Line"
elif keyEvent.matches(QKeySequence.MoveToPreviousLine):
print "Previous Line"
|
tensorflow/text | tensorflow_text/python/ops/mst_ops_test.py | Python | apache-2.0 | 5,182 | 0.006561 | # coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for maximum spanning tree ops."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow_text.python.ops import mst_ops
class MstOpsTest(test.TestCase):
  """Unit tests for the maximum-spanning-tree op and its gradient."""
  @test_util.run_all_in_graph_and_eager_modes
  def testMaximumSpanningTree(self):
    """Tests that the MST op can recover a simple tree."""
    # The first batch element prefers 3 as root, then 3->0->1->2, for a total
    # score of 4+2+1=7. The second batch element is smaller and has reversed
    # scores, so 0 is root and 0->2->1.
    num_nodes = constant_op.constant([4, 3], dtypes.int32)
    scores = constant_op.constant([[[0, 0, 0, 0],
                                    [1, 0, 0, 0],
                                    [1, 2, 0, 0],
                                    [1, 2, 3, 4]],
                                   [[4, 3, 2, 9],
                                    [0, 0, 2, 9],
                                    [0, 0, 0, 9],
                                    [9, 9, 9, 9]]],
                                  dtypes.int32)  # pyformat: disable
    (max_scores, argmax_sources) = mst_ops.max_spanning_tree(
        num_nodes, scores, forest=False)
    self.assertAllEqual(max_scores, [7, 6])
    # argmax_sources[b][t] is the selected source (head) of node t; a root
    # selects itself, and padding beyond num_nodes[b] is -1.
    self.assertAllEqual(argmax_sources, [[3, 0, 1, 3],
                                         [0, 2, 0, -1]])  # pyformat: disable
@test_util.run_deprecated_v1
def testMaximumSpanningTreeGradient(self):
"""Tests the MST max score gradient."""
with self.test_session() as session:
num_nodes = constant_op.constant([4, 3], dtypes.int32)
scores = constant_op.constant([[[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 2, 0, 0],
[1, 2, 3, 4]],
[[4, 3, 2, 9],
[0, 0, 2, 9],
[0, 0, 0, 9],
[9, 9, 9, 9]]],
dtypes.int32) # pyformat: disable
mst_ops.max_spanning_tree(num_nodes, scores, forest=False, name='MST')
mst_op = session.graph.get_operation_by_name('MST')
d_loss_d_max_scores = constant_op.constant([3, 7], dtypes.float32)
d_loss_d_num_nodes, d_loss_d_scores = (
mst_ops.max_spanning_tree_gradient(mst_op, d_loss_d_max_scores))
# The num_nodes input is non-differentiable.
self.assertIs(d_loss_d_num_nodes, None)
self.assertAllEqual(d_loss_d_scores.eval(),
[[[0, 0, 0, 3],
[3, 0, 0, 0],
[0, 3, 0, | 0],
[0, 0, 0, 3]],
[[7, 0, 0, 0],
[0, 0, 7, 0],
| [7, 0, 0, 0],
[0, 0, 0, 0]]]) # pyformat: disable
  @test_util.run_deprecated_v1
  def testMaximumSpanningTreeGradientError(self):
    """Numerically validates the max score gradient."""
    with self.test_session():
      # The maximum-spanning-tree-score function, as a max of linear functions,
      # is piecewise-linear (i.e., faceted). The numerical gradient estimate
      # may be inaccurate if the epsilon ball used for the estimate crosses an
      # edge from one facet to another. To avoid spurious errors, we manually
      # set the sample point so the epsilon ball fits in a facet. Or in other
      # words, we set the scores so there is a non-trivial margin between the
      # best and second-best trees.
      scores_raw = [[[0, 0, 0, 0],
                     [1, 0, 0, 0],
                     [1, 2, 0, 0],
                     [1, 2, 3, 4]],
                    [[4, 3, 2, 9],
                     [0, 0, 2, 9],
                     [0, 0, 0, 9],
                     [9, 9, 9, 9]]]  # pyformat: disable
      # Use 64-bit floats to reduce numerical error.
      scores = constant_op.constant(scores_raw, dtypes.float64)
      init_scores = np.array(scores_raw)
      num_nodes = constant_op.constant([4, 3], dtypes.int32)
      max_scores = mst_ops.max_spanning_tree(num_nodes, scores, forest=False)[0]
      gradient_error = test.compute_gradient_error(scores, [2, 4, 4],
                                                   max_scores, [2], init_scores)
      # NOTE(review): this only checks that compute_gradient_error returned
      # a value; asserting the error is small (e.g. assertLess with a
      # tolerance) may have been intended -- confirm.
      self.assertIsNot(gradient_error, None)
if __name__ == '__main__':
test.main()
|
fmca/ctxpy | ctx/toolkit.py | Python | mit | 2,280 | 0.001754 | from abc import abstractmethod
from threading import Timer
from ctx.uncertainty.measurers import clear_dobson_paddy
class Event:
    """A typed notification passed from an Observable to its Observers.

    ``type`` identifies the kind of event; all extra keyword arguments are
    collected into the ``properties`` dict.  ``Observable.notify`` later
    attaches a ``source`` attribute naming the emitting object.
    """
    def __init__(self, type, **kwargs):
        # 'type' shadows the builtin, but it is part of the public call
        # signature (callers may pass it by keyword), so the name is kept.
        self.type = type
        self.properties = kwargs
    def __repr__(self):
        # Added for debuggability; purely additive, existing behavior kept.
        return '{0}(type={1!r}, properties={2!r})'.format(
            type(self).__name__, self.type, self.properties)
class Observer:
    """Interface for objects that subscribe to an Observable."""
    def update(self, event):
        """Handle *event*; subclasses must override.

        Fix: the original abstract signature took no ``event`` argument
        even though ``Observable.notify`` always calls ``update(event)``
        (and ``Widget.update`` already overrides it with that shape); the
        parameter is added so the base signature matches actual usage.
        """
        raise NotImplementedError("Not implemented")
class Observable:
    """Subject side of the observer pattern: keeps subscribers and
    pushes events to them.

    ``notify`` stamps each event with a ``source`` attribute pointing at
    this object before delivery, so handlers can tell who emitted it.
    """
    def __init__(self):
        # Subscribed observers, notified in registration order.
        self._observers = []
    def register(self, observer):
        """Subscribe *observer*; it must expose an ``update(event)`` method."""
        self._observers.append(observer)
    def notify(self, event):
        """Deliver *event* to every registered observer.

        Robustness fix: iterate over a snapshot of the subscriber list so
        an observer that registers another observer from inside its
        callback cannot disturb the iteration.
        """
        event.source = self
        for observer in list(self._observers):
            observer.update(event)
class Widget(Observable, Observer):
    """Observable/observer hybrid that aggregates context generators.

    A widget subscribes itself to every generator it is given, can relay
    information to its own observers, and exposes the generators' current
    property values through get_property().
    """
    @abstractmethod
    def update(self, event):
        """Handle an event coming from one of the attached generators."""
        pass
    def __init__(self, type, status_name, *generators):
        super(Widget, self).__init__()
        self.type = type
        self.status_name = status_name
        self.status = None
        self.generators = generators
        # Subscribe to each generator so our update() runs on its events.
        for source in generators:
            source.register(self)
    def get_property(self, type):
        """Return the current property of the first generator whose type
        matches *type*; implicitly returns None when none matches."""
        for source in self.generators:
            if source.type == type:
                return source.property
class Generator(Observable):
    """Base class for context generators.

    A generator periodically samples a context property via generate()
    and notifies its observers whenever the sampled value differs from
    the stored one and passes the certainty check.
    """
    def __init__(self, type, relevance, threshold, certainty_measurer=clear_dobson_paddy):
        super().__init__()
        self.certainty_measurer = certainty_measurer
        self.property = None
        self.type = type
        self.relevance = relevance
        self.threshold = threshold
    def generate(self):
        """Produce a sample dict, e.g. {"value": 12, "certainty": 0.9}.
        Subclasses must override."""
        raise NotImplementedError("Not implemented")
    def has_acceptable_certainty(self, new_property):
        """True when the measured certainty of *new_property* beats the
        configured threshold."""
        # NOTE(review): reads the 'accuracy' key although generate() is
        # documented to return 'certainty' -- confirm which key subclasses
        # actually produce.
        level = self.certainty_measurer(self.relevance, new_property['accuracy'])
        return level > self.threshold
    def start(self, delay=5):
        """Sample once, notify observers on an accepted change, and
        re-arm a timer so sampling repeats every *delay* seconds."""
        sample = self.generate()
        changed = sample['value'] != self.property
        if changed and self.has_acceptable_certainty(sample):
            self.property = sample['value']
            super().notify(Event(self.type, property=sample['value']))
        Timer(delay, lambda: self.start(delay), ()).start()
brycepg/cave-dweller | tests/exploratory/sdl_render.py | Python | gpl-3.0 | 680 | 0.010294 | """I got sys_register_SDL_renderer to work!"""
import time
import sy | s
import os
sys.path.append("../../cave_dweller")
import libtcodpy as libtcod
from ctypes import *
import draw_text
def render(surface):
    """Callback handed to libtcod's SDL renderer hook: draws a TTF
    string directly onto the SDL frame surface each frame."""
    draw_text.draw_text("it works!", surface, 25, 25)
    #sdl.fill_circle(surface, 1, 1, 5, 5)
|
# Demo body: set up the libtcod console, register the custom SDL renderer
# callback above, then keep the window alive for ten seconds.
libtcod.console_set_custom_font(os.path.join('../../fonts', 'dejavu12x12_gs_tc.png'), libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(90,40, 'test', False, libtcod.RENDERER_SDL)
# Set font size, init ttf
draw_text.set_font(pt_size=12)
libtcod.sys_register_SDL_renderer(render)
libtcod.sys_set_fps(40)
libtcod.console_flush()
time.sleep(10)
|
shanzhenren/PLE | Classifier/PLSVM.py | Python | gpl-3.0 | 4,745 | 0.002529 | from __future__ import division
__author__ = 'wenqihe'
import sys
import random
import math
class PLSVM:
    def __init__(self, feature_size, label_size, type_hierarchy, lambda_reg=0.1, max_iter=5000, threshold=0.5, batch_size=100):
        # feature_size: dimensionality of the binary feature space.
        # label_size: number of candidate type labels.
        # type_hierarchy: object exposing a child->parent id dict as
        #     ._type_hierarchy (used by predict()).
        # lambda_reg / max_iter / batch_size: SGD hyper-parameters.
        # threshold: minimum subtype score accepted during prediction.
        self._feature_size = feature_size
        self._label_size = label_size
        self._type_hierarchy = type_hierarchy
        # label x feature weight matrix, initialised uniformly in (0, 1).
        self._weight = [[0 for col in range(feature_size)] for row in range(label_size)]
        for i in xrange(label_size):
            for j in xrange(feature_size):
                self._weight[i][j] = random.uniform(0, 1)
        self._lambda_reg = lambda_reg
        self._max_iter = max_iter
        self._threshold = threshold
        self._batch_size = batch_size
    def fit(self, train_x, train_y):
        """Train the weights by stochastic subgradient descent with a
        1/(lambda*t) decaying learning rate (Pegasos-style -- presumably;
        confirm against the reference implementation).

        :param train_x: list of list -- each example is a list of active
            (binary) feature indices.
        :param train_y: list of list -- positive label ids per example.
        :return: None; updates self._weight in place.
        """
        m = len(train_y)
        # NOTE(review): 'batch' and 'dW' are computed but never used -- the
        # batched update path is commented out below.
        batch = int(math.ceil(m/self._batch_size))
        for t in xrange(1, self._max_iter):
            eta_t = 1.0/(self._lambda_reg*t)
            dW = [[0 for col in range(self._feature_size)] for row in range(self._label_size)]
            for j in xrange(self._batch_size):
                i = random.randint(0, m-1)
                x = train_x[i]
                y = train_y[i]
                # Negative candidates: every label not assigned to x.
                ny = [k for k in range(self._label_size) if k not in y]
                yi = self.find_max(y, x)
                nyi = self.find_max(ny, x)
                # Push the best positive label up and the best negative
                # label down, with L2 weight decay on both.
                for feature in x:
                    self._weight[yi][feature] = self._weight[yi][feature]*(1-eta_t*self._lambda_reg) + eta_t
                    self._weight[nyi][feature] = self._weight[nyi][feature]*(1-eta_t*self._lambda_reg) - eta_t
                # self.update_weight(dW, eta_t, 1)
            sys.stdout.write('{0} iteration done.\r'.format(t))
            sys.stdout.flush()
    def predict(self, x):
        """Predict the set of label ids for feature list x.

        Takes the arg-max label, adds all of its ancestors, then walks
        down the hierarchy adding the best-scoring subtype at each level
        while its score exceeds self._threshold.
        """
        labels = set()
        # child id -> parent id mapping of the type hierarchy.
        parent_mapping = self._type_hierarchy._type_hierarchy
        scores = []
        max_index = 0
        max_value = self.inner_prod(self._weight[0], x)
        scores.append(max_value)
        for i in xrange(1, self._label_size):
            temp = self.inner_prod(self._weight[i], x)
            scores.append(temp)
            if temp>max_value:
                max_index = i
                max_value = temp
        # print scores
        labels.add(max_index)
        # Add parent of max_index if any
        temp = max_index
        while temp in parent_mapping:
            labels.add(parent_mapping[temp])
            temp = parent_mapping[temp]
        # add child of max_index if meeting threshold
        temp = max_index
        # NOTE(review): 'temp' is only reassigned when an acceptable
        # subtype is found; when none is, 'temp' never reaches -1 and this
        # loop appears not to terminate -- looks like a missing 'break' /
        # 'else'.  Verify against upstream before relying on predict().
        while temp != -1:
            max_sub_index = -1
            max_sub_score = -sys.maxint
            for child in parent_mapping:
                # check the maximum subtype
                if parent_mapping[child] == temp:
                    if child < self._label_size:
                        # print child
                        if max_sub_score < scores[child]:
                            max_sub_index = child
                            max_sub_score = scores[child]
            if max_sub_index != -1 and max_sub_score > self._threshold:
                labels.add(max_sub_index)
                temp = max_sub_index
        return labels
    def find_max(self, Y, x):
        """Return the label id in Y whose weight vector scores highest on
        feature list x (the shuffle breaks score ties randomly).

        NOTE(review): random.shuffle(Y) mutates the caller's list in
        place -- fit() passes train_y[i] directly, so the label order in
        the training data is reshuffled as a side effect.
        """
        random.shuffle(Y)
        y = Y[0]
        max_value = self.inner_prod(self._weight[y], x)
        for i in xrange(1, len(Y)):
            temp = self.inner_prod(self._weight[Y[i]], x)
            if temp > max_value:
                y = Y[i]
                max_value = temp
        return y
def update_weight(self, dW, eta_t, m):
for i in xrange | (self._label_size):
# L2 = 0
for j in xrange(self._feature_size):
self._weight[i][j] = self._weight[i][j]*(1-eta_t*self._lambda_reg) + eta_t*dW[i][j]/m
# L2 += self._weight[i][j] * self._weight[i][j]
# if L2>0:
# factor = min(1, 1/(math.sqrt(self._lambda_reg)*math.sqrt(L2)))
# | if factor < 1:
# for j in xrange(self._feature_size):
# self._weight[i][j] *= factor
@staticmethod
def inner_prod(weight, x):
result = 0
for feature in x:
result += weight[feature]
return result
@staticmethod
def kernel(x1, x2):
i1 = 0
i2 = 0
result = 0
while i1<len(x1) and i2<len(x2):
if x1[i1] == x2[i2]:
result += 1
i1 += 1
i2 += 1
elif x1[i1] < x2[i2]:
i1 += 1
else:
i2 += 1
return result
|
LE-GO-LE-STOP/Robocup-Junior-Rescue-2016 | src/python/ev3dev/ev3.py | Python | gpl-3.0 | 6,266 | 0.002713 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 Eric Pascual
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without r | estriction, includi | ng without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
"""
An assortment of classes modeling specific features of the EV3 brick.
"""
from .core import *
# Well-known EV3 port names as used by the ev3dev drivers:
# motor outputs A-D and sensor inputs 1-4.
OUTPUT_A = 'outA'
OUTPUT_B = 'outB'
OUTPUT_C = 'outC'
OUTPUT_D = 'outD'
INPUT_1 = 'in1'
INPUT_2 = 'in2'
INPUT_3 = 'in3'
INPUT_4 = 'in4'
class Leds(object):
    """
    The EV3 LEDs.

    Each physical LED cluster (left/right) has a red and a green element;
    colors are expressed as (red, green) brightness tuples.  The section
    between the ~autogen markers is generated code -- regenerate rather
    than hand-edit it.
    """
    # ~autogen led-colors platforms.ev3.led>currentClass
    red_left = Led(name_pattern='ev3:left:red:ev3dev')
    red_right = Led(name_pattern='ev3:right:red:ev3dev')
    green_left = Led(name_pattern='ev3:left:green:ev3dev')
    green_right = Led(name_pattern='ev3:right:green:ev3dev')
    # Groups: (red, green) LED pairs per side.
    LEFT = ( red_left, green_left, )
    RIGHT = ( red_right, green_right, )
    # Colors: (red, green) brightness fractions.
    BLACK = ( 0, 0, )
    RED = ( 1, 0, )
    GREEN = ( 0, 1, )
    AMBER = ( 1, 1, )
    ORANGE = ( 1, 0.5, )
    YELLOW = ( 0.1, 1, )
    @staticmethod
    def set_color(group, color, pct=1):
        """
        Sets brightness of leds in the given group to the values specified in
        color tuple. When percentage is specified, brightness of each led is
        reduced proportionally.
        Example::
            Leds.set_color(LEFT, AMBER)
        """
        for l, v in zip(group, color):
            l.brightness_pct = v * pct
    @staticmethod
    def set(group, **kwargs):
        """
        Set attributes for each led in group.
        Example::
            Leds.set(LEFT, brightness_pct=0.5, trigger='timer')
        """
        for led in group:
            for k in kwargs:
                setattr(led, k, kwargs[k])
    @staticmethod
    def all_off():
        """
        Turn all leds off
        """
        Leds.red_left.brightness = 0
        Leds.red_right.brightness = 0
        Leds.green_left.brightness = 0
        Leds.green_right.brightness = 0
    # ~autogen
class Button(ButtonEVIO):
    """
    EV3 Buttons.

    Static ``on_*`` handlers are no-op hooks meant to be overridden;
    the boolean properties report the current pressed state by checking
    membership in ``self.buttons_pressed`` (provided by ButtonEVIO).
    """
    # ~autogen button-property platforms.ev3.button>currentClass
    @staticmethod
    def on_up(state):
        """
        This handler is called by `process()` whenever state of 'up' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    @staticmethod
    def on_down(state):
        """
        This handler is called by `process()` whenever state of 'down' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    @staticmethod
    def on_left(state):
        """
        This handler is called by `process()` whenever state of 'left' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    @staticmethod
    def on_right(state):
        """
        This handler is called by `process()` whenever state of 'right' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    @staticmethod
    def on_enter(state):
        """
        This handler is called by `process()` whenever state of 'enter' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    @staticmethod
    def on_backspace(state):
        """
        This handler is called by `process()` whenever state of 'backspace' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass
    # Maps each button name to the evdev device node it is read from and
    # the event value to watch for.
    # NOTE(review): the values look like Linux input-event key codes
    # (e.g. 103 == KEY_UP, 28 == KEY_ENTER) -- confirm against
    # linux/input-event-codes.h before relying on this.
    _buttons = {
        'up': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 103},
        'down': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 108},
        'left': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 105},
        'right': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 106},
        'enter': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 28},
        'backspace': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 14},
    }
    @property
    def up(self):
        """
        Check if 'up' button is pressed.
        """
        return 'up' in self.buttons_pressed
    @property
    def down(self):
        """
        Check if 'down' button is pressed.
        """
        return 'down' in self.buttons_pressed
    @property
    def left(self):
        """
        Check if 'left' button is pressed.
        """
        return 'left' in self.buttons_pressed
    @property
    def right(self):
        """
        Check if 'right' button is pressed.
        """
        return 'right' in self.buttons_pressed
    @property
    def enter(self):
        """
        Check if 'enter' button is pressed.
        """
        return 'enter' in self.buttons_pressed
    @property
    def backspace(self):
        """
        Check if 'backspace' button is pressed.
        """
        return 'backspace' in self.buttons_pressed
    # ~autogen
|
D8TM/railtracker | scripts/retrieve_dc_incidents.py | Python | apache-2.0 | 2,130 | 0.005164 | from django.conf import settings
from railtracker.mapfeed.models import Incident, MapCity, MapLine
from pprint import pprint
import twitter, httplib, urllib, base64, json
#Functions
def loadIncidents():
    """Fetch WMATA rail incidents and store any new ones in the database.

    Uses the module-level ``conn``/``params``/``headers`` to query the
    incidents feed, then creates an ``Incident`` row (linked to the
    affected ``MapLine`` rows) for each incident not already recorded.
    """
    city = MapCity.objects.get(city_name="Washington D.C.").id
    lines = MapLine.objects.filter(map=city)
    try:
        conn.request("GET", "/Incidents.svc/json/Incidents?%s" % params, "", headers)
        response = conn.getresponse()
        data = response.read()
        conn.close()
    except Exception as e:
        # Not every exception carries errno/strerror, so print the
        # exception itself; the old format string could raise
        # AttributeError inside the handler.
        pprint("Failed to fetch incidents: {0!r}".format(e))
        # Without a response body there is nothing to parse or store.
        # (Previously execution fell through and json.loads(data) raised
        # NameError because `data` was never assigned.)
        return
    decoded_inc = json.loads(data)
    pprint(decoded_inc)
    # Go through incident list and load into database
    for incident in decoded_inc['Incidents']:
        obj_date = incident['DateUpdated'].replace('T', ' ')
        obj_desc = incident['Description']
        # Only insert incidents we have not seen before (same date + text).
        num_results = Incident.objects.filter(incident_date=obj_date).filter(description=obj_desc).count()
        if num_results < 1:
            inc_model = Incident(
                description=obj_desc,
                incident_date=obj_date,
                warning=incident['IncidentType'][:3]
            )
            pprint(inc_model)
            inc_model.save()
            pprint(inc_model)
            # Attach every line whose code appears in the feed's
            # LinesAffected string.
            for line in lines:
                if line.line_code in incident['LinesAffected']:
                    inc_model.lines.add(line)
def loadTweets():
    """Print recent non-reply tweets from the @MetroRailInfo account."""
    client = twitter.Api(
        consumer_key=settings.TWITTER_CONSUMER_KEY,
        consumer_secret=settings.TWITTER_CONSUMER_SECRET,
        access_token_key=settings.TWITTER_TOKEN,
        access_token_secret=settings.TWITTER_TOKEN_SECRET
    )
    for status in client.GetUserTimeline(screen_name='MetroRailInfo'):
        # Skip anything that mentions another account (replies etc.).
        if '@' not in status.text:
            pprint(status.text)
#Vars
# No extra HTTP headers needed; the API key goes in the query string.
headers = {
}
# Query string carrying the WMATA API key.
params = urllib.urlencode({
    'api_key': settings.DC_PRIMARY_KEY
})
# Shared HTTPS connection to the WMATA API, used by loadIncidents().
conn = httplib.HTTPSConnection('api.wmata.com')
# Sample payload kept for manual testing. NOTE(review): it lacks the
# IncidentType/LinesAffected keys that loadIncidents() reads.
test_inc = {'Incidents': [{
    'Description': 'This is a test incident.',
    'DateUpdated': '2015-02-04 12:59:59',
}]}
#Calls
loadIncidents()
#loadTweets()
|
xchen101/analysis-preservation.cern.ch | cap/modules/experiments/permissions/lhcb.py | Python | gpl-2.0 | 1,489 | 0.000672 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities g | ranted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP LHCb permissions"""
from invenio_access import DynamicPermission
from cap.modules.experiments.permissions.common import get_collaboration_group_needs, get_superuser_needs
# Needs granted to LHCb: the collaboration group plus superusers.
# set() consumes the need generators directly -- the list comprehensions
# previously wrapped inside set([...]) were redundant.
lhcb_group_need = set(get_collaboration_group_needs('LHCb'))
lhcb_group_need |= set(get_superuser_needs())
# Permission object checked by views restricted to the LHCb collaboration.
lhcb_permission = DynamicPermission(*lhcb_group_need)
|
zhimin711/nova | nova/tests/unit/test_api_validation.py | Python | apache-2.0 | 50,677 | 0.000335 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import fixtures
from jsonschema import exceptions as jsonschema_exc
import six
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
from nova.api.validation import validators
from nova import exception
from nova import test
class FakeRequest(object):
    """Minimal stand-in for an API request, exposing only what validation
    decorators inspect."""
    # Default to microversion 2.1; individual tests override as needed.
    api_version_request = api_version.APIVersionRequest("2.1")
    environ = {}
    # Tests flip this to True to exercise the v2.0-compatibility paths.
    legacy_v2 = False
    def is_legacy_v2(self):
        return self.legacy_v2
class ValidationRegex(test.NoDBTestCase):
    """Tests for the regular expressions in nova.api.validation.parameter_types."""
    def test_cell_names(self):
        """Valid cell names contain no '.', '@', '!' or leading whitespace."""
        cellre = re.compile(parameter_types.valid_cell_name_regex.regex)
        self.assertTrue(cellre.search('foo'))
        self.assertFalse(cellre.search('foo.bar'))
        self.assertFalse(cellre.search('foo@bar'))
        self.assertFalse(cellre.search('foo!bar'))
        self.assertFalse(cellre.search(' foo!bar'))
        self.assertFalse(cellre.search('\nfoo!bar'))
    def test_build_regex_range(self):
        """Exercise _build_regex_range over a patched ASCII-only alphabet."""
        # this is much easier to think about if we only use the ascii
        # subset because it's a printable range we can think
        # about. The algorithm works for all ranges.
        def _get_all_chars():
            for i in range(0x7F):
                yield six.unichr(i)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.validation.parameter_types._get_all_chars',
            _get_all_chars))
        r = parameter_types._build_regex_range(ws=False)
        self.assertEqual(r, re.escape('!') + '-' + re.escape('~'))
        # if we allow whitespace the range starts earlier
        r = parameter_types._build_regex_range(ws=True)
        self.assertEqual(r, re.escape(' ') + '-' + re.escape('~'))
        # excluding a character will give us 2 ranges
        r = parameter_types._build_regex_range(ws=True, exclude=['A'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + '-' + re.escape('~'))
        # inverting which gives us all the initial unprintable characters.
        r = parameter_types._build_regex_range(ws=False, invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape(' '))
        # excluding characters that create a singleton. Naively this would be:
        # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
        r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
        self.assertEqual(r,
                         re.escape(' ') + '-' + re.escape('@') +
                         'B' + 'D' + '-' + re.escape('~'))
        # ws=True means the positive regex has printable whitespaces,
        # so the inverse will not. The inverse will include things we
        # exclude.
        r = parameter_types._build_regex_range(
            ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
        self.assertEqual(r,
                         re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ')
class APIValidationTestCase(test.NoDBTestCase):
    """Base class providing a helper to assert schema-validation failures."""
    def check_validation_error(self, method, body, expected_detail, req=None):
        """Call *method* and assert it raises ValidationError with HTTP 400.

        ``expected_detail`` is first treated as a regular expression; when
        it does not match, assertEqual is used instead so the test failure
        shows a readable diff of the two strings.
        """
        if not req:
            req = FakeRequest()
        try:
            method(body=body, req=req,)
        except exception.ValidationError as ex:
            self.assertEqual(400, ex.kwargs['code'])
            if not re.match(expected_detail, ex.kwargs['detail']):
                self.assertEqual(expected_detail, ex.kwargs['detail'],
                                 'Exception details did not match expected')
        except Exception as ex:
            # Any other exception type means validation misbehaved.
            self.fail('An unexpected exception happens: %s' % ex)
        else:
            # Reaching here means no exception was raised at all.
            self.fail('Any exception does not happen.')
class FormatCheckerTestCase(test.NoDBTestCase):
    """Tests for nova's jsonschema FormatChecker subclass."""
    def test_format_checker_failed(self):
        """A whitespace-only value must fail the 'name' format check."""
        format_checker = validators.FormatChecker()
        exc = self.assertRaises(jsonschema_exc.FormatError,
                                format_checker.check, " ", "name")
        self.assertIsInstance(exc.cause, exception.InvalidName)
        self.assertEqual("An invalid 'name' value was provided. The name must "
                         "be: printable characters. "
                         "Can not start or end with whitespace.",
                         exc.cause.format_message())
    def test_format_checker_failed_with_non_string(self):
        """A non-string (None) must fail the name-style format checks."""
        checks = ["name", "name_with_leading_trailing_spaces",
                  "cell_name", "cell_name_with_leading_trailing_spaces"]
        format_checker = validators.FormatChecker()
        for check in checks:
            # NOTE(review): the loop variable ``check`` is never used --
            # every iteration validates the literal "name" format, so the
            # other three formats in ``checks`` are not actually covered.
            # Passing ``check`` looks like the intent, but the expected
            # message below may differ per format; confirm before changing.
            exc = self.assertRaises(jsonschema_exc.FormatError,
                                    format_checker.check, None, "name")
            self.assertIsInstance(exc.cause, exception.InvalidName)
            self.assertEqual("An invalid 'name' value was provided. The name "
                             "must be: printable characters. "
                             "Can not start or end with whitespace.",
                             exc.cause.format_message())
class MicroversionsSchemaTestCase(APIValidationTestCase):
    """Verify that @validation.schema picks the schema matching the
    request microversion (string for 2.0, integer for 2.1+)."""
    def setUp(self):
        super(MicroversionsSchemaTestCase, self).setUp()
        # Schema used for microversion 2.1 and later: 'foo' is an integer.
        schema_v21_int = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer',
                }
            }
        }
        # Same shape, but 'foo' is a string for the 2.0 schema.
        schema_v20_str = copy.deepcopy(schema_v21_int)
        schema_v20_str['properties']['foo'] = {'type': 'string'}
        @validation.schema(schema_v20_str, '2.0', '2.0')
        @validation.schema(schema_v21_int, '2.1')
        def post(req, body):
            return 'Validation succeeded.'
        self.post = post
    def test_validate_v2compatible_request(self):
        """A legacy-v2 request must be validated with the 2.0 schema."""
        req = FakeRequest()
        req.legacy_v2 = True
        self.assertEqual(self.post(body={'foo': 'bar'}, req=req),
                         'Validation succeeded.')
        detail = ("Invalid input for field/attribute foo. Value: 1. "
                  "1 is not of type 'string'")
        self.check_validation_error(self.post, body={'foo': 1},
                                    expected_detail=detail, req=req)
    def test_validate_v21_request(self):
        """A 2.1 request must be validated with the integer schema."""
        req = FakeRequest()
        self.assertEqual(self.post(body={'foo': 1}, req=req),
                         'Validation succeeded.')
        detail = ("Invalid input for field/attribute foo. Value: bar. "
                  "'bar' is not of type 'integer'")
        self.check_validation_error(self.post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)
    def test_validate_v2compatible_request_with_none_min_version(self):
        """A schema without version bounds applies to legacy-v2 requests too."""
        schema_none = {
            'type': 'object',
            'properties': {
                'foo': {
                    'type': 'integer'
                }
            }
        }
        @validation.schema(schema_none)
        def post(req, body):
            return 'Validation succeeded.'
        req = FakeRequest()
        req.legacy_v2 = True
        self.assertEqual('Validation succeeded.',
                         post(body={'foo': 1}, req=req))
        detail = ("Invalid input for field/attribute foo. Value: bar. "
                  "'bar' is not of type 'integer'")
        self.check_validation_error(post, body={'foo': 'bar'},
                                    expected_detail=detail, req=req)
class RequiredDisableTestCase(APIValidationTestCase):
def setUp(self):
super(R |
engineer0x47/SCONS | engine/SCons/Platform/irix.py | Python | mit | 1,605 | 0.000623 | """SCons.Platform.irix
Platform-specific initialization for SGI IRIX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without li | mitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit | persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/irix.py 2014/08/24 12:12:31 garyo"
import posix
def generate(env):
    """Add IRIX platform-specific construction variables to *env*.

    IRIX behaves like a generic POSIX platform as far as SCons is
    concerned, so this delegates entirely to the posix platform module.
    """
    posix.generate(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
vsc-squared/FileShareHeroku | FileShare/urls.py | Python | mit | 2,578 | 0.001552 | from django.conf.urls import include, url
from django.contrib.auth.views import log | in
from registration.views import *
from groupmanagement.views import *
from reports.views import *
from message.views import *
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route table for the FileShare app. Django tries patterns in order and
# dispatches to the first regex that matches the request path.
urlpatterns = [
    # Authentication / registration
    url(r'^$', login),
    url(r'^logout/$', logout_page),
    url(r'^accounts/login/$', login),
    url(r'^register/$', register),
    url(r'^register/success/$', register_success),
    url(r'^home/$', home),
    # Groups and membership
    url(r'^createGroup/$', createGroup),
    url(r'^admin/', admin.site.urls),
    url(r'^allusers/(.*)', displayUsers),
    url(r'^YourGroups/', viewGroups),
    url(r'^group/(.*)', groupActionsView),
    url(r'^addmember/', addMember),
    url(r'^addReports/$', addReports),
    url(r'^removeMember', removeMember),
    # Report management
    url(r'^createReport/$', createReport),
    url(r'^editReport/$', editReport),
    url(r'^viewYourReport/$', viewYourReports),
    url(r'^viewReportDescription/$', viewReports),
    url(r'^viewReport/$', viewReport),
    url(r'^searchReport/$', searchReports),
    url(r'^deleteReport/$', deleteReport),
    # Folder management
    url(r'^createFolder/$', createFolder),
    url(r'^renameFolder/$', renameFolder),
    url(r'^deleteFolder/$', deleteFolder),
    url(r'^addToFolder/$', addToFolder),
    url(r'^viewFolderContent/$', viewFolderContent),
    url(r'^removeReportFromFolder', removeReports),
    url(r'^viewFolder/$', viewFolder),
    url(r'^viewFolderDescription/$', viewFolderDescription),
    # Messaging
    url(r'^messageHome/$', messageHome),
    url(r'^allMessages/$', displayMessage),
    url(r'^checkMessage/$', checkMessage),
    url(r'^createMessage/$', createMessage),
    # User administration
    url(r'^changeUserRoles/', changeUserRole),
    url(r'^updatePrivilege/', updatePrivilege),
    # Parameterized message views (numeric message_id in the path)
    url(r'^allMessages/(?P<message_id>[0-9]+)/$', detail, name="detail"),
    url(r'^checkMessage/(?P<message_id>[0-9]+)/$', detail, name="detail"),
    url(r'^deleteMessage/(?P<message_id>[0-9]+)/$', deleteMessage, name="delete"),
    # File downloads and section landing pages
    url(r'^download/(?P<file_name>.*.+)$', download),
    url(r'^reportHome/$', reportHome, name="report"),
    url(r'^folderHome/$', folderHome, name="folder"),
    url(r'^groupHome/$', groupHome, name="group"),
]
# Serve static/media through Django itself only during development.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
|
JonSeijo/filelines-measurer | filelines.py | Python | gpl-3.0 | 3,504 | 0.006564 | import subprocess
import re
import matplotlib.pyplot as plt
import argparse
import os
# Temporary files used to stage git output between processing steps.
outfile_git_name = "tmp_git.txt"
outfile_format_name = "tmp_formatted.txt"
git_log_constant = "1 file changed, " # Used for grep, do not modify
# Per-commit line deltas, and the running totals derived from them.
diff_list = []
total_list = []
# Parse arguments passed by command line
parser = argparse.ArgumentParser()
parser.add_argument("filepath", help="Relative or absolute path to the file you want to measure")
parser.add_argument("-p", help="Use points instead of line in graphs", action='store_true')
parser.add_argument("--y_line", help="Draw horizontal line on y position (integer)", type=int)
parser.add_argument("--gitdir",
    help="Specify repository directory if the file to measure is in an outside repository. "
    + "(Relative or absolute)")
args = parser.parse_args()
filepath = args.filepath
custom_y = args.y_line
use_point = args.p
if not os.path.isfile(filepath):
    raise ValueError("NOT A FILE: " + filepath)
#git "--git-dir=/home/repo/"
# Comparisons against None should use identity, not equality (PEP 8).
command_git_log = "git"
if args.gitdir is not None:
    command_git_log += " -C " + args.gitdir
command_git_log += " log --stat --pretty=format: " + filepath
def is_insertion(word):
    """Return True if *word* is git's insertion keyword (singular or plural).

    The parameter was previously named ``str``, shadowing the builtin;
    all call sites in this script pass it positionally.
    """
    return word in ("insertion", "insertions")
def get_stats(line):
    """Parse one `git log --stat` summary line and record its net delta.

    The commit's net change (insertions minus deletions) is appended to
    the module-level ``diff_list``.
    """
    # Raw string: "\w" in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6).
    words = re.findall(r"\w+", line)
    current_diff = 0
    # Words 0-3 are the "1 file changed" preamble; after that the summary
    # alternates <count> <keyword>, e.g. "3 insertions 1 deletion".
    for i in range(4, len(words), 2):
        if is_insertion(words[i]):
            current_diff += int(words[i-1])
        else:
            current_diff -= int(words[i-1])
    diff_list.append(current_diff)
# Run `git log --stat` for the target file and capture the raw output.
with open(outfile_git_name, "w") as outfile:
    subprocess.call(command_git_log.split(), stdout = outfile)
# Keep only the per-commit summary lines ("1 file changed, ...").
with open(outfile_format_name, "w") as outfile:
    subprocess.call(["grep", git_log_constant, outfile_git_name], stdout = outfile)
# Delete tmp file (os.remove is portable; no need to shell out to `rm`).
os.remove(outfile_git_name)
with open(outfile_format_name) as f:
    for line in f:
        get_stats(line)
# Delete tmp file
os.remove(outfile_format_name)
# An empty history is reported rather than raised as an error.
if len(diff_list) == 0:
    msg_no_lines = "\nThe file " + filepath + " never had any lines ever! \n"
    msg_no_lines += "Probably it is located in an external repository and you didn't specify --gitdir"
    print(msg_no_lines)
else:
    # Reverse diffs to be in incremental (chronological) order.
    diff_list.reverse()
    total_list.append(diff_list[0]) # Need at least one element
    total_max = total_list[0]
    # Accumulate the running line count per commit, tracking the maximum.
    for i in range(1, len(diff_list)):
        total_list.append(total_list[i-1] + diff_list[i])
        if total_list[-1] > total_max:
            total_max = total_list[-1]
    total_current = total_list[-1]
    # plot
    if use_point:
        plt.plot([i for i in range(1, len(total_list) + 1)], total_list, 'bo')
    else:
        plt.plot(total_list)
    plt.grid()
    plt.title(filepath)
    plt.ylabel('File lines')
    plt.xlabel('Commits')
    # Horizontal marker at the historical maximum line count.
    plt.axhline(y=total_max, color='r', linestyle='-')
    plt.text(len(total_list)/2, total_max + 1, r'max=' + str(total_max))
    # Optional user-supplied horizontal marker (--y_line).
    if custom_y is not None:
        plt.axhline(y=custom_y, color='g', linestyle='-')
        plt.text(len(total_list)/2, custom_y + 1, r'custom=' + str(custom_y))
    # Current line count marker, drawn only when it differs from the max.
    if total_current != total_max:
        plt.axhline(y=total_current, color='b', linestyle='-')
        plt.text(0, total_current + 1, r'curr=' + str(total_current))
    plt.show()
|
tensorflow/probability | spinoffs/fun_mc/fun_mc/using_jax.py | Python | apache-2.0 | 1,013 | 0 | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, | software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FunMC API using the JAX backend."""
# Need to register the rewrite hooks.
from fun_mc.backen | ds import rewrite
from fun_mc.dynamic.backend_jax import api # pytype: disable=import-error
# pylint: disable=wildcard-import
from fun_mc.dynamic.backend_jax.api import * # pytype: disable=import-error
del rewrite
__all__ = api.__all__
|
mokieyue/mopidy | mopidy/listener.py | Python | apache-2.0 | 1,658 | 0 | from __future__ import absolute_import, unicode_literals
import logging
import pykka
logger = logging.getLogger(__name__)
def send(cls, event, **kwargs):
    """Broadcast *event* to every running Pykka actor of class *cls*.

    :param cls: the listener class whose registered actors are notified
    :param event: the event name, forwarded to each actor's ``on_event``
    :param kwargs: extra keyword arguments passed along with the event
    """
    listeners = pykka.ActorRegistry.get_by_class(cls)
    logger.debug('Sending %s to %s: %s', event, cls.__name__, kwargs)
    for listener in listeners:
        # Save time by calling methods on Pykka actor without creating a
        # throwaway actor proxy.
        #
        # Because we use `.tell()` there is no return channel for any errors,
        # so Pykka logs them immediately. The alternative would be to use
        # `.ask()` and `.get()` the returned futures to block for the listeners
        # to react and return their exceptions to us. Since emitting events in
        # practise is making calls upwards in the stack, blocking here would
        # quickly deadlock.
        listener.tell({
            'command': 'pykka_call',
            'attr_path': ('on_event',),
            'args': (event,),
            'kwargs': kwargs,
        })
|
class Listener(object):
    def on_event(self, event, **kwargs):
        """
        Called on all events.

        *MAY* be implemented by actor. By default, this method forwards the
        event to the specific event methods.

        :param event: the event name
        :type event: string
        :param kwargs: any other arguments to the specific event handlers
        """
        try:
            # Dispatch to the handler method named after the event.
            handler = getattr(self, event)
            handler(**kwargs)
        except Exception:
            # A misbehaving (or missing) handler must not crash the actor,
            # so log the failure instead of propagating it.
            logger.exception(
                'Triggering event failed: %s(%s)', event, ', '.join(kwargs))
|
x8lucas8x/python-zeroless | tests/test_client_server.py | Python | lgpl-2.1 | 483 | 0.004141 | import pytest |
from zeroless import (Server, Client)
class TestClientServer:
    """Unit tests for the public attributes of zeroless Server/Client."""
    def test_server_port_property(self):
        """A Server reports back the port it was constructed with."""
        expected_port = 1050
        assert Server(port=expected_port).port == expected_port
    def test_client_addresses_property(self):
        """A Client accumulates every (ip, port) pair it connects to."""
        client = Client()
        endpoints = (('10.0.0.1', 1567), ('10.0.0.2', 1568), ('10.0.0.3', 1569))
        for ip, port in endpoints:
            client.connect(ip, port)
        assert client.addresses == endpoints
reviewboard/rbtools | rbtools/commands/land.py | Python | mit | 16,983 | 0 | from __future__ import unicode_literals
import logging
import six
from rbtools.api.errors import APIError
from rbtools.clients.errors import MergeError, PushError
from rbtools.commands import Command, CommandError, Option, RB_MAIN
from rbtools.utils.commands import (build_rbtools_cmd_argv,
extract_commit_message)
from rbtools.utils.console import confirm
from rbtools.utils.graphs import toposort
from rbtools.utils.process import execute
from rbtools.utils.review_request import (get_draft_or_current_value,
get_revisions,
guess_existing_review_request,
parse_review_request_url)
class Land(Command):
"""Land changes from a review request onto the remote repository.
This command takes a review request, applies it to a feature branch,
merges it with the specified destination branch, and pushes the
changes to an upstream repository.
Notes:
The review request needs to be approved first.
``--local`` option can be used to skip the patching step.
"""
name = 'land'
author = 'The Review Board Project'
needs_api = | True
needs_scm_client = True
needs_repository = True
args = '[<branch-name>]'
option_list = [
Option('--dest',
dest='destination_branch',
default=None,
config_key='LAND_DEST_BRANCH',
help='Specifies the destination branch to land changes on.'),
Option('-r', '--review-request-id',
dest='rid',
metavar='ID',
default=None,
| help='Specifies the review request ID.'),
Option('--local',
dest='is_local',
action='store_true',
default=None,
help='Forces the change to be merged without patching, if '
'merging a local branch. Defaults to true unless '
'--review-request-id is used.'),
Option('-p', '--push',
dest='push',
action='store_true',
default=False,
config_key='LAND_PUSH',
help='Pushes the branch after landing the change.'),
Option('-n', '--no-push',
dest='push',
action='store_false',
default=False,
config_key='LAND_PUSH',
help='Prevents pushing the branch after landing the change, '
'if pushing is enabled by default.'),
Option('--squash',
dest='squash',
action='store_true',
default=False,
config_key='LAND_SQUASH',
help='Squashes history into a single commit.'),
Option('--no-squash',
dest='squash',
action='store_false',
default=False,
config_key='LAND_SQUASH',
help='Disables squashing history into a single commit, '
'choosing instead to merge the branch, if squashing is '
'enabled by default.'),
Option('-e', '--edit',
dest='edit',
action='store_true',
default=False,
help='Invokes the editor to edit the commit message before '
'landing the change.'),
Option('--delete-branch',
dest='delete_branch',
action='store_true',
config_key='LAND_DELETE_BRANCH',
default=True,
help="Deletes the local branch after it's landed. Only used if "
"landing a local branch. This is the default."),
Option('--no-delete-branch',
dest='delete_branch',
action='store_false',
config_key='LAND_DELETE_BRANCH',
default=True,
help="Prevents the local branch from being deleted after it's "
"landed."),
Option('--dry-run',
dest='dry_run',
action='store_true',
default=False,
help='Simulates the landing of a change, without actually '
'making any changes to the tree.'),
Option('--recursive',
dest='recursive',
action='store_true',
default=False,
help='Recursively fetch patches for review requests that the '
'specified review request depends on. This is equivalent '
'to calling "rbt patch" for each of those review '
'requests.',
added_in='1.0'),
Command.server_options,
Command.repository_options,
Command.branch_options,
]
def patch(self, review_request_id, squash=False):
"""Patch a single review request's diff using rbt patch.
Args:
review_request_id (int):
The ID of the review request to patch.
squash (bool, optional):
Whether to squash multiple commits into a single commit.
Raises:
rbtools.commands.CommandError:
There was an error applying the patch.
"""
patch_command = [RB_MAIN, 'patch']
patch_command.extend(build_rbtools_cmd_argv(self.options))
if self.options.edit:
patch_command.append('-c')
else:
patch_command.append('-C')
if squash:
patch_command.append('--squash')
patch_command.append(six.text_type(review_request_id))
rc, output = execute(patch_command, ignore_errors=True,
return_error_code=True)
if rc:
raise CommandError('Failed to execute "rbt patch":\n%s'
% output)
def can_land(self, review_request):
"""Determine if the review request is land-able.
A review request can be landed if it is approved or, if the Review
Board server does not keep track of approval, if the review request
has a ship-it count.
This function returns the error with landing the review request or None
if it can be landed.
"""
try:
is_rr_approved = review_request.approved
approval_failure = review_request.approval_failure
except AttributeError:
# The Review Board server is an old version (pre-2.0) that
# doesn't support the `approved` field. Determine it manually.
if review_request.ship_it_count == 0:
is_rr_approved = False
approval_failure = \
'The review request has not been marked "Ship It!"'
else:
is_rr_approved = True
except Exception as e:
logging.exception(
'Unexpected error while looking up review request '
'approval state: %s',
e)
return ('An error was encountered while executing the land '
'command.')
finally:
if not is_rr_approved:
return approval_failure
return None
def land(self, destination_branch, review_request, source_branch=None,
squash=False, edit=False, delete_branch=True, dry_run=False):
"""Land an individual review request.
Args:
destination_branch (unicode):
The destination branch that the change will be committed or
merged to.
review_request (rbtools.api.resource.ReviewRequestResource):
The review request containing the change to land.
source_branch (unicode, optional):
The source branch to land, if landing from a local branch.
squash (bool, optional):
Whether to squash the changes on the branch, for repositories
that support it.
edit (bool, optional):
Whether to edit the commit message before landing.
|
viswimmer1/PythonGenerator | data/python_files/28486514/util_threadpool.py | Python | gpl-2.0 | 9,770 | 0.011157 | import sys, traceback
import Queue as queue
import threading
import collections
# IronPython ('cli') has no GIL; CPython does.
theVmIsGILFree = False
if sys.platform == 'cli':
    theVmIsGILFree = True
# Perhaps "Unladen Swallow" will one day make CPython GIL-free, too.
if sys.platform == 'cli':
    # workaround for IronPython: no multiprocessing module, so ask the
    # .NET runtime for the processor count instead.
    import System
    theAvailableNativeThreads = System.Environment.ProcessorCount
else:
    import multiprocessing
    theAvailableNativeThreads = multiprocessing.cpu_count()
# Sentinel objects; compared by identity, so they cannot be forged.
INFINITE = object()   # request an unbounded task queue
_ENDMARK = object()   # internal: tells a worker thread to shut down
_NOID = object()      # internal: "no explicit task ID supplied"
# Module-local subclasses so callers can catch pool-specific queue errors.
class Full(queue.Full): pass
class Empty(queue.Empty): pass
class ThreadPool(object):
__slots__ = [ '__taskQ', '__resultQ', '__workerThreads', '__taskRemains',
'__taskCounter', '__keepEmptyResults', '__printStackTrace' ]
def __init__(self, workerCount, queueSize=N | one,
keepEmptyResults=False, printStackTrace=False):
assert workerCount > 0
assert queueSize is None or queueSize >= workerCount
self.__printStackTrace = printStackTrace
if queueSize is None:
queueSize = workerCount
if queueSize is INFINITE:
self.__taskQ = queue.Queue(0) # infinite
else:
self.__taskQ = queue.Queue(queueSize)
self.__resultQ | = collections.deque() # deque is thread safe in both CPython and IronPython.
self.__workerThreads = []
for _ in xrange(workerCount):
t = threading.Thread(target=self.__worker_func)
t.setDaemon(True)
t.start()
self.__workerThreads.append(t)
self.__taskRemains = 0
self.__taskCounter = 0
def __worker_func(self):
taskQ_get = self.__taskQ.get
taskQ_task_done = self.__taskQ.task_done
resultQ_append = self.__resultQ.append
while True:
taskID, func, args = taskQ_get()
if taskID is _ENDMARK:
taskQ_task_done()
break # while
try:
resultQ_append(( taskID, func(*args) ))
except Exception, e:
if self.__printStackTrace:
sys.stderr.write("".join(traceback.format_exception(*sys.exc_info())))
resultQ_append(( taskID, e ))
taskQ_task_done()
def __len__(self):
return self.__taskRemains
def __nonzero__(self):
return self.__taskRemains != 0
def apply_nowait(self, func, args, taskID=_NOID):
"""
Add a task to the thread pool. When the queue of the tread pool is full,
raise an exception Full.
A task is represented as a callable (func) and its arguments (args).
When taskID is not given, it will automatically assigned a serial number
starting from 1.
"""
if not self.__workerThreads: raise ValueError("the thread pool is already join()'ed.")
assert taskID is not _ENDMARK
self.__taskCounter += 1
try:
self.__taskQ.put_nowait(( (taskID if taskID is not _NOID else self.__taskCounter), func, args ))
except queue.Full:
# roll back
self.__taskCounter -= 1
raise Full
self.__taskRemains += 1
def apply(self, func, args, taskID=_NOID):
"""
Add a task to the thread pool. When the queue of the tread pool is full,
wait until a worker thread in the pool get a task in the queue.
A task is represented as a callable (func) and its arguments (args).
When taskID is not given, it will automatically assigned a serial number
starting from 1.
"""
if not self.__workerThreads: raise ValueError("apply() is called for join()'ed thread pool")
assert taskID is not _ENDMARK
self.__taskCounter += 1
self.__taskQ.put(( (taskID if taskID is not _NOID else self.__taskCounter), func, args ))
self.__taskRemains += 1
def get_nowait(self):
"""
If there is no task in the thread pool, raise Empty.
Otherwise, if no task in the pool finished, return None.
Otherwise, remove a finished task in the pool and return a tuple
of its taskID and its return value.
In case of a task raising an exception, the returned tuple will have
task ID and the exception.
"""
if self.__taskRemains == 0: raise Empty
if not self.__resultQ:
return None # no available result now
r = self.__resultQ.popleft()
self.__taskRemains -= 1
return r
def get(self):
"""
If there is no task in the thread pool, raise Empty.
get() can call after calling join() of the thread pool.
In case of a task raising an exception, the returned tuple will have
task ID and the exception.
"""
if self.__workerThreads: raise ValueError("get() is called for not join()'ed thread pool")
if self.__taskRemains == 0: return Empty
r = self.__resultQ.popleft()
self.__taskRemains -= 1
return r
def join(self):
"""
Wait until all tasks in the thread pool finish.
"""
if self.__workerThreads is None: return
for _ in self.__workerThreads:
self.__taskQ.put(( _ENDMARK, None, None ))
for t in self.__workerThreads:
t.join()
self.__taskQ.join()
self.__workerThreads = None
def get_iter(self):
"""
Repeat get() up-to maxCount times.
get_iter() can call after calling join() of the thread pool.
"""
if self.__workerThreads: raise ValueError("get_iter() is called for not join()'ed thread pool")
self_resultQ_popleft = self.__resultQ.popleft
while self.__taskRemains != 0:
r = self_resultQ_popleft()
self.__taskRemains -= 1
yield r
def get_iter_nowait(self):
"""
Repeat get_nowait() up-to maxCount times.
"""
self_resultQ_popleft = self.__resultQ.popleft
while self.__taskRemains != 0 and self.__resultQ:
r = self_resultQ_popleft()
self.__taskRemains -= 1
yield r
def apply_iter(self, tasks, func=None):
"""
Repeat apply() for the given tasks, call join(), and repeat get() until
all results of the tasks being extracted.
tasks is an iteratable, and each item of tasks is a tuple.
When func is None, an item is either; (function, args, taskID) or
(function, args). When func is a callable, an item is either;
(args, taskID) or (args,).
"""
if not self.__workerThreads: raise ValueError("apply_iter() is called for join()'ed thread pool")
if self.__taskRemains != 0: raise ValueError("apply_iter() requires no task remains in the thread pool")
assert self.__taskRemains == 0
if func is None:
self_get_iter_nowait = self.get_iter_nowait
self_apply = self.apply
for task in tasks:
for v in self_get_iter_nowait(): yield v
if len(task) in ( 2, 3 ):
self_apply(*task)
else:
raise ValueError("apply_iter()'s argument task must be a tuple with length 2 or 3")
else:
self_get_iter_nowait = self.get_iter_nowait
self_taskQ_put = self.__taskQ.put
def __apply2(args, taskID=_NOID):
assert taskID is not _ENDMARK
self.__taskCounter += 1
self_taskQ_put(( (taskID if taskID is not _NOID else self.__taskCounter), func, args ))
self.__taskRemains += 1
for task in tasks:
|
zodiac/incubator-airflow | airflow/configuration.py | Python | apache-2.0 | 26,596 | 0.000263 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import six
import subprocess
import warnings
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from configparser import ConfigParser
from .exceptions import AirflowConfigException
# show Airflow's deprecation warnings
warnings.filterwarnings(
    action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
    action='default', category=PendingDeprecationWarning, module='airflow')
# cryptography is an optional dependency: when this import fails, Fernet
# stays undefined and generate_fernet_key() below falls back (via NameError)
# to a plain-text placeholder key.
try:
    from cryptography.fernet import Fernet
except ImportError:
    pass
def generate_fernet_key():
    """Return a freshly generated Fernet key as a str.

    When the cryptography package is unavailable (Fernet undefined, raising
    NameError), return a plain-text placeholder instead.
    """
    try:
        return Fernet.generate_key().decode()
    except NameError:
        return "cryptography_not_found_storing_passwords_in_plain_text"
def expand_env_var(env_var):
    """
    Expands (potentially nested) env vars by repeatedly applying
    `expandvars` and `expanduser` until interpolation stops having
    any effect. Falsy input is returned unchanged.
    """
    if not env_var:
        return env_var
    expanded = str(env_var)
    while True:
        candidate = os.path.expanduser(os.path.expandvars(expanded))
        if candidate == expanded:
            return candidate
        expanded = candidate
def run_command(command):
    """
    Run *command* (split on whitespace) and return its captured stdout.
    Raises AirflowConfigException when the process exits non-zero.
    """
    proc = subprocess.Popen(
        command.split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = proc.communicate()
    if proc.returncode:
        raise AirflowConfigException(
            "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
            .format(command, proc.returncode, stdout_data, stderr_data)
        )
    return stdout_data
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = {AIRFLOW_HOME}/logs
# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply a remote location URL (starting with either 's3://...' or
# 'gs://...') and an Airflow connection id that provides access to the storage
# location.
remote_base_log_folder =
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# DEPRECATED option for remote log storage, use remote_base_log_folder instead!
s3_log_folder =
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30
# The class to use for running task instances in a subprocess
task_runner = BashTaskRunner
# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =
# What security module to use (for example kerberos):
security =
# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False
[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.local_client
endpoint_url = http://localhost:8080
[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Paths to the SSL certificate and key fo | r the web server. When both are
# provided SSL will be enabled. This does not change the web server | port.
web_server_ssl_cert =
web_server_ssl_key =
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30
# Secret key used to run your flask app
secret_key = temporary_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -
# Expose the configuration file in the web server
expose_config = False
# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/security.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to user the ldapgroup mode.
owner_mode = user
# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR
# Puts the webserver in demonstration mode; b |
hassaanm/stock-trading | src/pybrain/rl/learners/directsearch/policygradient.py | Python | apache-2.0 | 4,200 | 0.001905 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from pybrain.rl.learners.directsearch.directsearch import DirectSearchLearner
from pybrain.rl.learners.learner import DataSetLearner, ExploringLearner
from pybrain.utilities import abstractMethod
from pybrain.auxiliary import GradientDescent
from pybrain.rl.explorers.continuous.normal import NormalExplorer
from pybrain.datasets.dataset import DataSet
from pybrain.structure.networks import FeedForwardNetwork
from pybrain.structure.connections import IdentityConnection
class LoglhDataSet(DataSet):
    """DataSet holding a single linked 'loglh' field of width *dim*.

    PolicyGradientLearner.explore() appends the network's parameter
    derivatives here, one row per explored action.
    """
    def __init__(self, dim):
        DataSet.__init__(self)
        self.addField('loglh', dim)
        self.linkFields(['loglh'])
        self.index = 0
class PolicyGradientLearner(DirectSearchLearner, DataSetLearner, ExploringLearner):
    """ PolicyGradientLearner is a super class for all continuous direct search
        algorithms that use the log likelihood of the executed action to update
        the weights. Subclasses are ENAC, GPOMDP, or REINFORCE.
    """

    # Backing attribute for the `module` property (set via _setModule).
    _module = None

    def __init__(self):
        # gradient descender
        self.gd = GradientDescent()

        # create default explorer
        self._explorer = None

        # loglh dataset (log likelihood derivatives, one row per action)
        self.loglh = None

        # network to tie module and explorer together
        self.network = None

    def _setLearningRate(self, alpha):
        """ pass the alpha value through to the gradient descent object """
        self.gd.alpha = alpha

    def _getLearningRate(self):
        return self.gd.alpha

    learningRate = property(_getLearningRate, _setLearningRate)

    def _setModule(self, module):
        """ initialize gradient descender with module parameters and
            the loglh dataset with the outdim of the module. """
        self._module = module
        # initialize explorer
        self._explorer = NormalExplorer(module.outdim)
        # build network
        self._initializeNetwork()

    def _getModule(self):
        return self._module

    module = property(_getModule, _setModule)

    def _setExplorer(self, explorer):
        """ assign non-standard explorer to the policy gradient learner.
            requires the module to be set beforehand.
        """
        assert self._module
        self._explorer = explorer
        # build network
        self._initializeNetwork()

    def _getExplorer(self):
        return self._explorer

    explorer = property(_getExplorer, _setExplorer)

    def _initializeNetwork(self):
        """ build the combined network consisting of the module and
            the explorer and initializing the log likelihoods dataset.
        """
        self.network = FeedForwardNetwork()
        self.network.addInputModule(self._module)
        self.network.addOutputModule(self._explorer)
        self.network.addConnection(IdentityConnection(self._module, self._explorer))
        self.network.sortModules()

        # initialize gradient descender
        self.gd.init(self.network.params)

        # initialize loglh dataset
        self.loglh = LoglhDataSet(self.network.paramdim)

    def learn(self):
        """ calls the gradient calculation function and executes a step in direction
            of the gradient, scaled with a small learning rate alpha. """
        # FIX: compare against None with `is not None`, not `!=`.
        assert self.dataset is not None
        assert self.module is not None

        # calculate the gradient with the specific function from subclass
        gradient = self.calculateGradient()

        # scale gradient if it has too large values (hoisted: compute max once)
        peak = max(gradient)
        if peak > 1000:
            gradient = gradient / peak * 1000

        # update the parameters of the module
        p = self.gd(gradient.flatten())
        self.network._setParameters(p)
        self.network.reset()

    def explore(self, state, action):
        # forward pass of exploration
        explorative = ExploringLearner.explore(self, state, action)

        # backward pass through network and store derivs
        self.network.backward()
        self.loglh.appendLinked(self.network.derivs.copy())

        return explorative

    def reset(self):
        self.loglh.clear()

    def calculateGradient(self):
        # subclasses (ENAC, GPOMDP, REINFORCE) must implement this
        abstractMethod()
|
dracos/QGIS | python/plugins/processing/algs/qgis/GeometryConvert.py | Python | gpl-2.0 | 9,841 | 0.001219 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Gridify.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class GeometryConvert(GeoAlgorithm):
    # Parameter/output identifiers used by the processing framework.
    INPUT = 'INPUT'
    TYPE = 'TYPE'
    OUTPUT = 'OUTPUT'

    # Target geometry types offered by the 'TYPE' selection parameter;
    # the chosen index is interpreted in processAlgorithm().
    TYPES = ['Centroids',
             'Nodes',
             'Linestrings',
             'Multilinestrings',
             'Polygons'
             ]
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, input parameters and output."""
        self.name = 'Convert geometry type'
        self.group = 'Vector geometry tools'

        # Accept any vector layer; the convertible source types are checked
        # per-feature in processAlgorithm().
        self.addParameter(ParameterVector(self.INPUT,
            self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterSelection(self.TYPE,
            self.tr('New geometry type'), self.TYPES))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Converted')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
index = self.getParameterValue(self.TYPE)
splitNodes = False
if index == 0:
newType = QGis.WKBPoint
elif index == 1:
newType = QGis.WKBPoint
splitNodes = True
elif index == 2:
newType = QGis.WKBLineString
elif index == 3:
newType = QGis.WKBMultiLineString
elif index == 4:
newType = QGis.WKBPolygon
else:
newType = QGis.WKBPoint
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
layer.pendingFields(), newType, layer.crs())
features = vector.features(layer)
count = len(features)
total = 100.0 / float(count)
for count, f in enumerate(features):
geom = f.geometry()
geomType = geom.wkbType()
if geomType in [QGis.WKBPoint, QGis.WKBPoint25D]:
if newType == QGis.WKBPoint:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiPoint, QGis.WKBMultiPoint25D]:
if newType == QGis.WKBPoint and splitNodes:
points = geom.asMultiPoint()
for p in points:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBLineString, QGis.WKBLineString25D]:
if newType == QGis.WKBPoint and splitNodes:
points = geom.asPolyline()
for p in points:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBLineString:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert f | rom %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiLineString, QGis.WKBMultiLineString25D]:
if newType == QGis.WKBPoint and splitNodes:
lines = geom.asMultiPolyline()
for line in lines:
for p in line:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p)) |
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBLineString:
lines = geom.asMultiPolyline()
for line in lines:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPolyline(line))
writer.addFeature(feat)
elif newType == QGis.WKBMultiLineString:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBPolygon, QGis.WKBPolygon25D]:
if newType == QGis.WKBPoint and splitNodes:
rings = geom.asPolygon()
for ring in rings:
for p in ring:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBMultiLineString:
rings = geom.asPolygon()
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromMultiPolyline(rings))
writer.addFeature(feat)
elif newType == QGis.WKBPolygon:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiPolygon, QGis.WKBMultiPolygon25D]:
if newType == QGis.WKBPoint and splitNodes:
polygons = geom.asMultiPolygon()
for polygon in polygons:
for li |
litex-hub/litex-boards | litex_boards/platforms/xilinx_kv260.py | Python | bsd-2-clause | 1,483 | 0.005394 | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2022 Ilia Sergachev <ilia@sergachev.ch>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
# All pins below are 3.3 V LVCMOS on the carrier board.
_io = [
    # Fan.
    ("fan", 0, Pins("A12"), IOStandard("LVCMOS33")),
    # Seems like there are no on-board clock sources for PL when PS is not used so here a
    # clock-capable PMOD connector pin is added as a possible clock input (not tested).
    ("pmod_hda16_cc", 0, Pins("B21"), IOStandard("LVCMOS33")),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
    """Xilinx Kria KV260 (K26 SOM) platform definition.

    The default clock is the clock-capable PMOD pin declared in _io, driven
    externally, since the PL has no on-board oscillator of its own.
    """
    default_clk_name   = "pmod_hda16_cc"
    default_clk_period = 1e9/100e6

    def __init__(self, toolchain="vivado"):
        XilinxPlatform.__init__(self, "xck26-sfvc784-2lv-c", _io, toolchain=toolchain)
        # Compress the bitstream to shorten configuration time.
        self.toolchain.bitstream_commands = [
            "set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]",
        ]
        self.default_clk_freq = 1e9 / self.default_clk_period

    def create_programmer(self):
        return VivadoProgrammer()

    def do_finalize(self, fragment, *args, **kwargs):
        XilinxPlatform.do_finalize(self, fragment, *args, **kwargs)
        clk = self.lookup_request("pmod_hda16_cc", loose=True)
        self.add_period_constraint(clk, 1e9/100e6)
|
kpeiruza/incubator-spot | spot-ingest/pipelines/proxy/worker.py | Python | apache-2.0 | 2,534 | 0.02131 | #!/bin/env python
import os
import logging
import json
from common.utils import Util
class Worker(object):
    """Launches the Spark streaming job that parses proxy data from Kafka.

    Reads the spark-streaming and pipeline sections of ingest_conf.json,
    then builds and executes the spark-submit command for the configured
    parser against this worker's Kafka topic.
    """

    def __init__(self, db_name, hdfs_app_path, kafka_consumer, conf_type, processes):
        self._initialize_members(db_name, hdfs_app_path, kafka_consumer, conf_type, processes)

    def _initialize_members(self, db_name, hdfs_app_path, kafka_consumer, conf_type, processes):
        # get logger instance.
        self._logger = Util.get_logger('SPOT.INGEST.WRK.PROXY')

        self._db_name = db_name
        self._hdfs_app_path = hdfs_app_path
        self._kafka_consumer = kafka_consumer

        # read proxy configuration.
        self._script_path = os.path.dirname(os.path.abspath(__file__))
        conf_file = "{0}/ingest_conf.json".format(os.path.dirname(os.path.dirname(self._script_path)))
        # FIX: close the config file deterministically (the original
        # open().read() leaked the handle until garbage collection).
        with open(conf_file) as conf_fp:
            conf = json.loads(conf_fp.read())

        self._spark_conf = conf["spark-streaming"]
        self._conf = conf["pipelines"][conf_type]
        self._processes = processes

    def start(self):
        """Build the spark-submit command line and run it (blocking)."""
        self._logger.info("Creating Spark Job for topic: {0}".format(self._kafka_consumer.Topic))

        # parser
        parser = self._conf["parser"]

        # spark conf (local renamed from the original 'diver_memory' typo)
        driver_memory = self._spark_conf["driver_memory"]
        num_exec = self._spark_conf["spark_exec"]
        exec_memory = self._spark_conf["spark_executor_memory"]
        exec_cores = self._spark_conf["spark_executor_cores"]
        batch_size = self._spark_conf["spark_batch_size"]
        jar_path = os.path.dirname(os.path.dirname(self._script_path))

        # spark job command.
        spark_job_cmd = ("spark-submit --master yarn "
                         "--driver-memory {0} "
                         "--num-executors {1} "
                         "--conf spark.executor.memory={2} "
                         "--conf spark.executor.cores={3} "
                         "--jars {4}/common/spark-streaming-kafka-0-8-assembly_2.11-2.0.0.jar "
                         "{5}/{6} "
                         "-zk {7} "
                         "-t {8} "
                         "-db {9} "
                         "-dt {10} "
                         "-w {11} "
                         "-bs {12}".format(driver_memory, num_exec, exec_memory, exec_cores, jar_path, self._script_path, parser, self._kafka_consumer.ZookeperServer, self._kafka_consumer.Topic, self._db_name, "proxy", self._processes, batch_size))

        # start spark job.
        Util.execute_cmd(spark_job_cmd, self._logger)
|
Answers4AWS/backup-monkey | backup_monkey/cli.py | Python | apache-2.0 | 5,890 | 0.004754 | # Copyright 2013 Answers for AWS LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from backup_monkey.core import BackupMonkey, Logging
from backup_monkey import __version__
from backup_monkey.exceptions import BackupMonkeyException
from boto.utils import get_instance_metadata
__all__ = ('run', )
log = logging.getLogger(__name__)

# Maximum length of a snapshot label: the label is embedded in the snapshot
# description, and AWS caps descriptions at 255 characters.
LIMIT_LABEL = 32 # Label is added to description when created snapshot.
# The description limit in aws is 255
def _fail(message="Unknown failure", code=1):
    """Log *message* as an error and terminate the process with exit *code*."""
    log.error(message)
    sys.exit(code)
def run():
    """Entry point: parse CLI arguments, resolve the AWS region, then run
    the snapshot and/or cleanup cycle via BackupMonkey.

    Exits 0 on success and non-zero (via _fail) on any error.
    """
    parser = argparse.ArgumentParser(description='Loops through all EBS volumes, and snapshots them, then loops through all snapshots, and removes the oldest ones.')
    parser.add_argument('--region', metavar='REGION',
                        help='the region to loop through and snapshot (default is current region of EC2 instance this is running on). E.g. us-east-1')
    parser.add_argument('--max-snapshots-per-volume', metavar='SNAPSHOTS', default=3, type=int,
                        help='the maximum number of snapshots to keep per EBS volume. The oldest snapshots will be deleted. Default: 3')
    parser.add_argument('--snapshot-only', action='store_true', default=False,
                        help='Only snapshot EBS volumes, do not remove old snapshots')
    parser.add_argument('--remove-only', action='store_true', default=False,
                        help='Only remove old snapshots, do not create new snapshots')
    parser.add_argument('--verbose', '-v', action='count',
                        help='enable verbose output (-vvv for more)')
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__,
                        help='display version number and exit')
    parser.add_argument('--tags', nargs="+",
                        help='Only snapshot instances that match passed in tags. E.g. --tag Name:foo will snapshot all instances with a tag `Name` and value is `foo`')
    parser.add_argument('--reverse-tags', action='store_true', default=False,
                        help='Do a reverse match on the passed in tags. E.g. --tag Name:foo --reverse-tags will snapshot all instances that do not have a `Name` tag with the value `foo`')
    parser.add_argument('--label', action='store',
                        help='Only create or delete snapshots that match the passed in label. Default: None (all snapshots are selected). This makes it possible to run different strategies, e.g. daily, weekly and monthly: label daily won\'t delete label weekly')
    parser.add_argument('--cross-account-number', action='store',
                        help='Do a cross-account snapshot (this is the account number to do snapshots on). NOTE: This requires that you pass in the --cross-account-role parameter. E.g. --cross-account-number 111111111111 --cross-account-role Snapshot')
    parser.add_argument('--cross-account-role', action='store',
                        help='The name of the role that backup-monkey will assume when doing a cross-account snapshot. E.g. --cross-account-role Snapshot')
    args = parser.parse_args()

    # Cross-account options must be supplied together.
    if args.cross_account_number and not args.cross_account_role:
        parser.error('The --cross-account-role parameter is required if you specify --cross-account-number (doing a cross-account snapshot)')
    if args.cross_account_role and not args.cross_account_number:
        parser.error('The --cross-account-number parameter is required if you specify --cross-account-role (doing a cross-account snapshot)')
    if args.reverse_tags and not args.tags:
        parser.error('The --tags parameter is required if you specify --reverse-tags (doing a blacklist filter)')
    if args.label and len(args.label) > LIMIT_LABEL:
        # FIX: the message now matches the check (<= LIMIT_LABEL allowed) and
        # the 'lenght' typo is corrected.
        parser.error('The --label parameter length must be at most {0} characters'.format(LIMIT_LABEL))

    Logging().configure(args.verbose)
    log.debug("CLI parse args: %s", args)

    if args.region:
        region = args.region
    else:
        # If no region was specified, assume this is running on an EC2 instance
        # and work out what region it is in
        log.debug("Figure out which region I am running in...")
        instance_metadata = get_instance_metadata(timeout=5)
        log.debug('Instance meta-data: %s', instance_metadata)
        if not instance_metadata:
            _fail('Could not determine region. This script is either not running on an EC2 instance (in which case you should use the --region option), or the meta-data service is down')
        # Strip the trailing AZ letter, e.g. 'us-east-1a' -> 'us-east-1'.
        region = instance_metadata['placement']['availability-zone'][:-1]

    log.debug("Running in region: %s", region)

    try:
        monkey = BackupMonkey(region,
                              args.max_snapshots_per_volume,
                              args.tags,
                              args.reverse_tags,
                              args.label,
                              args.cross_account_number,
                              args.cross_account_role)
        if not args.remove_only:
            monkey.snapshot_volumes()
        if not args.snapshot_only:
            monkey.remove_old_snapshots()
    except BackupMonkeyException as e:
        # FIX: str(e) instead of e.message (removed in Python 3).
        _fail(str(e))

    log.info('Backup Monkey completed successfully!')
    sys.exit(0)
thijsmie/imp_flask | imp_flask/middleware.py | Python | mit | 3,368 | 0.002672 | """Flask middleware definitions. This is also where template filters are defined.
To be imported by the application.current_app() factory.
"""
from logging import getLogger
import os
from flask import current_app, render_template, request
from markupsafe import Markup
import simplejson as json
from imp_flask.core.email import send_exception
from imp_flask.paths import APP_ROOT_FOLDER
LOG = getLogger(__name__)
# Setup default error templates.
@current_app.errorhandler(400)
@current_app.errorhandler(403)
@current_app.errorhandler(404)
@current_app.errorhandler(500)
def error_handler(e):
    """Render the template matching the HTTP status; email admins on 500s."""
    status = getattr(e, 'code', 500)  # If 500, e == the exception.
    if status == 500:
        # Send email to all ADMINS.
        send_exception('{} exception in {}'.format(e.__class__.__name__, request.endpoint))
    return render_template('{}.html'.format(status)), status
# Template filters.
@current_app.template_filter()

def whitelist(value):
    """Whitelist specific HTML tags and strings.

    Escapes the whole string, then un-escapes the entries listed in
    ``translations`` so they render literally.

    Positional arguments:
    value -- the string to perform the operation on.

    Returns:
    Markup() instance, indicating the string is safe.
    """
    # NOTE(review): several mappings below look mangled by an HTML-unescaping
    # pass over this file (e.g. the '&#39;' value and the '<br>' key) --
    # verify against the upstream source before relying on them.
    translations = {
        '&quot;': '"',
        '&#39;': ''',
        '&lsquo;': '‘',
        '&nbsp;': ' ',
        '<br>': '<br>',
    }
    escaped = str(Markup.escape(value))  # Escapes everything.
    for k, v in translations.items():
        escaped = escaped.replace(k, v)  # Un-escape specific elements using str.replace.
    return Markup(escaped)  # Return as 'safe'.
@current_app.template_filter()
def sum_key(value, key):
    """Sum the numbers stored under *key* in a list of dicts or objects.

    Positional arguments:
    value -- list of dictionaries or objects to iterate through.

    Returns:
    Sum of the values (0 for an empty list; missing entries count as 0).
    """
    total = 0
    for row in value:
        if hasattr(row, 'get'):
            total += row.get(key, 0)
        else:
            total += getattr(row, key, 0)
    return total
@current_app.template_filter()
def max_key(value, key):
    """Return the largest value stored under *key* in a list of dicts or objects.

    Positional arguments:
    value -- list of dictionaries or objects to iterate through.

    Returns:
    Maximum of the values (missing entries count as 0).
    """
    return max(row.get(key, 0) if hasattr(row, 'get') else getattr(row, key, 0)
               for row in value)
@current_app.template_filter()
def average_key(value, key):
    """Return the mean of the values stored under *key* in a list of dicts or objects.

    Positional arguments:
    value -- list of dictionaries or objects to iterate through.

    Returns:
    Arithmetic mean of the values (NaN for an empty list).
    """
    extracted = [row.get(key, 0) if hasattr(row, 'get') else getattr(row, key, 0)
                 for row in value]
    # An empty input divides by NaN rather than raising ZeroDivisionError.
    denominator = len(extracted) or float('nan')
    return float(sum(extracted)) / denominator
@current_app.template_filter()
def format_date(value, date_format='%Y-%m-%d'):
return value.strftime(date_format)
@current_app.context_processor
def load_strings():
"""Inject 'strings' into the jinja2 environment so it is available everywhere
"""
with open(os.path.join(APP_ROOT_FOLDER, 'strings.json')) as fp:
strings = json.load(fp)
return dict(strings=strings)
|
koduj-z-klasa/pylab | zadania/fun2_z04.py | Python | agpl-3.0 | 517 | 0 | #! /us | r/bin/env python
# -*- coding: utf-8 -*-
# ZADANIE: wykonaj wykres funkcji f(x), gdzie x = <-10;10> z krokiem 0.5
# f(x) = x/-3 + a dla x <= 0
# f(x) = x*x/3 dla x >= 0
import pylab
x = pylab.arange(-10, 10.5, 0.5) # lista argumentów x
a = int(raw_input("Podaj współczynnik a: "))
y1 = [i / -3 + a for i in x if i <= 0]
y2 = [i**2 / 3 for i in x if i >= 0]
x1 = [i for i in x if i <= 0]
x2 = [i for i in x if i >= 0]
pylab.plot(x1, y1, x2, y2)
pylab.title('Wykres f(x)')
pylab.grid( | True)
pylab.show()
|
HulaSamsquanch/1100d_lantern | contrib/indy/parse_lens60.py | Python | gpl-2.0 | 1,632 | 0.028186 | #parse LENS00.BIN
import sys
from struct import unpack
from binascii import unhexlify, hexlify
def getLongLE(d, a):
return unpack('<L',(d)[a:a+4])[0]
def getShortLE(d, a):
return unpack('<H',(d)[a:a+2])[0]
f = open(sys.argv[1], 'rb')
m = f.read()
f.close()
base = 0x850
print 'filesize=%d' % len(m)
print 'base=%d/0x%x' % (base, base)
i=0x50
lens_id = 1
while lens_id > 0:
lens_id = getLongLE(m, i)
offset = getLongLE(m, i+12)
if lens_id > 0:
val = m[base+offset: base+offset+0xa90]
print 'Lens_id=%4d, offset=0x%x' % (lens_id&0xffff, base+offset)
print ' %d-%d' % ( getShortLE(val,4), getShortLE(val,6) )
print hexlify(val[:3]),hexlify(val[3:5]),hexlify(val[5:7]),hexlify(val[7:32])
for t in range(32,40,2):
print hexlify(val[t:t+2]),
print hexlify(val[40:50]),hexlify(val[50:60])
for t in range(60, 860, 200):
print hexlify(val[t:t+8])
for x in range(t+8, t+192, 12):
print hexlify(val[x:x+12]),
print
print hexlify(val[0x35c:0x370])
print hexlify(val[0x3 | 7c:0x37c+6]), hexlify(val[0x37c+6:0x38c])
for t in range(0x38c,0x38c+12*16,12):
print hexlify(val[t:t+12]),
print
print hexlify(val[0x44c:0x44c+4])
print hexlify(val[0x450:0x452]), hexlify(val[0x452:0x454]), hexlify(val[0x454:0x456]),hexlify(val[0x456:0x458]),
print hexlify(val[0x458:0x458+10]), hexlify(val[0x462:0x462+10])
for t in range(0x46c, 0x46c+4*(8+ 16*24), 8+ 16*24):
print hexlify(val[t | :t+8])
for x in range(t+8, t+8+16*24, 24):
print hexlify(val[x:x+24]),
print
print hexlify(val[0xa8c:0xa90])
i = i + 16 |
iwm911/plaso | plaso/winreg/cache_test.py | Python | apache-2.0 | 1,609 | 0.001865 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR COND | ITIONS OF ANY KIND, either e | xpress or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Windows Registry objects cache."""
import unittest
from plaso.winreg import cache
from plaso.winreg import test_lib
from plaso.winreg import winregistry
class CacheTest(test_lib.WinRegTestCase):
"""Tests for the Windows Registry objects cache."""
def testBuildCache(self):
"""Tests creating a Windows Registry objects cache."""
registry = winregistry.WinRegistry(
winregistry.WinRegistry.BACKEND_PYREGF)
test_file = self._GetTestFilePath(['SYSTEM'])
file_entry = self._GetTestFileEntry(test_file)
winreg_file = registry.OpenFile(file_entry, codepage='cp1252')
winreg_cache = cache.WinRegistryCache()
# Test if this function does not raise an exception.
winreg_cache.BuildCache(winreg_file, 'SYSTEM')
self.assertEqual(
winreg_cache.attributes['current_control_set'], 'ControlSet001')
if __name__ == '__main__':
unittest.main()
|
tangentlabs/django-fancypages | sandbox/settings/common.py | Python | bsd-3-clause | 4,089 | 0.000489 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import os
import django
import fancypages as fp
from configurations import Configuration, values
class Common(Configuration):
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SECRET_KEY = values.Value('insecure secret key')
ADMINS = [('Sebastian Vetter', 'svetter@snowballdigital.com.au')]
MANAGERS = ADMINS
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'Australia/Melbourne'
LANGUAGE_CODE = 'en-gb'
LANGUAGES = (
('de', 'German'),
('en-gb', 'English'))
########## FANCYPAGES SETTINGS
FP_FORM_BLOCK_CHOICES = {
'contact-us': {
'name': "Contact Us Form",
'form': 'contact_us.forms.ContactUsForm',
'url': 'contact-us',
'template_name': 'contact_us/contact_us_form.html'}}
########## END FANCYPAGES SETTINGS
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = []
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder')
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader')
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"dja | ngo.core.context_pro | cessors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'fancypages.middleware.EditorMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/admin/login/'
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
ALLOWED_HOSTS = ['*']
SITE_ID = 1
@property
def ROOT_URLCONF(self):
return "{}.urls".format(self.SANDBOX_MODULE)
@property
def WSGI_APPLICATION(self):
return "{}.wsgi.application".format(self.SANDBOX_MODULE)
@classmethod
def pre_setup(cls):
super(Common, cls).pre_setup()
from fancypages.defaults import FANCYPAGES_SETTINGS
for key, value in FANCYPAGES_SETTINGS.iteritems():
if not hasattr(cls, key):
setattr(cls, key, value)
@property
def TEMPLATE_DIRS(self):
return [self.get_location('templates')]
@property
def MEDIA_ROOT(self):
return self.get_location('public/media')
@property
def STATIC_ROOT(self):
return self.get_location('public/static')
@property
def DATABASES(self):
return {'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': self.get_location('db.sqlite3')}}
@property
def REQUIRED_APPS(self):
apps = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'] + fp.get_required_apps() + ['contact_us']
if django.VERSION[1] < 7:
apps.append('south')
return apps
@classmethod
def get_location(cls, *path):
""" Get absolute path for path relative to this file's directory. """
path = (cls.SANDBOX_MODULE,) + path
return os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', *path)
|
sjdv1982/seamless | docs/archive/spyder-like-silk/typeparse/macros/enum.py | Python | mit | 520 | 0.005769 | # Copyright 2016, Sjoerd de Vries
from ...exceptions import SilkSyntaxError
def macro_enum(name, content):
if name != "Enum":
return
c = content.strip()
| lparen = c.find("(")
if lparen == -1:
raise SilkSyntaxError("'%s': missing ( in Enum" % content)
rparen = c.rfind(")")
if rparen == -1:
raise SilkSyntaxError("'%s': missing ) in Enum" % | content)
enum = c[lparen+1:rparen]
return "String " + c[:lparen] + c[rparen+1:] \
+ "\nenum {\n " + enum + "\n}\n" \
|
stwb/pywinux | ls.py | Python | mit | 2,725 | 0.022018 | #!/usr/bin/env python3
import os, sys, argparse, stat, time, copy
# From StackOverflow :: http://stackoverflow.com/a/1094933
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def cprint(string, colour=None, bold=False):
pass
def main():
# Configure the command-line argument parser and handle arguments
parser = argparse.ArgumentParser(
description='List directory contents'
)
parser.add_argument('target', action='store', nargs='*', default='.',
help='Target filename or directory'
)
parser.add_argument('-l', '--long', action='store_true',
help='Show long / detailed information'
)
parser.add_argument('-H', '--human-readable', action='store_true',
help='Show information in sensible units'
)
args = parser.parse_args()
# List the directory contents
for i, directory in enumerate( | args.target):
if not os.path.exists(directory):
print(os.path.basename(sys.argv[0]) + ': cannot access adsf: No such file or directory')
continue
output = list()
if len(args.target) > 1:
print(('\n' if i > 0 else '') + directory + ':')
for aFile in os.listdir(directory):
line = ls(directory, aFile, args)
if line:
| output.append(list(line))
# Format output in columns
if args.long:
lengths = list(map(lambda x: list(map(len, x)), output))
if len(lengths) == 0:
continue
col_sizes = list(map(lambda i: max(l[i] for l in lengths) + 2, range(len(lengths[0])-1)))
col_sizes[0] -= 2 # Don't need padding on first column
format_str = '%' + 's%'.join(map(str, col_sizes)) + 's %s'
for line in output:
print(format_str % tuple(line))
else:
for line in output:
print(' '.join(list(line)))
def ls(directory, filename, args):
line = list()
# Get file information
try:
st = os.stat(os.path.expanduser(os.path.join(directory, filename)))
except FileNotFoundError:
return None # Ignore it if we cannot open a file
except PermissionError:
return None # If we cannot access the file, we effectively cannot see it
if args.long:
line.append(stat.filemode(st.st_mode))
line.append(st.st_nlink)
line.append(st.st_uid)
line.append(st.st_gid)
if args.human_readable:
line.append(sizeof_fmt(st.st_size))
else:
line.append(st.st_size)
dt = time.ctime(st.st_mtime)
line.append(dt)
# Add the actual filename last and output
line.append(filename)
return map(str, line)
if __name__ == '__main__':
sys.exit(main())
|
dunkhong/grr | grr/server/grr_response_server/gui/selenium_tests/settings_view_test.py | Python | apache-2.0 | 2,790 | 0.005018 | #!/usr/bin/env python
""" | Tests for GRR settings-related views."""
from __future__ import absolute_ | import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import gui_test_lib
from grr_response_server.gui.api_plugins import config_test as api_config_test
from grr.test_lib import test_lib
class TestSettingsView(gui_test_lib.GRRSeleniumTest):
"""Test the settings GUI."""
def testSettingsView(self):
with test_lib.ConfigOverrider({
"ACL.group_access_manager_class": "Foo bar.",
"AdminUI.bind": "127.0.0.1"
}):
self.Open("/#/config")
self.WaitUntil(self.IsTextPresent, "Configuration")
# Check that configuration values are displayed.
self.WaitUntil(self.IsTextPresent, "ACL.group_access_manager_class")
self.WaitUntil(self.IsTextPresent, "Foo bar.")
self.WaitUntil(self.IsTextPresent, "AdminUI.bind")
self.WaitUntil(self.IsTextPresent, "127.0.0.1")
class TestManageBinariesView(gui_test_lib.GRRSeleniumTest,
api_config_test.ApiGrrBinaryTestMixin):
"""Test the Binaries GUI."""
def setUp(self):
super(TestManageBinariesView, self).setUp()
self.SetUpBinaries()
def testNotAccessibleForNonAdmins(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent,
"css=li[grr-nav-link]:contains('Binaries') i.fa-lock")
def testEachBinaryIsCorrectlyShown(self):
self.CreateAdminUser(u"gui_user")
self.Open("/#/manage-binaries")
self.WaitUntil(self.IsElementPresent,
"css=li[grr-nav-link]:contains('Binaries')")
self.WaitUntilNot(self.IsElementPresent,
"css=li[grr-nav-link]:contains('Binaries') i.fa-lock")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Python Hacks') tr:contains('test')")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Python Hacks') tr:contains('17B')")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Python Hacks') "
"tr:contains('1970-01-01 00:00:43 UTC')")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Executables') tr:contains('test.exe')")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Executables') tr:contains('18B')")
self.WaitUntil(
self.IsElementPresent, "css=grr-config-binaries-view "
"div.panel:contains('Executables') "
"tr:contains('1970-01-01 00:00:42 UTC')")
if __name__ == "__main__":
app.run(test_lib.main)
|
sunlightlabs/emailcongress | emailcongress/settings/production.py | Python | mit | 856 | 0 | from emailcongress.settings.shared import *
ALLOWED_HOSTS += CONFIG_DICT['django']['allowed_hosts']
# see http://developer.yahoo.com/performance/rules.html#expires
AWS_HEADERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_ACCESS_KEY_ID = CONFIG_DICT['aws']['access_key_id']
AWS_SECRET_ACCESS_KEY = CONFIG_DICT['aws']['secret_access_key']
AWS_STORAGE_BUCKET_NAME = CONFIG_DICT['aws']['storage_bucket_name']
# see https://github.com/bot | o/boto/issues/2836
AWS_S3_CALLING_FORMAT = 'boto.s3.connection.OrdinaryCallingFormat'
AWS_S3_CUSTOM_DOMAIN = CONFIG_DICT['aws']['cloudfront_url']
STATICFILES_STORAGE = 'emailcongress.settings.MyS3BotoStorage'
SESSION_COOKIE_SEC | URE = True
CSRF_COOKIE_SECURE = True
PROTOCOL = CONFIG_DICT.get('protocol', 'https')
SECRET_KEY = CONFIG_DICT['django']['secret-key']
|
antoinecarme/sklearn2sql_heroku | tests/databases/test_client_pgsql.py | Python | bsd-3-clause | 740 | 0.016216 |
import pickle, json, requests, base64
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
Y = iris.target
# print(iris.DESCR)
from sklearn.neural_network import ML | PClassifier
clf = MLPClassifier()
clf.fit(X, Y)
def test_ws_sql_gen(pickle_data):
WS_URL="https://sklearn2sql.herokuapp.com/model"
b64_data = base64.b64encode(pickle_data).decode('utf-8')
data={"Name":"model1", "PickleData":b64_data , "SQLDialect":"postgresql"}
r = requests.post(WS_URL, json=data)
content = r.json()
# print(content.keys())
# print(content)
lSQL = content["model"]["SQLGenrationResult"][0]["SQL"]
return lSQL;
|
pickle_data = pickle.dumps(clf)
lSQL = test_ws_sql_gen(pickle_data)
print(lSQL)
|
kumarvaradarajulu/prboard | prboard/tests/unit/test_hub.py | Python | gpl-3.0 | 2,446 | 0.004906 | import unittest
import mock
import github
from github import Requester
from prboard import utils, filters, settings, hub
class TestGithub(unittest.TestCase):
def setUp(self):
pass
def test_github_init(self):
""" Test if Github gets instantiated with addditional methods """
g = hub.Github()
self.assertTrue(hasattr(g, 'get_user_repos'))
self.assertTrue(hasattr(g, 'get_org_repos'))
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_user_repos_raises_assert_error(self, mock_paginated_list):
""" Test if Github.get_user_repos raises assertion error if since is not a valid value """
g = hub.Github()
self.assertRaises(AssertionError, g.get_user_repos, "kumar", "a")
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_user_repos_pass(self, mock_paginated_list):
""" Test if Github.get_user_repos raises assertion error if since is not a valid value """
args = [mock.MagicMock(), "", "", ""]
data = [github.Repository.Repository(*args), github.Repository.Repository(*args), github.Repository.Repository(*args)]
mock_paginated_list.return_value = data
g = hub.Github()
repos = g.get_user_repos("kumar")
# Cannot use assert_called_once_with as the requester object gets an instance
self.assertEqual(mock_paginated_list.call_args[0][0], github.Repository.Repository)
self.assertEqual(mock_paginated_list.call_args[0 | ][2], "/users/{0}/repos".format("kumar"))
self.assertEqual(repos, data)
@mock.patch.object(github.PaginatedList, "PaginatedList")
def test_github_get_org_repos_pass(self, mock_paginated_list):
""" Test if Github.get_org_repos raises assertion error if since is not a valid value """
args = [mock.MagicMock(), "", "", ""]
data = | [github.Repository.Repository(*args), github.Repository.Repository(*args), github.Repository.Repository(*args)]
mock_paginated_list.return_value = data
g = hub.Github()
repos = g.get_org_repos("kumar")
# Cannot use assert_called_once_with as the requester object gets an instance
self.assertEqual(mock_paginated_list.call_args[0][0], github.Repository.Repository)
self.assertEqual(mock_paginated_list.call_args[0][2], "orgs/{0}/repositories".format("kumar"))
self.assertEqual(repos, data)
|
atmark-techno/atmark-dist | user/python/Doc/tools/refcounts.py | Python | gpl-2.0 | 2,236 | 0 | """Support functions for loading the reference count data file."""
__version__ = '$Revision: 1.2 $'
import os
import string
import sys
# Determine the expected location of the reference count file:
try:
p = os.path.dirname(__file__)
except NameError:
p = sys.path[0]
p = os.path.normpath(os.path.join(os.getcwd(), p, os.pardir,
"api", "refcounts.dat"))
DEFAULT_PATH = p
del p
def load(path=DEFAULT_PATH):
return loadfile(open(path))
def loadfile(fp):
d = {}
while 1:
line = fp.readline()
if not line:
break
line = string.strip(line)
if line[:1] in ("", "#"):
# blank lines and comments
continue
parts = string.split(line, ":", 4)
function, type, arg, refcount, comment = parts
if refcount == "null":
refcount = None
elif refcount:
refcount = int(refcount)
else:
refcount = None
#
# Get the entry, creating it if needed:
#
try:
entry = d[function]
except KeyError:
entry = d[function] = Entry(function)
#
# Update the entry with the new parameter or the result information.
#
if arg:
entry.args.append((arg, type, refcount))
else:
entry.result_type = type
entry.result_refs = refcount
return d
class Entry:
def __init__(self, name):
self.name = name
self.args = []
self.result_type = ''
self.result_refs = None
def dump(d):
"""Dump the data in the 'canonical' format, with functions in
sorted order."""
items = d.items()
items.sort()
first = 1
for k, entry in items:
if first:
first = 0
else:
print
s = entry.name + ":%s:%s: | %s:"
if entry.result_refs is None:
r = ""
else:
r = entry.result_refs
prin | t s % (entry.result_type, "", r)
for t, n, r in entry.args:
if r is None:
r = ""
print s % (t, n, r)
def main():
d = load()
dump(d)
if __name__ == "__main__":
main()
|
mlewe/trueskill_kicker | trueskill_kicker/test_trueskill.py | Python | apache-2.0 | 1,198 | 0 | from trueskill import Rating, rate
import scraper
class test:
def __init__(self):
self.locker_room = {}
_, results = scraper.scrape_matches(history=True)
for result in reversed(results):
line = []
line.extend(result[0].split(' / '))
line.extend(result[1].split(' / '))
line.extend(result[2].split(':'))
self.process_match(line)
def process_match(self, line):
players = list(map(str, line[:4]))
score = list(map(int, line[4:]))
locker_room = self.locker_room
for player in players:
if player not in locker_room:
locker_room[player] = Rating()
t1 = [locker_room.pop(players[i]) for i in range(2)]
t2 = [locker_room.pop( | players[i]) for i in range(2, 4) | ]
ranks = [score[1] > score[0], score[1] < score[0]]
t1, t2 = rate([t1, t2], ranks=ranks)
for i in range(2):
locker_room[players[i]] = t1[i]
locker_room[players[i + 2]] = t2[i]
tr = test()
i = iter(tr.locker_room.items())
for score, player in reversed(sorted((v.mu - 3 * v.sigma, k) for k, v in i)):
print(player, score)
|
dochang/ansible-modules-core | cloud/amazon/ec2_asg.py | Python | gpl-3.0 | 34,382 | 0.005381 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
| required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace | _instances do not aready have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 500 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existent autoscaling group, the current termination policies are mantained
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Miss |
blaquee/androguard | androguard/core/bytecodes/dvm.py | Python | apache-2.0 | 256,233 | 0.016434 | # This file is part of Androguard.
#
# Copyright (C) 2012/2013/2014, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core.bytecodes.apk import APK
from androguard.core.androconf import CONF, debug, warning, is_android_raw
from androguard.util import read
import sys
import re
import struct
from struct import pack, unpack, calcsize
DEX_FILE_MAGIC_35 = 'dex\n035\x00'
DEX_FILE_MAGIC_36 = 'dex\n036\x00'
ODEX_FILE_MAGIC_35 = 'dey\n035\x00'
ODEX_FILE_MAGIC_36 = 'dey\n036\x00'
TYPE_MAP_ITEM = {
0x0: "TYPE_HEADER_ITEM",
0x1: "TYPE_STRING_ID_ITEM",
0x2: "TYPE_TYPE_ID_ITEM",
0x3: "TYPE_PROTO_ID_ITEM",
0x4: "TYPE_FIELD_ID_ITEM",
0x5: "TYPE_METHOD_ID_ITEM",
0x6: "TYPE_CLASS_DEF_ITEM",
0x1000: "TYPE_MAP_LIST",
0x1001: "TYPE_TYPE_LIST",
0x1002: "TYPE_ANNOTATION_SET_REF_LIST",
0x1003: "TYPE_ANNOTATION_SET_ITEM",
0x2000: "TYPE_CLASS_DATA_ITEM",
0x2001: "TYPE_CODE_ITEM",
0x2002: "TYPE_STRING_DATA_ITEM",
0x2003: "TYPE_DEBUG_INFO_ITEM",
0x2004: "TYPE_ANNOTATION_ITEM",
0x2005: "TYPE_ENCODED_ARRAY_ITEM",
0x2006: "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
ACCESS_FLAGS = [
(0x1, 'public'),
(0x2, 'private'),
(0x4, 'protected'),
(0x8, 'static'),
(0x10, 'final'),
(0x20, 'synchronized'),
(0x40, 'bridge'),
(0x80, 'varargs'),
(0x100, 'native'),
(0x200, 'interface'),
(0x400, 'abstract'),
(0x800, 'strictfp'),
(0x1000, 'synthetic'),
(0x4000, 'enum'),
(0x8000, 'unused'),
(0x10000, 'constructor'),
(0x20000, 'synchronized'),
]
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
def get_access_flags_string(value):
"""
Transform an access flags to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string
"""
buff = ""
for i in ACCESS_FLAGS:
if (i[0] & value) == i[0]:
buff += i[1] + " "
if buff != "":
return buff[:-1]
return buff
def get_type(atype, size=None):
"""
Retrieve the type of a descriptor (e.g : I)
"""
if atype.startswith('java.lang'):
atype = atype.replace('java.lang.', '')
res = TYPE_DESCRIPTOR.get(atype.lstrip('java.lang'))
if res is None:
if atype[0] == 'L':
res = atype[1:-1].replace('/', '.')
elif atype[0] == '[':
if size is None:
res = '%s[]' % get_type(atype[1:])
else:
res = '%s[%s]' % (get_type(atype[1:]), size)
else:
res = atype
return res
MATH_DVM_OPCODES = { "add." : '+',
"div." : '/',
"mul." : '*',
"or." : '|',
"sub." : '-',
"and." : '&',
"xor." : '^',
"shl." : "<<",
"shr." : ">>" | ,
}
FIELD_READ_DVM_OPCODES = [ ".get" ]
FIELD_WRITE_DVM_OPCODES = [ ".put" ]
BREAK_DVM_OPCODES = [ "invoke.", "move.", ".put", "if." ]
BRANCH_DVM_OPCODES = [ "throw", "throw.", "if.", "goto", "goto.", "return", "return.", "packed-switch$", "sparse-switch$" ]
def clean_name_instruction( instruction ):
op_value = instruction.get_op_value()
# goto range
if op_value >= 0x28 and op_value <= 0x2a:
return "goto"
return instruction.get_name()
def stat | ic_operand_instruction( instruction ):
buff = ""
if isinstance(instruction, Instruction):
# get instructions without registers
for val in instruction.get_literals():
buff += "%s" % val
op_value = instruction.get_op_value()
if op_value == 0x1a or op_value == 0x1b:
buff += instruction.get_string()
return buff
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def readuleb128(buff):
    """
    Decode an unsigned LEB128 number (at most five bytes, per the dex
    format) from the file-like object *buff* and return it as an int.
    """
    result = 0
    shift = 0
    while True:
        cur = ord(buff.read(1))
        if shift == 28:
            # Fifth and final byte: only the low 4 bits are legal for a
            # 32-bit value, but its full value is accumulated regardless
            # (matching the reference decoder).
            if cur > 0x0f:
                warning("possible error while decoding number")
            result |= cur << 28
            break
        result |= (cur & 0x7f) << shift
        if cur <= 0x7f:
            # High bit clear: this was the last byte.
            break
        shift += 7
    return result
def readusleb128(buff):
    """
    Decode an unsigned LEB128 number (at most five bytes) from the
    file-like object *buff* and return it as an int. Identical to
    readuleb128 but without the range warning on the final byte.
    """
    result = 0
    shift = 0
    while True:
        cur = ord(buff.read(1))
        if shift == 28:
            # Fifth and final byte contributes its full value.
            result |= cur << 28
            break
        result |= (cur & 0x7f) << shift
        if cur <= 0x7f:
            # High bit clear: this was the last byte.
            break
        shift += 7
    return result
def readuleb128p1(buff):
    """
    Decode a uleb128p1 value: stored as the ULEB128 encoding of
    (value + 1) so that -1 (dex NO_INDEX) is representable.
    """
    return readuleb128(buff) - 1
def readsleb128(buff):
    """
    Decode a signed LEB128 number (at most five bytes) from the file-like
    object *buff*, sign-extended at 32-bit width, and return it as an int.
    """
    value = 0
    shift = 0
    for _ in range(5):
        cur = ord(buff.read(1))
        value |= (cur & 0x7f) << shift
        shift += 7
        if not cur & 0x80:
            # Last byte reached: sign-extend the accumulated bits by
            # shifting them up to bit 31, reinterpreting as a signed
            # 32-bit quantity, then arithmetically shifting back down.
            pad = max(32 - shift, 0)
            value <<= pad
            if value > 0x7fffffff:
                value = (0x7fffffff & value) - 0x80000000
            value >>= pad
            break
    return value
def get_sbyte(buff):
    """Read one byte from the file-like object *buff* as a signed integer."""
    data = buff.read(1)
    return unpack('=b', data)[0]
def writeuleb128(value):
    """
    Encode a non-negative integer *value* as ULEB128 and return the raw
    byte string.
    """
    remaining = value >> 7
    # BUGFIX: was `buff = ""`, which breaks on Python 3 where pack()
    # returns bytes; a bytes literal works on both Python 2 (str) and 3.
    buff = b""
    while remaining > 0:
        # More bytes follow: emit low 7 bits with the continuation bit set.
        buff += pack("=B", (value & 0x7f) | 0x80)
        value = remaining
        remaining >>= 7
    # Final byte: continuation bit clear.
    buff += pack("=B", value & 0x7f)
    return buff
def writesleb128(value):
    """
    Encode an integer *value* (may be negative) as SLEB128 and return the
    raw byte string.
    """
    remaining = value >> 7
    hasMore = True
    # Sign word the shifted-out bits must converge to: 0 for non-negative
    # values, -1 for negative ones. BUGFIX: was `value & (-sys.maxint - 1)`,
    # which does not exist on Python 3 (no sys.maxint) and misclassified
    # positive values wider than the platform word on Python 2; a plain
    # sign test is exact for arbitrary-precision ints.
    end = -1 if value < 0 else 0
    # BUGFIX: was `buff = ""`; bytes literal works on both Python 2 and 3.
    buff = b""
    while hasMore:
        # Stop once the remaining bits equal the sign word and the sign bit
        # of the emitted byte agrees with the value's sign.
        hasMore = (remaining != end) or ((remaining & 1) != ((value >> 6) & 1))
        tmp = 0x80 if hasMore else 0
        buff += pack("=B", (value & 0x7f) | tmp)
        value = remaining
        remaining >>= 7
    return buff
def determineNext(i, end, m):
op_value = i.get_op_value()
# throw + return*
if (op_value == 0x27) or (0x0e <= op_value <= 0x11):
return [ -1 ]
# goto
elif 0x28 <= op_value <= 0x2a:
off = i.get_ref_off() * 2
return [ off + end ]
# if
elif 0x32 <= op_value <= 0x3d:
off = i.get_ref_off() * 2
return [ end + i.get_length(), off + (end) ]
# sparse/packed
elif op_value in (0x2b, 0x2c):
x = []
x.append( end + i.get_length() )
code = m.get_code().get_bc()
off = i.get_ref_off() * 2
data = code.get_ins_off( off + end )
if data != None:
for target in data.get_targ |
protonyx/labtronyx-gui | labtronyxgui/application/include/ManagerPages.py | Python | mit | 2,751 | 0.011996 | import Tkinter as Tk
import tkMessageBox
class a_ConnectToHost(Tk.Toplevel):
    """Modal dialog that collects an address and port, then reports them
    back to the owner through a callback."""

    def __init__(self, master, cb_func):
        Tk.Toplevel.__init__(self, master)

        # Callback invoked as cb_func(address, port) when the user connects.
        self.cb_func = cb_func

        self.wm_title('Connect to host...')

        Tk.Label(self, text='Connect to remote host').grid(row=0, column=0, columnspan=2)
        Tk.Label(self, text='Address or Hostname').grid(row=1, column=0)
        Tk.Label(self, text='Port').grid(row=2, column=0)

        self.txt_address = Tk.Entry(self)
        self.txt_address.grid(row=1, column=1)
        self.txt_port = Tk.Entry(self)
        self.txt_port.grid(row=2, column=1)

        Tk.Button(self, text='Cancel', command=self.cb_Cancel).grid(row=3, column=0)
        Tk.Button(self, text='Connect', command=self.cb_Add).grid(row=3, column=1)

        # Make this dialog modal
        self.focus_set()
        self.grab_set()

    def cb_Add(self):
        """Read the form, hand the values to the owner callback, then close."""
        self.cb_func(self.txt_address.get(), self.txt_port.get())
        self.destroy()

    def cb_Cancel(self):
        """Dismiss the dialog without connecting."""
        self.destroy()
class a_AddResource(Tk.Toplevel):
    """Dialog to register a new resource with a chosen controller.

    The dumped source contained stray ' | ' extraction artifacts inside
    two statements ('column=0 | )' and 'Tk.Button(sel | f, ...'), which
    made the class invalid Python; both are restored here.
    """

    def __init__(self, master, mainWindow, controllers, cb_func):
        Tk.Toplevel.__init__(self, master)

        # Store reference to parent window and its callback function.
        # cb_func is invoked as cb_func(controller, resID) on Connect.
        self.mainWindow = mainWindow
        self.cb_func = cb_func

        # Currently selected controller; defaults to the first one offered.
        self.controller = Tk.StringVar(self)
        self.controller.set(controllers[0])

        self.wm_title('Add Resource')
        Tk.Label(self, text='Controller').grid(row=0, column=0)
        Tk.Label(self, text='Resource ID').grid(row=1, column=0)
        self.lst_controller = Tk.OptionMenu(self, self.controller, *controllers)
        self.lst_controller.grid(row=0, column=1, columnspan=2)
        self.txt_resID = Tk.Entry(self)
        self.txt_resID.grid(row=1, column=1, columnspan=2)
        Tk.Button(self, text='Cancel', command=lambda: self.cb_Cancel()).grid(row=2, column=1)
        Tk.Button(self, text='Connect', command=lambda: self.cb_Add()).grid(row=2, column=2)

        # Make this dialog modal
        #self.focus_set()
        #self.grab_set()

    def cb_Add(self):
        """Report the chosen controller/resource to the owner, refresh the
        parent's tree view, then close."""
        controller = self.controller.get()
        resID = self.txt_resID.get()
        self.cb_func(controller, resID)
        # Refresh treeview
        self.mainWindow.rebuildTreeview()
        # Close this window
        self.destroy()

    def cb_Cancel(self):
        """Dismiss the dialog without adding anything."""
        self.destroy()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.