repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
kevenli/scrapydd
|
scrapydd/migrates/versions/018_nodes_is_deleted.py
|
Python
|
apache-2.0
| 458
| 0.002183
|
from sqlalchemy im
|
port *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
nodes_table = Table('nodes', meta, autoload=True)
nodes_table_is_deleted = Column('is_deleted', Integer, default=False)
nodes_table_is_deleted.cr
|
eate(nodes_table)
def downgrade(migrate_engine):
meta.bind = migrate_engine
nodes_table = Table('nodes', meta, autoload=True)
nodes_table.c['is_deleted'].drop()
|
nkgilley/home-assistant
|
homeassistant/components/bluesound/const.py
|
Python
|
apache-2.0
| 240
| 0.004167
|
"""Constants for t
|
he Bluesound HiFi wireless speakers and audio integrations component."""
DOMAIN = "bluesound"
SERVICE_CLEAR_TIMER =
|
"clear_sleep_timer"
SERVICE_JOIN = "join"
SERVICE_SET_TIMER = "set_sleep_timer"
SERVICE_UNJOIN = "unjoin"
|
crazy-canux/xomnibus
|
omnibus/base.py
|
Python
|
apache-2.0
| 6,156
| 0.000162
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright (C) 2015 Canux CHENG #
# All rights reserved #
# Name: base.py
# Author: Canux canuxcheng@gmail.com #
# Version: V1.0 #
# Time: Tue 11 Aug 2015 04:28:53 AM EDT
######################################################################
# Description:
######################################################################
import logging
import argparse
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import omnibus
class OmnibusAuto(object):
"""This is the base class for this project."""
# Initialize a new class.
def __init__(self, name=None, version='', description=''):
# Init the basic information.
self.name = os.path.basename(sys.argv[0]) if not name else name
self.version = omnibus.__version__
self.description = omnibus.__description__
self.username = os.getenv("USER")
self.path = "/home/%s/GIT/omnibus/db" % self.username
# Init the logger.
logging.basicConfig(format='[%(levelname)s] (%(module)s) %(message)s')
self.logger = logging.getLogger("omnibus")
self.logger.setLevel(logging.INFO)
# Init the arguments.
self.__define_options()
self.define_sub_options()
self.__parse_options()
if self.args.debug:
self.logger.setLevel(logging.DEBUG)
self.logger.debug("==== BEGIN DEBUG ====")
self.logger.debug("name: %s", self.name)
self.logger.debug("version: %s", self.version)
self.logger.debug("description: %s", self.description)
# Get the path.
self.cur = os.getcwd()
self.conf = "/".join(self.cur.split("/")[:-1]) + "/etc"
self.user = self.path + "/users"
self.logger.debug("self.path = {}".format(self.path))
self.logger.debug("self.cur = {}".format(self.cur))
self.logger.debug("self.conf = {}".format(self.conf))
if self.__class__.__name__ == "Omnibus":
self.logger.debug("==== END DEBUG ====")
def __define_options(self):
"""Define parser instant and basic options."""
# Define parser instant.
self.parser = argparse.ArgumentParser(description=self.description)
# Define the basic options.
self.parser.add_argument("-V", "--version",
action="version",
version="%s %s" % (self.name, self.version),
help="Show version.")
self.parser.add_argument("-D", "--debug",
action="store_true",
required=False,
help="Show debug information.",
dest="debug")
# Define alternative options for main program.
self.group_parser = self.parser.add_argument_group('Alternative',
|
'If not define, use\
default value.')
self.group_parser.add_argument("-F", "--force",
action="store_true",
|
required=False,
help="If file exist, rewrite it.",
dest="force")
self.group_parser.add_argument("-P", "--path",
default="%s" % self.path,
required=False,
help="Default is %(default)s",
dest="path")
def define_sub_options(self):
# Define sub parser for different actions.
self.deploy_parser = self.parser.add_argument_group('Deploy options',
'Options for\
deploy to remote\
server.')
self.subparsers = self.parser.add_subparsers(title='Action options',
description='Make choice',
help='Choose your action')
def __parse_options(self):
try:
self.args = self.parser.parse_args()
except Exception as e:
self.error("__parse_options : %s" % e)
def input(self, tips):
"""Used for choice your options."""
positive = ["Y", "y", "yes", "YES"]
negtive = ["N", "n", "no", "NO"]
choice = raw_input(tips)
if choice in positive:
return 0
elif choice in negtive:
return 1
else:
self.error("input: please use regular char.")
def error(self, msg):
"""When error print some message and exit the program."""
raise OmnibusError(msg)
def not_exist(self, msg):
"""When remove file and it's not exist take a warning."""
comment = "--------------------------------------"
print "%s%s" % (comment, comment)
print "%s not exist." % msg
print "%s%s" % (comment, comment)
def already_exist(self, msg):
"""When create file and it's exist take a warning."""
comment = "++++++++++++++++++++++++++++++++++++++"
print "%s%s" % (comment, comment)
print "%s already exist." % msg
print "%s%s" % (comment, comment)
def delete_blank_line(self, src, des):
"""Delete the blank line in a file."""
inf = open(src, "r")
out = open(des, "w")
lines = inf.readlines()
for line in lines:
if line.split():
out.writelines(line)
inf.close()
out.close()
class OmnibusError(Exception):
def __init__(self, msg):
print "Error - %s" % msg
raise SystemExit(-1)
|
google/gae-secure-scaffold-python3
|
src/securescaffold/factory.py
|
Python
|
apache-2.0
| 3,574
| 0
|
# Copyright 2020 Google LLC
#
# Li
|
censed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U
|
nless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import secrets
from typing import Optional
import flask
import flask_seasurf
import flask_talisman
from google.cloud import ndb
class AppConfig(ndb.Model):
"""Datastore model for storing app-wide configuration.
This is used by `create_app` to save a random value for SECRET_KEY that
persists across application startup, rather than defining SECRET_KEY in
your source code.
"""
SINGLETON_ID = "config"
secret_key = ndb.StringProperty()
@classmethod
def singleton(cls) -> "AppConfig":
"""Create a datastore entity to store app-wide configuration."""
config = cls.initial_config()
obj = cls.get_or_insert(cls.SINGLETON_ID, **config)
return obj
@classmethod
def initial_config(cls) -> dict:
"""Initial values for app configuration."""
config = {
"secret_key": secrets.token_urlsafe(16),
}
return config
def create_app(*args, **kwargs) -> flask.Flask:
"""Create a Flask app with secure default behaviours.
:return: A Flask application.
:rtype: Flask
"""
app = flask.Flask(*args, **kwargs)
configure_app(app)
# Both these extensions can be used as view decorators. Bit worried that
# this circular reference will cause memory leaks.
talisman_kwargs = get_talisman_config(app.config)
app.talisman = flask_talisman.Talisman(app, **talisman_kwargs)
app.csrf = flask_seasurf.SeaSurf(app)
return app
def configure_app(app: flask.Flask) -> None:
"""Read configuration and create a SECRET_KEY.
The configuration is read from "securescaffold.settings", and from the
filename in the "FLASK_SETTINGS_FILENAME" environment variable (if
it exists).
If there is no SECRET_KEY setting, then a random string is generated,
saved in the datastore, and set.
:param Flask app: The Flask app that requires configuring.
:return: None
"""
app.config.from_object("securescaffold.settings")
app.config.from_envvar("FLASK_SETTINGS_FILENAME", silent=True)
if not app.config["SECRET_KEY"]:
config = get_config_from_datastore()
app.config["SECRET_KEY"] = config.secret_key
def get_config_from_datastore() -> AppConfig:
# This happens at application startup, so we use a new NDB context.
client = ndb.Client()
with client.context():
obj = AppConfig.singleton()
return obj
def get_talisman_config(config: dict) -> dict:
"""Get a dict of keyword arguments to configure flask-talisman."""
# Talisman doesn't read settings from the Flask app config.
names = {
"CSP_POLICY": "content_security_policy",
"CSP_POLICY_NONCE_IN": "content_security_policy_nonce_in",
"CSP_POLICY_REPORT_ONLY": "content_security_policy_report_only",
"CSP_POLICY_REPORT_URI": "content_security_policy_report_uri",
}
result = {kwarg: config[setting] for setting, kwarg in names.items()}
return result
|
chebhou/Text-from-to-.XML
|
text_io_xml.py
|
Python
|
gpl-2.0
| 3,503
| 0.015701
|
bl_info = {
"name" : "text objects to-from xml",
"author" : "chebhou",
"version" : (1, 0),
"blender" : (2, 7, 3),
"location" : "file->export->text to-from xml",
"discription" : "copys an text objectx from-to xml file",
"wiki_url" : " https://github.com/chebhou",
"tracker_url" : "https://github.com/chebhou",
"category" : "Import-Export"
}
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
from bpy.props import EnumProperty, BoolProperty
from xml.dom import minidom
from xml.dom.minidom import Document
def txt_sync(filepath):
dom = minidom.parse(filepath)
scenes =dom.getElementsByTagName('scene')
for scene in scenes:
scene_name=scene.getAttribute('name')
print("\n",scene_name)
bl_scene = bpy.data.scenes[scene_name]
txt_objs =scene.getElementsByTagName('object')
for obj in txt_objs:
obj_name = obj.getAttribute('name')
obj_body = obj.childNodes[0].nodeValue
bl_obj = bl_scene.objects[obj_name].data.body = obj_body
print(obj_name," ",obj_body)
def txt_export(filepath):
doc = Document()
root = doc.createElement('data')
doc.appendChild(root)
for sce in bpy.data.scenes :
#create a scene
scene = doc.createElement('scene')
scene.setAttribute('name', sce.name)
root.appendChild(scene)
for obj in sce.objects :
if obj.type == 'FONT':
#add object element
object = doc.createElement('object')
object.setAttribute('name', obj.name)
txt_node = doc.createTe
|
xtNode(obj.data.body)
object.appendChild(txt_node)
scene.appendChild(object)
#write to a file
file
|
_handle = open(filepath,"wb")
file_handle.write(bytes(doc.toprettyxml(indent='\t'), 'UTF-8'))
file_handle.close()
class text_export(Operator, ExportHelper):
"""write and read text objects to a file"""
bl_idname = "export_scene.text_xml"
bl_label = "text from-to xml"
bl_options = {'REGISTER', 'UNDO'} #should remove undo ?
# ExportHelper mixin class uses this
filename_ext = ".xml"
#parameters and variables
convert = EnumProperty(
name="Convert",
description="Choose conversion",
items=(('W', "write objects", "write text objects to xml"),
('R', "read objects", "read text objects from xml")),
default='W',
)
#main function
def execute(self, context):
bpy.ops.object.mode_set(mode = 'OBJECT')
if self.convert == 'W':
txt_export(self.filepath)
else:
txt_sync(self.filepath)
bpy.context.scene.update()
self.report({'INFO'},"Conversion is Done")
return {'FINISHED'}
def menu_func_export(self, context):
self.layout.operator(text_export.bl_idname, text="Text to-from xml")
def register():
bpy.utils.register_class(text_export)
bpy.types.INFO_MT_file_export.append(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(text_export)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_export)
if __name__ == "__main__":
register()
|
eli261/jumpserver
|
apps/settings/serializers.py
|
Python
|
gpl-2.0
| 1,295
| 0.003089
|
from rest_framework import serializers
class MailTestSerializer(serializers.Serializer):
EMAIL_HOST = serializers.CharField(max_length=1024, required=True)
EMAIL
|
_PORT = serializers.IntegerField(default=25)
EMAIL_HOST_USER = serializers.Ch
|
arField(max_length=1024)
EMAIL_HOST_PASSWORD = serializers.CharField(required=False, allow_blank=True)
EMAIL_FROM = serializers.CharField(required=False, allow_blank=True)
EMAIL_USE_SSL = serializers.BooleanField(default=False)
EMAIL_USE_TLS = serializers.BooleanField(default=False)
class LDAPTestSerializer(serializers.Serializer):
AUTH_LDAP_SERVER_URI = serializers.CharField(max_length=1024)
AUTH_LDAP_BIND_DN = serializers.CharField(max_length=1024, required=False, allow_blank=True)
AUTH_LDAP_BIND_PASSWORD = serializers.CharField(required=False, allow_blank=True)
AUTH_LDAP_SEARCH_OU = serializers.CharField()
AUTH_LDAP_SEARCH_FILTER = serializers.CharField()
AUTH_LDAP_USER_ATTR_MAP = serializers.CharField()
AUTH_LDAP_START_TLS = serializers.BooleanField(required=False)
class LDAPUserSerializer(serializers.Serializer):
id = serializers.CharField()
username = serializers.CharField()
email = serializers.CharField()
existing = serializers.BooleanField(read_only=True)
|
sunqm/pyscf
|
pyscf/mcscf/test/test_n2_df.py
|
Python
|
apache-2.0
| 10,791
| 0.00834
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import scipy.linalg
from pyscf import gto
from pyscf import scf
from pyscf import df
from pyscf import ao2mo
from pyscf import mcscf
b = 1.4
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
max_memory = 1,
)
m = scf.RHF(mol)
m.conv_tol = 1e-9
m.scf()
molsym = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
max_memory = 1,
symmetry = True,
)
msym = scf.RHF(molsym)
msym.conv_tol = 1e-9
msym.scf()
def tearDownModule():
global mol, molsym, m, msym
mol.stdout.close()
molsym.stdout.close()
del mol, molsym, m, msym
class KnownValues(unittest.TestCase):
def test_mc1step_4o4e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(m, 4, 4), auxbasis='weigend')
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc2step_4o4e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(m, 4, 4), auxbasis='weigend')
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc1step_4o4e_df(self):
mc = mcscf.DFCASSCF(m, 4, 4, auxbasis='weigend')
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.9105231091045, 7)
def test_mc2step_4o4e_df(self):
mc = mcscf.density_fit(mcscf.CASSCF(m, 4, 4), auxbasis='weigend')
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.91052310869014, 7)
def test_mc1step_6o6e_high_cost(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(m, 6, 6), auxbasis='weigend')
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc2step_6o6e_high_cost(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(m, 6, 6), auxbasis='weigend')
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc1step_symm_4o4e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(msym, 4, 4), auxbasis='weigend')
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc2step_symm_4o4e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(msym, 4, 4), auxbasis='weigend')
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc1step_symm_6o6e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(msym, 6, 6), auxbasis='weigend')
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc2step_symm_6o6e(self):
mc = mcscf.approx_hessian(mcscf.CASSCF(msym, 6, 6), auxbasis='weigend')
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_casci_4o4e(self):
mc = mcscf.CASCI(m.density_fit('weigend'), 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.88669369639578, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910276344981119, 4)
def test_casci_symm_4o4e(self):
mc = mcscf.CASCI(msym.density_fit('weigend'), 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.88669369639578, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910276344981119, 4)
def test_casci_4o4e_1(self):
mc = mcscf.DFCASCI(m.density_fit('weigend'), 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.88669369639578, 7)
def test_casci_symm_4o4e_1(self):
mc = mcscf.DFCASCI(msym.density_fit('weigend'), 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.88669369639578, 7)
def test_casci_from_uhf(self):
mf = scf.UHF(mol).run()
mc = mcscf.CASCI(mf.density_fit('weigend'), 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.88669369639578, 6)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910275883606078, 4)
def test_casci_from_uhf1(self):
mf = scf.UHF(mol)
mf.scf()
mc = mcscf.approx_hessian(mcscf.CASSCF(mf, 4, 4))
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
def test_df_ao2mo(self):
mf = scf.density_fit(msym, auxbasis='weigend')
mf.max_memory = 100
mf.kernel()
mc = mcscf.DFCASSCF(mf, 4, 4)
with df.load(mf._cderi) as feri:
cderi = numpy.asarray(feri)
eri0 = numpy.dot(cderi.T, cderi)
nmo = mc.mo_coeff.shape[1]
|
ncore = mc.ncore
nocc = ncore + mc.ncas
eri0 = ao2mo.restore(1, ao2mo.kernel(eri0, mc.mo_coeff), nmo)
eris = mc.ao2mo(mc.mo_coeff)
self.assertTrue(numpy.a
|
llclose(eri0[:,:,ncore:nocc,ncore:nocc], eris.ppaa))
self.assertTrue(numpy.allclose(eri0[:,ncore:nocc,:,ncore:nocc], eris.papa))
def test_assign_cderi(self):
nao = molsym.nao_nr()
w, u = scipy.linalg.eigh(mol.intor('int2e_sph', aosym='s4'))
idx = w > 1e-9
mf = scf.density_fit(scf.RHF(molsym))
mf._cderi = (u[:,idx] * numpy.sqrt(w[idx])).T.copy()
mf.kernel()
mc = mcscf.DFCASSCF(mf, 6, 6)
mc.kernel()
self.assertAlmostEqual(mc.e_tot, -108.98010545803884, 7)
def test_init(self):
from pyscf.mcscf import df
mf = scf.RHF(mol)
self.assertTrue(isinstance(mcscf.CASCI(mf, 2, 2), mcscf.casci.CASCI))
self.assertTrue(isinstance(mcscf.CASCI(mf.density_fit(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.CASCI(mf.newton(), 2, 2), mcscf.casci.CASCI))
self.assertTrue(isinstance(mcscf.CASCI(mf.density_fit().newton(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.CASCI(mf.newton().density_fit(), 2, 2), mcscf.casci.CASCI))
self.assertTrue(isinstance(mcscf.CASCI(mf.density_fit().newton().density_fit(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf, 2, 2), mcscf.mc1step.CASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf.density_fit(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf.newton(), 2, 2), mcscf.mc1step.CASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf.density_fit().newton(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf.newton().density_fit(), 2, 2), mcscf.mc1step.CASSCF))
self.assertTrue(isinstance(mcscf.CASSCF(mf.density_fit().newton().density_fit(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.DFCASCI(mf, 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.DFCASCI(mf.density_fit(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.DFCASCI(mf.newton(), 2, 2), df._DFCASSCF))
self.assertTrue(isinstance(mcscf.DFCASCI(mf.density
|
michaelbrooks/django-stream-analysis
|
stream_analysis/management/commands/cleanup_streams.py
|
Python
|
mit
| 300
| 0.003333
|
from django.core.management.base import BaseCommand
from stream_analysis.utils
|
import cleanup
class Command(BaseCommand):
"""
Removes streaming data we no longer need.
"""
help = "Removes streaming data we no longer need."
def ha
|
ndle(self, *args, **options):
cleanup()
|
dana-i2cat/felix
|
vt_manager/src/python/vt_manager/tests/testcreatesync.py
|
Python
|
apache-2.0
| 407
| 0.012285
|
from vt_manager.mod
|
els import *
import os
import xmlrpc
|
lib
am = xmlrpclib.Server("https://expedient:expedient@192.168.254.193:8445/xmlrpc/plugin")
#xml = open("/opt/ofelia/vt_manager/src/python/vt_manager/tests/xmltest.xml", "r").read()
xml = open(os.path.join(os.path.dirname(__file__), "xmltest.xml"), "r").read()
am.send_sync("https://expedient:expedient@llull.ctx.i2cat.net:38445/xmlrpc/plugin/",xml)
|
Ziqi-Li/bknqgis
|
pandas/doc/plots/stats/moments_rolling_binary.py
|
Python
|
gpl-2.0
| 617
| 0
|
from moment_plots import *
n
|
p.random.seed(1)
ts = test_series()
s = ts.cumsum()
ts2 = test_series()
s2 = ts2.cumsum()
s[20:50] = np.NaN
s[120:150] = np.NaN
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
ax0, ax1, ax2 = axes
ax0.plot(s.index, s.values)
ax0.plot(s2.index, s2.values)
ax0.set_title('time series')
ax1.plot(s.index, m.rolling_corr(s, s2, 50, min_periods=1).values)
ax1.set_title('rolling_corr')
ax2.plot(s.index, m.rolling_cov(s, s2, 50, min_periods=1).values)
ax2.set_title('rolling_cov')
fig.autofmt_xda
|
te()
fig.subplots_adjust(bottom=0.10, top=0.95)
plt.show()
plt.close('all')
|
jesuscript/topo-mpi
|
topo/projection/basic.py
|
Python
|
bsd-3-clause
| 15,525
| 0.012367
|
"""
Repository for the basic Projection types, without any special
dependencies or requirements.
$Id$
"""
__version__ = "$Revision$"
from copy import copy
from numpy import exp,ones,zeros,array,nonzero
import param
# So all Projections are present in this package
from topo.base.projection import Projection
from topo.base.boundingregion import BoundingBox
from topo.base.sheet import activity_type
from topo.base.sheetcoords import Slice
from topo.base.cf import CFProjection,MPI_CFProjection, ConnectionField,MaskedCFIter, CFPLearningFn,CFPLF_Identity,CFPOutputFn,CFIter,ResizableCFProjection
from topo.base.patterngenerator import PatternGenerator,Constant
from topo.base.functionfamily import CoordinateMapperFn,IdentityMF
from topo.misc.util import rowcol2idx
from topo.transferfn.basic import TransferFn,IdentityTF
from topo.learningfn.basic import LearningFn,IdentityLF
from topo.base import patterngenerator
class CFPOF_SharedWeight(CFPOutputFn):
"""
CFPOutputFn for use with SharedWeightCFProjections.
Applies the single_cf_fn to the single shared CF's weights.
"""
single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF())
# CEBALERT: remove norm_values?
def __call__(self, cfs, norm_values=None, **params):
"""Apply the specified single_cf_fn to every CF."""
if type(self.single_cf_fn) is not IdentityTF:
cf = cfs[0,0]
self.single_cf_fn(cf.weights)
from topo.base.cf import _create_mask
class SharedWeightCF(ConnectionField):
__slots__ = []
def __init__(self,cf,input_sheet,x=0.0,y=0.0,template=BoundingBox(radius=0.1),
mask=patterngenerator.Constant(),
min_matrix_radius=1):
"""
From an existing copy of ConnectionField (CF) that acts as a
template, create a new CF that shares weights with the
template CF. Copies all the properties of CF to stay
identical except the weights variable that actually contains
the data.
The only difference from a normal CF is that the weights of
the CF are implemented as a numpy view into the single master
copy of the weights stored in the CF template.
"""
# CEBALERT: There's no call to super's __init__; see JAHACKALERT
# below.
template = copy(template)
if not isinstance(template,Slice):
template = Slice(template,input_sheet,force_odd=True,
min_matrix_radius=min_matrix_radius)
# Note: if passed in, mask is shared between CFs (but not if created here)
if not hasattr(mask,'view'):
mask = _create_mask(patterngenerator.Constant(),
template.compute_bounds(input_sheet),
input_sheet,True,0.5)
self._has_norm_total=False
self.mask=mask
weights_slice = self._create_input_sheet_slice(input_sheet,x,y,template,min_matrix_radius=min_matrix_radius)
self.weights = weights_slice.submatrix(cf.weights)
# JAHACKALERT the TransferFn cannot be applied in SharedWeightCF
# - another inconsistency in the class tree design - there
# should be nothing in the parent class that is ignored in its
# children. Probably need to extract some functionality of
# Con
|
nectionField into a shared abstract parent class.
# We have agreed to make this right by adding a constant property that
# will be set true if the learning should be active
|
# The SharedWeightCFProjection class and its anccestors will
# have this property set to false which means that the
# learning will be deactivated
class SharedWeightCFProjection(CFProjection):
"""
A Projection with a single set of weights, shared by all units.
Otherwise similar to CFProjection, except that learning is
currently disabled.
"""
### JABHACKALERT: Set to be constant as a clue that learning won't
### actually work yet, but we could certainly extend it to support
### learning if desired, e.g. to learn position-independent responses.
learning_fn = param.ClassSelector(CFPLearningFn,CFPLF_Identity(),constant=True)
weights_output_fns = param.HookList(default=[CFPOF_SharedWeight()])
precedence = param.Number(default=0.5)
def __init__(self,**params):
"""
Initialize the Projection with a single cf_type object
(typically a ConnectionField),
"""
# We don't want the whole set of cfs initialized, but we
# do want anything that CFProjection defines.
super(SharedWeightCFProjection,self).__init__(initialize_cfs=False,**params)
# We want the sharedcf to be located on the grid, so use the
# center of a unit
sheet_rows,sheet_cols=self.src.shape
# arbitrary (e.g. could use 0,0)
center_row,center_col = sheet_rows/2,sheet_cols/2
center_unitxcenter,center_unitycenter=self.src.matrixidx2sheet(center_row,
center_col)
self.__sharedcf=self.cf_type(self.src,
x=center_unitxcenter,
y=center_unitycenter,
template=self._slice_template,
weights_generator=self.weights_generator,
mask=self.mask_template,
output_fns=[wof.single_cf_fn for wof in self.weights_output_fns],
min_matrix_radius=self.min_matrix_radius)
self._create_cfs()
def _create_cf(self,x,y):
x_cf,y_cf = self.coord_mapper(x,y)
# Does not pass the mask, as it would have to be sliced
# for each cf, and is only used for learning.
CF = SharedWeightCF(self.__sharedcf,self.src,x=x_cf,y=y_cf, #JUDE ADDED
template=self._slice_template,
min_matrix_radius=self.min_matrix_radius,
mask=self.mask_template)
return CF
def learn(self):
"""
Because of how output functions are applied, it is not currently
possible to use learning functions and learning output functions for
SharedWeightCFProjections, so we disable them here.
"""
pass
def apply_learn_output_fns(self,active_units_mask=True):
"""
Because of how output functions are applied, it is not currently
possible to use learning functions and learning output functions for
SharedWeightCFProjections, so we disable them here.
"""
pass
def n_bytes(self):
return self.activity.nbytes + self.__sharedcf.weights.nbytes + \
sum([cf.input_sheet_slice.nbytes
for cf,i in CFIter(self)()])
# JABALERT: Can this be replaced with a CFProjection with a Hysteresis output_fn?
# If not it should probably be replaced with a new output_fn type instead.
class LeakyCFProjection(ResizableCFProjection):
"""
A projection that has a decay_rate parameter so that incoming
input is decayed over time as x(t) = input + x(t-1)*exp(-decay_rate),
and then the weighted sum of x(t) is calculated.
"""
decay_rate = param.Number(default=1.0,bounds=(0,None),
doc="Input decay rate for each leaky synapse")
precedence = param.Number(default=0.4)
def __init__(self,**params):
super(LeakyCFProjection,self).__init__(**params)
self.leaky_input_buffer = zeros(self.src.activity.shape)
def activate(self,input_activity):
"""
Retain input_activity from the previous step in leaky_input_buffer
and add a leaked version of it to the current input_activity. This
function needs to deal with a finer time-scale.
"""
self.leaky_input_buffer = input_activity + self.leaky_input_buffer*exp(-self.decay_rate)
|
s20121035/rk3288_android5.1_repo
|
external/chromium_org/chrome/test/chromedriver/test/webserver.py
|
Python
|
gpl-3.0
| 6,705
| 0.00865
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import threading
class Responder(object):
"""Sends a HTTP response. Used with TestWebServer."""
def __init__(self, handler):
self._handler = handler
def SendResponse(self, body):
"""Sends OK response with body."""
self.SendHeaders(len(body))
self.SendBody(body)
def SendResponseFromFile(self, path):
"""Sends OK response with the given file as the body."""
with open(path, 'r') as f:
self.SendResponse(f.read())
def SendHeaders(self, content_length=None):
"""Sends headers for OK response."""
self._handler.send_response(200)
if content_length:
self._handler.send_header('Content-Length', content_length)
self._handler.end_headers()
def SendError(self, code):
"""Sends response for the given HTTP error code."""
self._handler.send_error(code)
def SendBody(self, body):
"""Just sends the body, no headers."""
self._handler.wfile.write(body)
class Request(object):
"""An HTTP request."""
def __init__(self, handler):
self._handler = handler
def GetPath(self):
return self._handler.path
def GetHeader(self, name):
return self._handler.headers.getheader(name)
class _BaseServer(BaseHTTPServer.HTTPServer):
"""Internal server that throws if timed out waiting for a request."""
def __init__(self, on_request, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
server_cert_and_key_path: path to a PEM file containing the cert and key.
if it is None, start the server as an HTTP one.
"""
class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Internal handler that just asks the server to handle the request."""
def do_GET(self):
if self.path.endswith('favicon.ico'):
self.send_error(404)
return
on_request(Request(self), Responder(self))
def log_message(self, *args, **kwargs):
"""Overriddes base class method to disable logging."""
pass
BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
if server_cert_and_key_path is not None:
self._is_https_enabled = True
self._server.socket = ssl.wrap_socket(
self._server.socket, certfile=server_cert_and_key_path,
server_side=True)
else:
self._is_https_enabled = False
def handle_timeout(self):
"""Overridden from SocketServer."""
raise RuntimeError('Timed out waiting for http request')
def GetUrl(self):
"""Returns the base URL of the server."""
postfix = '://127.0.0.1:%s' % self.server_port
if self._is_https_enabled:
return 'https' + postfix
return 'http' + postfix
class WebServer(object):
  """An HTTP or HTTPS server that serves on its own thread.

  Serves files from given directory but may use custom data for specific paths.
  """

  def __init__(self, root_dir, server_cert_and_key_path=None):
    """Starts the server.

    It is an HTTP server if parameter server_cert_and_key_path is not provided.
    Otherwise, it is an HTTPS server.

    Args:
      root_dir: root path to serve files from. This parameter is required.
      server_cert_and_key_path: path to a PEM file containing the cert and key.
          if it is None, start the server as an HTTP one.
    """
    self._root_dir = os.path.abspath(root_dir)
    self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
    self._thread = threading.Thread(target=self._server.serve_forever)
    self._thread.daemon = True
    self._thread.start()
    # Maps URL path -> canned response data / handler callback. Guarded by
    # _path_maps_lock because requests arrive on the server thread.
    self._path_data_map = {}
    self._path_callback_map = {}
    self._path_maps_lock = threading.Lock()

  def _OnRequest(self, request, responder):
    path = request.GetPath().split('?')[0]

    # Serve from path -> callback and data maps.
    with self._path_maps_lock:
      if path in self._path_callback_map:
        body = self._path_callback_map[path](request)
        if body:
          responder.SendResponse(body)
        else:
          responder.SendError(503)
        return

      if path in self._path_data_map:
        responder.SendResponse(self._path_data_map[path])
        return

    # Serve from file; reject any path that escapes the root directory.
    path = os.path.normpath(
        os.path.join(self._root_dir, *path.split('/')))
    if not path.startswith(self._root_dir):
      responder.SendError(403)
      return
    if not os.path.exists(path):
      responder.SendError(404)
      return
    responder.SendResponseFromFile(path)

  def SetDataForPath(self, path, data):
    """Registers canned response data for the given URL path."""
    with self._path_maps_lock:
      self._path_data_map[path] = data

  def SetCallbackForPath(self, path, func):
    """Registers a callback producing the response body for the given path."""
    with self._path_maps_lock:
      self._path_callback_map[path] = func

  def GetUrl(self):
    """Returns the base URL of the server."""
    return self._server.GetUrl()

  def Shutdown(self):
    """Shuts down the server synchronously."""
    self._server.shutdown()
    self._thread.join()
class SyncWebServer(object):
  """WebServer for testing.

  Incoming requests are blocked until explicitly handled.
  This was designed for single thread use. All requests should be handled on
  the same thread.
  """

  def __init__(self):
    self._server = _BaseServer(self._OnRequest)
    # Recognized by SocketServer; handle_request gives up after this many
    # seconds and _BaseServer.handle_timeout turns that into an error.
    self._server.timeout = 10
    self._pending_handler = None

  def _OnRequest(self, request, responder):
    # Invoke the handler first, then clear it so Respond's loop terminates.
    self._pending_handler(responder)
    self._pending_handler = None

  def Respond(self, on_request):
    """Blocks until request comes in, then calls given handler function.

    Args:
      on_request: Function that handles the request. Invoked with single
          parameter, an instance of Responder.
    """
    if self._pending_handler:
      raise RuntimeError('Must handle 1 request at a time.')

    self._pending_handler = on_request
    while self._pending_handler:
      # Don't use handle_one_request, because it won't work with the timeout.
      self._server.handle_request()

  def RespondWithContent(self, content):
    """Blocks until request comes in, then handles it with the given content."""
    self.Respond(lambda responder: responder.SendResponse(content))

  def GetUrl(self):
    """Returns the base URL of the server."""
    return self._server.GetUrl()
|
diogocs1/comps
|
web/addons/account/tests/test_tax.py
|
Python
|
apache-2.0
| 1,740
| 0.000575
|
from openerp.tests.common import TransactionCase
class TestTax(TransactionCase):
    """Tests for taxes (account.tax)

    We don't really need at this point to link taxes to tax codes
    (account.tax.code) nor to companies (base.company) to check computation
    results.
    """

    def setUp(self):
        super(TestTax, self).setUp()
        self.tax_model = self.registry('account.tax')

    def test_programmatic_tax(self):
        """A python-coded tax applies its computed amount per unit."""
        cr, uid = self.cr, self.uid
        tax_id = self.tax_model.create(cr, uid, dict(
            name="Programmatic tax",
            type='code',
            python_compute='result = 12.0',
            python_compute_inv='result = 11.0',
        ))

        tax_records = self.tax_model.browse(cr, uid, [tax_id])
        res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)

        # 12.0 per unit * quantity 2 = 24.0 of tax on a 100.0 base.
        tax_detail = res['taxes'][0]
        self.assertEquals(tax_detail['amount'], 24.0)
        self.assertEquals(res['total_included'], 124.0)

    def test_percent_tax(self):
        """Test computations done by a 10 percent tax."""
        cr, uid = self.cr, self.uid
        tax_id = self.tax_model.create(cr, uid, dict(
            name="Percent tax",
            type='percent',
            amount='0.1',
        ))

        tax_records = self.tax_model.browse(cr, uid, [tax_id])
        res = self.tax_model.compute_all(cr, uid, tax_records, 50.0, 2)

        tax_detail = res['taxes'][0]
        self.assertEquals(tax_detail['amount'], 10.0)
        self.assertEquals(res['total_included'], 110.0)

        # now the inverse computation
        res = self.tax_model.compute_inv(cr, uid, tax_records, 55.0, 2)
        self.assertEquals(res[0]['amount'], 10.0)
|
NINAnor/QGIS
|
python/plugins/processing/gui/ConfigDialog.py
|
Python
|
gpl-2.0
| 10,846
| 0.00083
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ConfigDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import uic
from PyQt4.QtCore import Qt, QEvent, QPyNullVariant
from PyQt4.QtGui import (QFileDialog, QDialog, QIcon, QStyle,
QStandardItemModel, QStandardItem, QMessageBox, QStyledItemDelegate,
QLineEdit, QWidget, QToolButton, QHBoxLayout,
QComboBox)
from qgis.gui import QgsDoubleSpinBox, QgsSpinBox
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.core.Processing import Processing
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgConfig.ui'))
class ConfigDialog(BASE, WIDGET):
    """Settings dialog for the Processing framework.

    Shows every registered ProcessingConfig setting in a two-column tree
    (setting / value) and writes edited values back when accepted.
    """

    def __init__(self, toolbox):
        super(ConfigDialog, self).__init__(None)
        self.setupUi(self)

        self.toolbox = toolbox

        self.groupIcon = QIcon()
        self.groupIcon.addPixmap(self.style().standardPixmap(
            QStyle.SP_DirClosedIcon), QIcon.Normal, QIcon.Off)
        self.groupIcon.addPixmap(self.style().standardPixmap(
            QStyle.SP_DirOpenIcon), QIcon.Normal, QIcon.On)

        # setPlaceholderText is not available in every supported Qt build.
        if hasattr(self.searchBox, 'setPlaceholderText'):
            self.searchBox.setPlaceholderText(self.tr('Search...'))

        self.model = QStandardItemModel()
        self.tree.setModel(self.model)

        self.delegate = SettingDelegate()
        self.tree.setItemDelegateForColumn(1, self.delegate)

        self.searchBox.textChanged.connect(self.fillTree)

        self.fillTree()

        self.tree.expanded.connect(self.adjustColumns)

    def fillTree(self):
        """Rebuilds the settings tree, filtered by the search box text."""
        self.items = {}
        self.model.clear()
        self.model.setHorizontalHeaderLabels([self.tr('Setting'),
                                              self.tr('Value')])

        text = unicode(self.searchBox.text())
        settings = ProcessingConfig.getSettings()

        rootItem = self.model.invisibleRootItem()

        # These groups are inserted at row 0 one after another so they end up
        # at the top; all remaining groups are nested under 'Providers'.
        priorityKeys = [self.tr('General'), self.tr('Models'), self.tr('Scripts')]
        for group in priorityKeys:
            groupItem = QStandardItem(group)
            icon = ProcessingConfig.getGroupIcon(group)
            groupItem.setIcon(icon)
            groupItem.setEditable(False)
            emptyItem = QStandardItem()
            emptyItem.setEditable(False)
            rootItem.insertRow(0, [groupItem, emptyItem])
            for setting in settings[group]:
                if setting.hidden:
                    continue
                if text == '' or text.lower() in setting.description.lower():
                    labelItem = QStandardItem(setting.description)
                    labelItem.setIcon(icon)
                    labelItem.setEditable(False)
                    self.items[setting] = SettingItem(setting)
                    groupItem.insertRow(0, [labelItem, self.items[setting]])
            if text != '':
                self.tree.expand(groupItem.index())

        providersItem = QStandardItem(self.tr('Providers'))
        icon = QIcon(os.path.join(pluginPath, 'images', 'alg.png'))
        providersItem.setIcon(icon)
        providersItem.setEditable(False)
        emptyItem = QStandardItem()
        emptyItem.setEditable(False)
        rootItem.insertRow(0, [providersItem, emptyItem])
        for group in settings.keys():
            if group in priorityKeys:
                continue
            groupItem = QStandardItem(group)
            icon = ProcessingConfig.getGroupIcon(group)
            groupItem.setIcon(icon)
            groupItem.setEditable(False)
            for setting in settings[group]:
                if setting.hidden:
                    continue
                if text == '' or text.lower() in setting.description.lower():
                    labelItem = QStandardItem(setting.description)
                    labelItem.setIcon(icon)
                    labelItem.setEditable(False)
                    self.items[setting] = SettingItem(setting)
                    groupItem.insertRow(0, [labelItem, self.items[setting]])
            emptyItem = QStandardItem()
            emptyItem.setEditable(False)
            providersItem.appendRow([groupItem, emptyItem])

        self.tree.sortByColumn(0, Qt.AscendingOrder)
        self.adjustColumns()

    def accept(self):
        """Validates and saves every edited setting, then closes the dialog."""
        for setting in self.items.keys():
            if isinstance(setting.value, bool):
                setting.setValue(self.items[setting].checkState() == Qt.Checked)
            else:
                try:
                    setting.setValue(unicode(self.items[setting].text()))
                except ValueError as e:
                    QMessageBox.warning(self, self.tr('Wrong value'),
                                        self.tr('Wrong value for parameter "%s":\n\n%s' % (setting.description, unicode(e))))
                    # Abort without closing so the user can correct the value.
                    return
            setting.save()
        Processing.updateAlgsList()

        QDialog.accept(self)

    def adjustColumns(self):
        self.tree.resizeColumnToContents(0)
        self.tree.resizeColumnToContents(1)
class SettingItem(QStandardItem):
    """Model item holding a single Processing setting value.

    Boolean settings are rendered as a checkbox; every other value is
    stored as plain editable data.
    """

    def __init__(self, setting):
        QStandardItem.__init__(self)
        self.setting = setting
        self.setData(setting, Qt.UserRole)
        if isinstance(setting.value, bool):
            self.setCheckable(True)
            self.setEditable(False)
            state = Qt.Checked if setting.value else Qt.Unchecked
            self.setCheckState(state)
        else:
            self.setData(setting.value, Qt.EditRole)
class SettingDelegate(QStyledItemDelegate):
    # Item delegate that builds an editor widget appropriate for each
    # setting's value type (see createEditor below).
    def __init__(self, parent=None):
        QStyledItemDelegate.__init__(self, parent)
    def createEditor(
        self,
        parent,
        options,
        index,
    ):
        """Returns an editor widget matched to the setting stored at *index*."""
        setting = index.model().data(index, Qt.UserRole)
        if setting.valuetype == Setting.FOLDER:
            return FileDirectorySelector(parent)
        elif setting.valuetype == Setting.FILE:
            # Second argument switches the selector into file mode
            # — presumably; confirm against FileDirectorySelector.__init__.
            return FileDirectorySelector(parent, True)
        elif setting.valuetype == Setting.SELECTION:
            combo = QComboBox(parent)
            combo.addItems(setting.options)
            return combo
        else:
            # No explicit value type: infer the editor from the Python type
            # of the current value.
            value = self.convertValue(index.model().data(index, Qt.EditRole))
            if isinstance(value, (int, long)):
                spnBox = QgsSpinBox(parent)
                spnBox.setRange(-999999999, 999999999)
                return spnBox
            elif isinstance(value, float):
                spnBox = QgsDoubleSpinBox(parent)
                spnBox.setRange(-999999999.999999, 999999999.999999)
                spnBox.setDecimals(6)
                return spnBox
            elif isinstance(value, (str, unicode)):
                return QLineEdit(parent)
def setEditorData(self, editor, index):
value = self.convertValue(index.model().data(index, Qt.EditRole))
setting = index.model().data(index, Qt.UserRole)
if setting.valuetype == Setting.SELECTION:
editor.setCurrentIndex(editor.findText(value))
els
|
kanjie128/test
|
pymavlink/fgFDM.py
|
Python
|
lgpl-3.0
| 9,572
| 0.007522
|
#!/usr/bin/env python
# parse and construct FlightGear NET FDM packets
# Andrew Tridgell, November 2011
# released under GNU GPL version 2 or later
import struct, math
class fgFDMError(Exception):
    """Error raised for fgFDM parse/pack problems."""

    def __init__(self, msg):
        Exception.__init__(self, msg)
        # Keep a prefixed copy of the message for display purposes.
        self.message = 'fgFDMError: ' + msg
class fgFDMVariable(object):
    """Descriptor for one fgFDM variable.

    Records the variable's slot index in the packed values array, how many
    consecutive slots it occupies, and its native units (None when the
    value is dimensionless).
    """

    def __init__(self, index, arraylength, units):
        self.units = units
        self.arraylength = arraylength
        self.index = index
class fgFDMVariableList(object):
    """Ordered registry of fgFDM variables.

    Variables are assigned consecutive slot indices in registration order,
    mirroring the wire layout of the packet.
    """

    def __init__(self):
        self.vars = {}
        # Next free slot index in the packed values array.
        self._nextidx = 0

    def add(self, varname, arraylength=1, units=None):
        """Registers a variable and advances the index by its array length."""
        var = fgFDMVariable(self._nextidx, arraylength, units=units)
        self.vars[varname] = var
        self._nextidx += arraylength
class fgFDM(object):
'''a flightgear native FDM parser/generator'''
def __init__(self):
'''init a fgFDM object'''
self.FG_NET_FDM_VERSION = 24
self.pack_string = '>I 4x 3d 6f 11f 3f 2f I 4I 4f 4f 4f 4f 4f 4f 4f 4f 4f I 4f I 3I 3f 3f 3f I i f 10f'
self.values = [0]*98
self.FG_MAX_ENGINES = 4
self.FG_MAX_WHEELS = 3
self.FG_MAX_TANKS = 4
# supported unit mappings
self.unitmap = {
('radians', 'degrees') : math.degrees(1),
('rps', 'dps') : math.degrees(1),
('feet', 'meters') : 0.3048,
('fps', 'mps') : 0.3048,
('knots', 'mps') : 0.514444444,
('knots', 'fps') : 0.514444444/0.3048,
('fpss', 'mpss') : 0.3048,
('seconds', 'minutes') : 60,
('seconds', 'hours') : 3600,
}
# build a mapping between variable name and index in the values array
# note that the order of this initialisation is critical - it must
# match the wire structure
self.mapping = fgFDMVariableList()
self.mapping.add('version')
# position
self.mapping.add('longitude', units='radians') # geodetic (radians)
self.mapping.add('latitude', units='radians') # geodetic (radians)
self.mapping.add('altitude', units='meters') # above sea level (meters)
self.mapping.add('agl', units='meters') # above ground level (meters)
# attitude
self.mapping.add('phi', units='radians') # roll (radians)
self.mapping.add('theta', units='radians') # pitch (radians)
self.mapping.add('psi', units='radians') # yaw or true heading (radians)
self.mapping.add('alpha', units='radians') # angle of attack (radians)
self.mapping.add('beta', units='radians') # side slip angle (radians)
# Velocities
self.mapping.add('phidot', units='rps') # roll rate (radians/sec)
self.mapping.add('thetadot', units='rps') # pitch rate (radians/sec)
self.mapping.add('psidot', units='rps') # yaw rate (radians/sec)
self.mapping.add('vcas', units='fps') # calibrated airspeed
self.mapping.add('climb_rate', units='fps') # feet per second
self.mapping.add('v_north', units='fps') # north velocity in local/body frame, fps
self.mapping.add('v_east', units='fps') # east velocity in local/body frame, fps
self.mapping.add('v_down', units='fps') # down/vertical velocity in local/body frame, fps
self.mapping.add('v_wind_body_north', units='fps') # north velocity in local/body frame
self.mapping.add('v_wind_body_east', units='fps') # east velocity in local/body frame
self.mapping.add('v_wind_body_down', units='fps') # down/vertical velocity in local/body
# Accelerations
self.mapping.add('A_X_pilot', units='fpss') # X accel in body frame ft/sec^2
self.mapping.add('A_Y_pilot', units='fpss') # Y accel in body frame ft/sec^2
self.mapping.add('A_Z_pilot', units='fpss') # Z accel in body frame ft/sec^2
# Stall
self.mapping.add('stall_warning')
|
# 0.0 - 1.0 indicating the amount of stall
self.mapping.add('slip_deg', units='degrees') # slip ball deflection
# Engine status
self.mapping.add('num_engines') # Number of valid engines
self.mapping.add('eng_state', self.FG
|
_MAX_ENGINES) # Engine state (off, cranking, running)
self.mapping.add('rpm', self.FG_MAX_ENGINES) # Engine RPM rev/min
self.mapping.add('fuel_flow', self.FG_MAX_ENGINES) # Fuel flow gallons/hr
self.mapping.add('fuel_px', self.FG_MAX_ENGINES) # Fuel pressure psi
self.mapping.add('egt', self.FG_MAX_ENGINES) # Exhuast gas temp deg F
self.mapping.add('cht', self.FG_MAX_ENGINES) # Cylinder head temp deg F
self.mapping.add('mp_osi', self.FG_MAX_ENGINES) # Manifold pressure
self.mapping.add('tit', self.FG_MAX_ENGINES) # Turbine Inlet Temperature
self.mapping.add('oil_temp', self.FG_MAX_ENGINES) # Oil temp deg F
self.mapping.add('oil_px', self.FG_MAX_ENGINES) # Oil pressure psi
# Consumables
self.mapping.add('num_tanks') # Max number of fuel tanks
self.mapping.add('fuel_quantity', self.FG_MAX_TANKS)
# Gear status
self.mapping.add('num_wheels')
self.mapping.add('wow', self.FG_MAX_WHEELS)
self.mapping.add('gear_pos', self.FG_MAX_WHEELS)
self.mapping.add('gear_steer', self.FG_MAX_WHEELS)
self.mapping.add('gear_compression', self.FG_MAX_WHEELS)
# Environment
self.mapping.add('cur_time', units='seconds') # current unix time
self.mapping.add('warp', units='seconds') # offset in seconds to unix time
self.mapping.add('visibility', units='meters') # visibility in meters (for env. effects)
# Control surface positions (normalized values)
self.mapping.add('elevator')
self.mapping.add('elevator_trim_tab')
self.mapping.add('left_flap')
self.mapping.add('right_flap')
self.mapping.add('left_aileron')
self.mapping.add('right_aileron')
self.mapping.add('rudder')
self.mapping.add('nose_wheel')
self.mapping.add('speedbrake')
self.mapping.add('spoilers')
self._packet_size = struct.calcsize(self.pack_string)
self.set('version', self.FG_NET_FDM_VERSION)
if len(self.values) != self.mapping._nextidx:
raise fgFDMError('Invalid variable list in initialisation')
    def packet_size(self):
        '''return expected size of FG FDM packets'''
        # Pre-computed in __init__ via struct.calcsize(self.pack_string).
        return self._packet_size
def convert(self, value, fromunits, tounits):
'''convert a value from one set of units to another'''
if fromunits == tounits:
return value
if (fromunits,tounits) in self.unitmap:
return value * self.unitmap[(fromunits,tounits)]
if (tounits,fromunits) in self.unitmap:
return value / self.unitmap[(tounits,fromunits)]
raise fgFDMError("unknown unit mapping (%s,%s)" % (fromunits, tounits))
    def units(self, varname):
        '''return the default units of a variable'''
        if not varname in self.mapping.vars:
            raise fgFDMError('Unknown variable %s' % varname)
        # May be None for dimensionless variables (units defaults to None
        # at registration time).
        return self.mapping.vars[varname].units
    def variables(self):
        '''return a list of available variables'''
        # Sorted by wire order, i.e. the slot index assigned at registration.
        return sorted(self.mapping.vars.keys(),
                      key = lambda v : self.mapping.vars[v].index)
def get(self, varname, idx=0, units=None):
'''get a variable value'''
if not varname in self.mapping.vars:
raise fgFDMError('Unknown variable %s' % varname)
if idx >= self.mapping.vars[varname].arraylength:
raise fgFDMError('index of %s beyond end of array idx=%u arraylength=%u' % (
varname, idx, self.mapping.vars[varname].arraylength))
value = self.values[self.mapping.vars[va
|
shanghaiyangming/trouter
|
test/t.py
|
Python
|
gpl-3.0
| 1,035
| 0.018357
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import tornado
from multiprocessing import Process
from tornado.httpclient import AsyncHTTPClient
repeat = 5
def handle_response(response):
    """Tornado fetch callback: print the body (or error) and stop the
    IOLoop once all expected responses have arrived."""
    global repeat
    # One fewer outstanding request.
    repeat -= 1
    if response.error:
        print "err:%s"%(response.error,)
    else:
        print response.body
    if repeat==0:
        tornado.ioloop.IOLoop.instance().stop()
def test(client_id):
    """Issue `repeat` asynchronous red-packet requests tagged with
    client_id, then run the IOLoop until handle_response stops it."""
    for i in xrange(repeat):
        # Reconstructed from a line-break-corrupted literal; the runtime
        # string is unchanged.
        url = ("http://urmdemo.umaman.com/weixinredpack/index/get"
               "?activity_id=55067db6479619010a80fed5"
               "&customer_id=55067d8c4996193a3a8b4f49"
               "&re_openid=yangming_%d"
               "&redpack_id=55067da0479619680980ff62") % (client_id,)
        request = tornado.httpclient.HTTPRequest(
            url,
            method='GET',
            connect_timeout = 60,
            request_timeout = 300
        )
        client = AsyncHTTPClient()
        client.fetch(request, handle_response)
    tornado.ioloop.IOLoop.instance().start()
# Spawn 5 worker processes, each with a distinct client_id.
# The original call passed no arguments even though test() requires
# client_id, so every child died with a TypeError.
for i in xrange(5):
    Process(target=test, args=(i,)).start()
|
lucienfostier/gaffer
|
python/GafferUI/PathVectorDataPlugValueWidget.py
|
Python
|
bsd-3-clause
| 4,045
| 0.026452
|
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
## Supported plug metadata - used to provide arguments to a
# PathChooserDialogue :
#
# - "path:leaf"
# - "path:valid"
# - "path:bookmarks"
class PathVectorDataPlugValueWidget( GafferUI.PlugValueWidget ) :
	"""PlugValueWidget presenting a vector-of-paths plug for editing."""

	## path should be an instance of Gaffer.Path, optionally with
	# filters applied. It will be used to convert string values to
	# paths for the path uis to edit.
	#
	# \deprecated The pathChooserDialogueKeywords argument will be removed
	# in a future version - use metadata instead.
	def __init__( self, plug, path, pathChooserDialogueKeywords={}, **kw ) :

		self.__dataWidget = GafferUI.PathVectorDataWidget( path=path, pathChooserDialogueKeywords=Gaffer.WeakMethod( self.__pathChooserDialogueKeywords ) )

		GafferUI.PlugValueWidget.__init__( self, self.__dataWidget, plug, **kw )

		self.__dataWidget.dataChangedSignal().connect( Gaffer.WeakMethod( self.__dataChanged ), scoped = False )

		# Never mutated, so sharing the default {} between instances is safe.
		self.__deprecatedPathChooserDialogueKeywords = pathChooserDialogueKeywords

		self._updateFromPlug()

	def path( self ) :

		return self.__dataWidget.path()

	def _updateFromPlug( self ) :

		if self.getPlug() is not None :
			self.__dataWidget.setData( self.getPlug().getValue() )

		self.__dataWidget.setEditable( self._editable() )

	def __dataChanged( self, widget ) :

		assert( widget is self.__dataWidget )

		# Push the edited value back to the plug inside an undo block,
		# blocking our own plug connections to avoid feedback.
		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			with Gaffer.BlockedConnection( self._plugConnections() ) :
				self.getPlug().setValue( self.__dataWidget.getData()[0] )

	def __pathChooserDialogueKeywords( self ) :

		result = {}
		result["leaf"] = Gaffer.Metadata.value( self.getPlug(), "path:leaf" )
		result["valid"] = Gaffer.Metadata.value( self.getPlug(), "path:valid" )

		bookmarks = Gaffer.Metadata.value( self.getPlug(), "path:bookmarks" )
		if bookmarks is not None :
			result["bookmarks"] = GafferUI.Bookmarks.acquire( self.getPlug(), type( self.path() ), bookmarks )

		if callable( self.__deprecatedPathChooserDialogueKeywords ) :
			result.update( self.__deprecatedPathChooserDialogueKeywords() )
		else :
			result.update( self.__deprecatedPathChooserDialogueKeywords )

		return result
|
adam111316/SickGear
|
lib/chardet/langgreekmodel.py
|
Python
|
gpl-3.0
| 12,867
| 0.343514
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# 256-entry byte -> frequency-order map for ISO-8859-7 (Latin/Greek) text.
# Lower order values mean more frequent Greek characters; sentinel values
# (255/254/253/252) are documented in the header comments above.
Latin7_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Same mapping for the Windows-1253 Greek code page; differs from the
# Latin-7 table only in a handful of high-byte positions.
win1253_char_to_order_map = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,
|
2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,
|
mmadsen/pytransmission
|
runtests.py
|
Python
|
apache-2.0
| 709
| 0.002821
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@pytransmission.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import unittest
if __name__ == '__main__':
    # use the default shared TestLoader instance
    test_loader = unittest.defaultTestLoader

    # use the basic test runner that outputs to sys.stderr
    test_runner = unittest.TextTestRunner()

    # automatically discover all tests in the current dir of the form test*.py
    # NOTE: only works for python 2.7 and later
    test_suite = test_loader.discover('test')

    # run the test suite
    test_runner.run(test_suite)
|
esquires/lvdb
|
test/temp2.py
|
Python
|
bsd-3-clause
| 66
| 0.015152
|
def mult(a, b):
    """Return the product ``a * b`` (delegates to the * operator)."""
    return a * b
def div(a, b):
    """Return the quotient ``a / b`` (delegates to the / operator).

    Raises ZeroDivisionError when ``b`` is zero, exactly like the bare
    division operator.
    """
    quotient = a / b
    return quotient
| |
hsr-ba-fs15-dat/python-social-auth
|
examples/django_me_example/example/settings.py
|
Python
|
bsd-3-clause
| 7,162
| 0.000279
|
import sys
from os.path import abspath, dirname, join
import mongoengine
sys.path.insert(0, '../..')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_PATH = abspath(dirname(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db'
}
}
TIME_ZONE = 'America/Montevideo'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
SECRET_KEY = '#$5btppqih8=%ae^#&7en#kyi!vh%he9rg=ed#hm6fnw9^=umc'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = (
join(ROOT_PATH, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'mongoengine.django.mongo_auth',
'social.apps.django_app.me',
'example.app',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
)
SESSION_ENGINE = 'mongoengine.django.sessions'
mongoengine.connect('psa', host='mongodb://localhost/psa')
MONGOENGINE_USER_DOCUMENT = 'example.app.models.User'
SOCIAL_AUTH_USER_MODEL = 'example.app.models.User'
# Ordered tuple of auth backends tried by Django / python-social-auth.
# The Django ModelBackend must stay last so social backends get first try.
AUTHENTICATION_BACKENDS = (
    'social.backends.open_id.OpenIdAuth',
    'social.backends.google.GoogleOpenId',
    'social.backends.google.GoogleOAuth2',
    'social.backends.google.GoogleOAuth',
    'social.backends.twitter.TwitterOAuth',
    'social.backends.yahoo.YahooOpenId',
    'social.backends.stripe.StripeOAuth2',
    'social.backends.persona.PersonaAuth',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.facebook.FacebookAppOAuth2',
    'social.backends.yahoo.YahooOAuth',
    'social.backends.angel.AngelOAuth2',
    'social.backends.behance.BehanceOAuth2',
    'social.backends.bitbucket.BitbucketOAuth',
    'social.backends.box.BoxOAuth2',
    'social.backends.linkedin.LinkedinOAuth',
    'social.backends.linkedin.LinkedinOAuth2',
    'social.backends.github.GithubOAuth2',
    'social.backends.foursquare.FoursquareOAuth2',
    'social.backends.instagram.InstagramOAuth2',
    'social.backends.live.LiveOAuth2',
    'social.backends.vk.VKOAuth2',
    'social.backends.dailymotion.DailymotionOAuth2',
    'social.backends.disqus.DisqusOAuth2',
    'social.backends.dropbox.DropboxOAuth',
    'social.backends.eveonline.EVEOnlineOAuth2',
    'social.backends.evernote.EvernoteSandboxOAuth',
    'social.backends.fitbit.FitbitOAuth',
    'social.backends.flickr.FlickrOAuth',
    'social.backends.livejournal.LiveJournalOpenId',
    'social.backends.soundcloud.SoundcloudOAuth2',
    'social.backends.thisismyjam.ThisIsMyJamOAuth1',
    'social.backends.stocktwits.StocktwitsOAuth2',
    'social.backends.tripit.TripItOAuth',
    'social.backends.twilio.TwilioAuth',
    'social.backends.clef.ClefOAuth2',
    'social.backends.xing.XingOAuth',
    'social.backends.yandex.YandexOAuth2',
    'social.backends.douban.DoubanOAuth2',
    'social.backends.mineid.MineIDOAuth2',
    'social.backends.mixcloud.MixcloudOAuth2',
    'social.backends.rdio.RdioOAuth1',
    'social.backends.rdio.RdioOAuth2',
    'social.backends.yammer.YammerOAuth2',
    'social.backends.stackoverflow.StackoverflowOAuth2',
    'social.backends.readability.ReadabilityOAuth',
    'social.backends.skyrock.SkyrockOAuth',
    'social.backends.tumblr.TumblrOAuth',
    'social.backends.reddit.RedditOAuth2',
    'social.backends.steam.SteamOpenId',
    'social.backends.podio.PodioOAuth2',
    'social.backends.amazon.AmazonOAuth2',
    'social.backends.email.EmailAuth',
    'social.backends.username.UsernameAuth',
    'social.backends.wunderlist.WunderlistOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/done/'
URL_PATH = ''
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.me.models.DjangoStorage'
SOCIAL_AUTH_GOOGLE_OAUTH_SCOPE = [
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/userinfo.profile'
]
# SOCIAL_AUTH_EMAIL_FORM_URL = '/signup-email'
SOCIAL_AUTH_EMAIL_FORM_HTML = 'email_signup.html'
SOCIAL_AUTH_EMAIL_VALIDATION_FUNCTION = 'example.app.mail.send_validation'
SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/email-sent/'
# SOCIAL_AUTH_USERNAME_FORM_URL = '/signup-username'
SOCIAL_AUTH_USERNAME_FORM_HTML = 'username_signup.html'
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'example.app.pipeline.require_email',
'social.pipeline.mail.mail_validation',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
try:
from example.local_settings import *
except ImportError:
pass
|
g-weatherill/oq-risklib
|
openquake/calculators/scenario_risk.py
|
Python
|
agpl-3.0
| 5,466
| 0
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014-2015, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import numpy
from openquake.baselib import general
from openquake.commonlib import parallel, datastore
from openquake.risklib import scientific
from openquake.calculators import base, calc
F64 = numpy.float64
stat_dt = numpy.dtype([('mean', F64), ('stddev', F64),
('mean_ins', F64), ('stddev_ins', F64)])
@parallel.litetask
def scenario_risk(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a scenario computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    :returns:
        a dictionary (key_type, loss_type) -> losses where the `key_type` can
        be "agg" (for the aggregate losses) or "ins" (for the insured losses).
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    L = len(riskmodel.loss_types)
    R = len(rlzs_assoc.realizations)
    # result is indexed by (loss_type_index, realization_id) and accumulates
    # per-asset averages plus aggregate/insured totals
    result = calc.build_dict((L, R), general.AccumDict)
    lt2idx = {lt: i for i, lt in enumerate(riskmodel.loss_types)}
    for out_by_rlz in riskmodel.gen_outputs(
            riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            lti = lt2idx[out.loss_type]
            # columns: mean, stddev, insured mean, insured stddev
            stats = numpy.zeros((len(out.assets), 4), F64)
            # this is ugly but using a composite array (i.e.
            # stats['mean'], stats['stddev'], ...) may return
            # bogus numbers! even with the SAME version of numpy,
            # hdf5 and h5py!! the numbers are around 1E-300 and
            # different on different systems; we found issues
            # with Ubuntu 12.04 and Red Hat 7 (MS and DV)
            stats[:, 0] = out.loss_matrix.mean(axis=1)
            stats[:, 1] = out.loss_matrix.std(ddof=1, axis=1)
            stats[:, 2] = out.insured_loss_matrix.mean(axis=1)
            stats[:, 3] = out.insured_loss_matrix.std(ddof=1, axis=1)
            avg = result[lti, out.hid]
            for asset, stat in zip(out.assets, stats):
                avg['avg', asset.idx] = stat
            # ('agg', 0) holds plain aggregate losses, ('agg', 1) insured ones
            result[lti, out.hid]['agg', 0] = out.aggregate_losses
            result[lti, out.hid]['agg', 1] = out.insured_losses
    return result
@base.calculators.add('scenario_risk')
class ScenarioRiskCalculator(base.RiskCalculator):
    """
    Run a scenario risk calculation
    """
    # worker function executed in parallel over the risk inputs
    core_func = scenario_risk
    # persisted on the datastore so postprocessing can reuse the epsilons
    epsilon_matrix = datastore.persistent_attribute('epsilon_matrix')
    pre_calculator = 'scenario'
    is_stochastic = True
    def pre_execute(self):
        """
        Compute the GMFs, build the epsilons, the riskinputs, and a dictionary
        with the unit of measure, used in the export phase.
        """
        # when GMFs are given as an input file there is nothing to precompute
        if 'gmfs' in self.oqparam.inputs:
            self.pre_calculator = None
        base.RiskCalculator.pre_execute(self)
        logging.info('Building the epsilons')
        # one epsilon row per asset, number_of_ground_motion_fields columns
        eps_dict = self.make_eps_dict(
            self.oqparam.number_of_ground_motion_fields)
        self.epsilon_matrix = numpy.array(
            [eps_dict[a['asset_ref']] for a in self.assetcol])
        self.riskinputs = self.build_riskinputs(self.gmfs, eps_dict)
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        with self.monitor('saving outputs', autoflush=True):
            L = len(self.riskmodel.loss_types)
            R = len(self.rlzs_assoc.realizations)
            N = len(self.assetcol)
            # stat_dt records carry mean/stddev and their insured variants
            arr = dict(avg=numpy.zeros((N, L, R), stat_dt),
                       agg=numpy.zeros((L, R), stat_dt))
            for (l, r), res in result.items():
                for keytype, key in res:
                    if keytype == 'agg':
                        agg_losses = arr[keytype][l, r]
                        mean, std = scientific.mean_std(res[keytype, key])
                        # key 0 = plain aggregate losses, key 1 = insured
                        # (see the worker function scenario_risk)
                        if key == 0:
                            agg_losses['mean'] = mean
                            agg_losses['stddev'] = std
                        else:
                            agg_losses['mean_ins'] = mean
                            agg_losses['stddev_ins'] = std
                    else:
                        # per-asset averages: key is the asset index
                        arr[keytype][key, l, r] = res[keytype, key]
            self.datastore['avglosses'] = arr['avg']
            self.datastore['agglosses'] = arr['agg']
|
kanzure/pyphantomjs
|
pyphantomjs/resources.py
|
Python
|
gpl-3.0
| 354,476
| 0.000014
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Fri Dec 2 16:03:48 2011
# by: The Resource Compiler for PyQt (Qt v4.7.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x09\xbc\
\x2f\
\x2a\x6a\x73\x6c\x69\x6e\x74\x20\x73\x6c\x6f\x70\x70\x79\x3a\x20\
\x74\x72\x75\x65\x2c\x20\x6e\x6f\x6d\x65\x6e\x3a\x20\x74\x72\x75\
\x65\x20\x2a\x2f\x0a\x2f\x2a\x67\x6c\x6f\x62\x61\x6c\x20\x77\x69\
\x6e\x64\x6f\x77\x3a\x74\x72\x75\x65\x2c\x70\x68\x61\x6e\x74\x6f\
\x6d\x3a\x74\x72\x75\x65\x20\x2a\x2f\x0a\x0a\x2f\x2a\x0a\x20\x20\
\x54\x68\x69\x73\x20\x66\x69\x6c\x65\x20\x69\x73\x20\x70\x61\x72\
\x74\x20\x6f\x66\x20\x74\x68\x65\x20\x50\x68\x61\x6e\x74\x6f\x6d\
\x4a\x53\x20\x70\x72\x6f\x6a\x65\x63\x74\x20\x66\x72\x6f\x6d\x20\
\x4f\x66\x69\x20\x4c\x61\x62\x73\x2e\x0a\x0a\x20\x20\x43\x6f\x70\
\x79\x72\x69\x67\x68\x74\x20\x28\x43\x29\x20\x32\x30\x31\x31\x20\
\x41\x72\x69\x79\x61\x20\x48\x69\x64\x61\x79\x61\x74\x20\x3c\x61\
\x72\x69\x79\x61\x2e\x68\x69\x64\x61\x79\x61\x74\x40\x67\x6d\x61\
\x69\x6c\x2e\x63\x6f\x6d\x3e\x0a\x20\x20\x43\x6f\x70\x79\x72\x69\
\x67\x68\x74\x20\x28\x43\x29\x20\x32\x30\x31\x31\x20\x49\x76\x61\
\x6e\x20\x44\x65\x20\x4d\x61\x72\x69\x6e\x6f\x20\x3c\x69\x76\x61\
\x6e\x2e\x64\x65\x2e\x6d\x61\x72\x69\x6e\x6f\x40\x67\x6d\x61\x69\
\x6c\x2e\x63\x6f\x6d\x3e\x0a\x20\x20\x43\x6f\x70\x79\x72\x69\x67\
\x68\x74\x20\x28\x43\x29\x20\x32\x30\x31\x31\x20\x4a\x61\x6d\x65\
\x73\x20\x52\x6f\x65\x20\x3c\x72\x6f\x65\x6a\x61\x6d\x65\x73\x31\
\x32\x40\x68\x6f\x74\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x3e\x0a\x20\
\x20\x43\x6f\x70\x79\x72\x69\x67\x68\x74\x20\x28\x43\x29\x20\x32\
\x30\x31\x31\x20\x65\x78\x65\x63\x6a\x6f\x73\x68\x2c\x20\x68\x74\
\x74\x70\x3a\x2f\x2f\x65\x78\x65\x63\x6a\x6f\x73\x68\x2e\x62\x6c\
\x6f\x67\x73\x70\x6f\x74\x2e\x63\x6f\x6d\x0a\x0a\x20\x20\x52\x65\
\x64\x69\x73\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x20\x61\x6e\x64\
\x20\x75\x73\x65\x20\x69\x6e\x20\x73\x6f\x75\x72\x63\x65\x20\x61\
\x6e\x64\x20\x62\x69\x6e\x61\x72\x79\x20\x66\x6f\x72\x6d\x73\x2c\
\x20\x77\x69\x74\x68\x20\x6f\x72\x20\x77\x69\x74\x68\x6f\x75\x74\
\x0a\x20\x20\x6d\x6f\x64\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x2c\
\x20\x61\x72\x65\x20\x70\x65\x72\x6d\x69\x74\x74\x65\x64\x20\x70\
\x72\x6f\x76\x69\x64\x65\x64\x20\x74\x68\x61\x74\x20\x74\x68\x65\
\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x63\x6f\x6e\x64\x69\
\x74\x69\x6f\x6e\x73\x20\x61\x72\x65\x20\x6d\x65\x74\x3a\x0a\x0a\
\x20\x20\x20\x20\x2a\x20\x52\x65\x64\x69\x73\x74\x72\x69\x62\x75\
\x74\x69\x6f\x6e\x73\x20\x6f\x66\x20\x73\x6f\x75\x72\x63\x65\x20\
\x63\x6f\x64\x65\x20\x6d\x75\x73\x74\x20\x72\x65\x74\x61\x69\x6e\
\x20\x74\x68\x65\x20\x61\x62\x6f\x76\x65\x20\x63\x6f\x70\x79\x72\
\x69\x67\x68\x74\x0a\x20\x20\x20\x20\x20\x20\x6e\x6f\x74\x69\x63\
\x65\x2c\x20\x74\x68\x69\x73\x20\x6c\x69\x73\x74\x20\x6f\x66\x20\
\x63\x6f\x6e\x64\x69\x74\x69\x6f\x6e\x73\x20\x61\x6e\x64\x20\x74\
\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x64\x69\x73\
\x63\x6c\x61\x69\x6d\x65\x72\x2e\x0a\x20\x20\x20\x20\x2a\x20\x52\
\x65\x64\x69\x73\x74\x72\x69\x62\x75\x74\x69\x6f\x6e\x73\x20\x69\
\x6e\x20\x62\x69\x6e\x61\x72\x79\x20\x66\x6f\x72\x6d\x20\x6d\x75\
\x73\x74\x20\x72\x65\x70\x72\x6f\x64\x75\x63\x65\x20\x74\x68\x65\
\x20\x61\x62\x6f\x76\x65\x20\x63\x6f\x70\x79\x72\x69\x67\x68\x74\
\x0a\x20\x20\x20\x20\x20\x20\x6e\x6f\x74\x69\x63\x65\x2c\x20\x74\
\x68\x69\x73\x20\x6c\x69\x73\x74\x20\x6f\x66\x20\x63\x6f\x6e\x64\
\x69\x74\x69\x6f\x6e\x73\x20\x61\x6e\x64\x20\x74\x68\x65\x20\x66\
\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x64\x69\x73\x63\x6c\x61\x69\
\x6d\x65\x72\x20\x69\x6e\x20\x74\x68\x65\x0a\x20\x20\x20\x20\x20\
\x20\x64\x6f\x63\x75\x6d\x65\x6e\x74\x61\x74\x69\x6f\x6e\x20\x61\
\x6e\x64\x2f\x6f\x72\x20\x6f\x74\x68\x65\x72\x20\x6d\x61\x74\x65\
\x72\x69\x61\x6c\x73\x20\x70\x72\x6f\x76\x69\x64\x65\x64\x20\x77\
\x69\x74\x68\x20\x74\x68\x65\x20\x64\x69\x73\x74\x72\x69\x62\x75\
\x74\x69\x6f\x6e\x2e\x0a\x20\x20\x20\x20\x2a\x20\x4e\x65\x69\x74\
\x68\x65\x72\x20\x74\x68\x65\x20\x6e\x61\x6d\x65\x20\x6f\x66\x20\
\x74\x68\x65\x20\x3c\x6f\x72\x67\x61\x6e\x69\x7a\x61\x74\x69\x6f\
\x6e\x3e\x20\x6e\x6f\x72\x20\x74\x68\x65\x0a\x20\x20\x20\x20\x20\
\x20\x6e\x61\x6d\x65\x73\x20\x6f\x66\x20\x69\x74\x73\x20\x63\x6f\
\x6e\x74\x72\x69\x62\x75\x74\x6f\x72\x73\x20\x6d\x61\x79\x20\x62\
\x65\x20\x75\x73\x65\x64\x20\x74\x6f\x20\x65\x6e\x64\x6f\x72\x73\
\x65\x20\x6f\x72\x20\x70\x72\x6f\x6d\x6f\x74\x65\x20\x70\x72\x6f\
\x64\x75\x63\x74\x73\x0a\x20\x20\x20\x20\x20\x20\x64\x65\x72\x69\
\x76\x65\x64\x20\x66\x72\x6f\x6d\x20\x74\x68\x69\x73\x20\x73\x6f\
\x66\x74\x77\x61\x72\x65\x20\x77\x69\x74\x68\x6f\x75\x74\x20\x73\
\x70\x65\x63\x69\x66\x69\x63\x20\x70\x72\x69\x6f\x72\x20\x77\x72\
\x69\x74\x74\x65\x6e\x20\x70\x65\x72\x6d\x69\x73\x73\x69\x6f\x6e\
\x2e\x0a\x0a\x20\x20\x54\x48\x49\x53\x20\x53\x4f\x46\x54\x57\x41\
\x52\x45\x20\x49\x53\x20\x50\x52\x4f\x56\x49\x44\x45\x44\x20\x42\
\x59\x20\x54\x48\x45\x20\x43\x4f\x50\x59\x52\x49\x47\x48\x54\x20\
\x48\x4f\x4c\x44\x45\x52\x53\x20\x41\x4e\x44\x20\x43\x4f\x4e\x54\
\x52\x49\x42\x55\x54\x4f\x52\x53\x20\x22\x41\x53\x20\x49\x53\x22\
\x0a\x20\x20\x41\x4e\x44\x20\x41\x4e\x59\x20\x45\x58\x50\x52\x45\
\x53\x53\x20\x4f\x52\x20\x49\x4d\x50\x4c\x49\x45\x44\x20\x57\x41\
\x52\x52\x41\x4e\x54\x49\x45\x53\x2c\x20\x49\x4e\x43\x4c\x55\x44\
\x49\x4e\x47\x2c\x20\x42\x55\x54\x20\x4e\x4f\x54\x20\x4c\x49\x4d\
\x49\x54\x45\x44\x20\x54\x4f\x2c\x20\x54\x48\x45\
|
x0a\x20\x20\x49\
\x4d\x50\x4c\x49\x45\x44\x20\x57\x41\x52\x52\x41\x4e\x54\x49\x45\
\x53\x20\x4f\x46\x20\x4d\
|
x45\x52\x43\x48\x41\x4e\x54\x41\x42\x49\
\x4c\x49\x54\x59\x20\x41\x4e\x44\x20\x46\x49\x54\x4e\x45\x53\x53\
\x20\x46\x4f\x52\x20\x41\x20\x50\x41\x52\x54\x49\x43\x55\x4c\x41\
\x52\x20\x50\x55\x52\x50\x4f\x53\x45\x0a\x20\x20\x41\x52\x45\x20\
\x44\x49\x53\x43\x4c\x41\x49\x4d\x45\x44\x2e\x20\x49\x4e\x20\x4e\
\x4f\x20\x45\x56\x45\x4e\x54\x20\x53\x48\x41\x4c\x4c\x20\x3c\x43\
\x4f\x50\x59\x52\x49\x47\x48\x54\x20\x48\x4f\x4c\x44\x45\x52\x3e\
\x20\x42\x45\x20\x4c\x49\x41\x42\x4c\x45\x20\x46\x4f\x52\x20\x41\
\x4e\x59\x0a\x20\x20\x44\x49\x52\x45\x43\x54\x2c\x20\x49\x4e\x44\
\x49\x52\x45\x43\x54\x2c\x20\x49\x4e\x43\x49\x44\x45\x4e\x54\x41\
\x4c\x2c\x20\x53\x50\x45\x43\x49\x41\x4c\x2c\x20\x45\x58\x45\x4d\
\x50\x4c\x41\x52\x59\x2c\x20\x4f\x52\x20\x43\x4f\x4e\x53\x45\x51\
\x55\x45\x4e\x54\x49\x41\x4c\x20\x44\x41\x4d\x41\x47\x45\x53\x0a\
\x20\x20\x28\x49\x4e\x43\x4c\x55\x44\x49\x4e\x47\x2c\x20\x42\x55\
\x54\x20\x4e\x4f\x54\x20\x4c\x49\x4d\x49\x54\x45\x44\x20\x54\x4f\
\x2c\x20\x50\x52\x4f\x43\x55\x52\x45\x4d\x45\x4e\x54\x20\x4f\x46\
\x20\x53\x55\x42\x53\x54\x49\x54\x55\x54\x45\x20\x47\x4f\x4f\x44\
\x53\x20\x4f\x52\x20\x53\x45\x52\x56\x49\x43\x45\x53\x3b\x0a\x20\
\x20\x4c\x4f\x53\x53\x20\x4f\x46\x20\x55\x53\x45\x2c\x20\x44\x41\
\x54\x41\x2c\x20\x4f\x52\x20\x50\x52\x4f\x46\x49\x54\x53\x3b\x20\
\x4f\x52\x20\x42\x55\x53\x49\x4e\x45\x53\x53\x20\x49\x4e\x54\x45\
\x52\x52\x55\x50\x54\x49\x4f\x4e\x29\x20\x48\x4f\x57\x45\x56\x45\
\x52\x20\x43\x41\x55\x53\x45\x44\x20\x41\x4e\x44\x0a\x20\x20\x4f\
\x4e\x20\x41\x4e\x59\x20\x54\x48\x45\x4f\x52\x59\x20\x4f\x46\x20\
\x4c\x49\x41\x42\x49\x4c\x49\x54\x59\x2c\x20\x57\x48\x45\x54\x48\
\x45\x52\x20\x49\x4e\x20\x43\x4f\x4e\x54\x52\x41\x43\x54\x2c\x20\
\x53\x54\x52\x49\x43\x54\x20\x4c\x49\x41\x42\x49\x4c\x49\x54\x59\
\x2c\x20\x4f\x52\x20\x54\x4f\x52\x54\x0a\x20\x20\x28\x49\x4e\x43\
\x4c\x55\x44\x49\x4e\x47\x20\x4e\x45\x47\x4c\x49\x47\x45\x4e\x43\
\x45\x20\x4f\x52\x20\x4f\x54\x48\x45\x52\x57\x49\x53\x45\x29\x20\
\x41\x52\x49\x53\x49\x4e\x47\x20\x49\x4e\x20\x41\x4e\x59\x20\x57\
\x41\x59\x20\x4f\x55\x54\x20\x4f\x46\x20\x54\x48\x45\x20\x55\x53\
\x45\x20\x4f\x46\x0a\x20\x20\x54\x48\x49\x53\x20\x53\x4f\x46\x54\
\x57\x41\x52\x45\x2c\x20\x45\x56\x45\x4e\x20\x49\x46\x20\x41\x44\
\x56\x49\x53\x45\x44\x20\x4f\x46\x20\x54\x48\x45\x20\x50\x4f\x53\
\x53\x49\x42\x49\x4c\x49\x54\x59\x20\x4f\x46\x20\x53\x55\x43\x48\
\x20\x44\x41\x4d\x41\x47\x45\x2e\x0a\x2a\x2f\x0a\x0a\x66\x75\x6e\
\x63\x74\x69\x6f\x6e\x20\x72\x65\x71\x75\x69\x72\x65\x28\x6e\x61\
\x6d\x65\x29\x20\x7b\x0a\x0a\x20\x20\x20\x20\x76\x61\x72\x20\x63\
\x6f\x64\x65\x2c\x20\x66\x75\x6e\x63\x2c\x20\x65\x78\x70\x6f\x72\
\x74\x73\x3b\x0a\x0a\x20\x20\x20\x20\x69\x66\x20\x28\x6e\
|
rybesh/pybtex
|
pybtex/database/output/bibtexml.py
|
Python
|
mit
| 3,696
| 0.001623
|
# Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
import cElementTree as ET
except ImportError:
try:
from elementtree import ElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from pybtex.core import Entry
from pybtex.database.output import BaseWriter
doctype = """<!DOCTYPE bibtex:file PUBLIC
"-//BibTeXML//DTD XML for BibTeX v1.0//EN"
"bibtexml.dtd" >
"""
class PrettyTreeBuilder(ET.TreeBuilder):
    """TreeBuilder that emits indented, newline-separated XML.

    Indentation depth is driven by ``self.stack``, which mirrors the
    currently open tags (4 spaces per nesting level).
    """
    def __init__(self):
        ET.TreeBuilder.__init__(self)
        self.stack = []  # open tags; len(stack) determines the indent level
    def newline(self):
        self.data('\n')
    def indent_line(self):
        self.data(' ' * len(self.stack) * 4)
    def start(self, tag, attrs=None, newline=True):
        """Open *tag*; with newline=False the text may follow on the same line."""
        if attrs is None:
            attrs = {}
        self.indent_line()
        self.stack.append(tag)
        ET.TreeBuilder.start(self, tag, attrs)
        if newline:
            self.newline()
    def end(self, indent=True):
        """Close the innermost tag; indent=False keeps it on the current line."""
        tag = self.stack.pop()
        if indent:
            self.indent_line()
        ET.TreeBuilder.end(self, tag)
        self.newline()
    def element(self, tag, data):
        """Emit ``<tag>data</tag>`` on a single (indented) line."""
        self.start(tag, newline=False)
        self.data(data)
        self.end(indent=False)
class Writer(BaseWriter):
    """Outputs BibTeXML markup"""
    name = 'bibtexml'
    suffixes = '.xml', '.bibtexml'
    def write_stream(self, bib_data, stream):
        """Serialize *bib_data* (a BibliographyData) as BibTeXML to *stream*.

        Entries, fields and person names are written as nested
        ``bibtex:*`` elements, pretty-printed via PrettyTreeBuilder.
        """
        def write_persons(persons, role):
            # one <bibtex:ROLE> element holding a <bibtex:person> per person,
            # with only the non-empty name parts emitted
            if persons:
                w.start('bibtex:' + role)
                for person in persons:
                    w.start('bibtex:person')
                    for type in ('first', 'middle', 'prelast', 'last', 'lineage'):
                        name = person.get_part_as_text(type)
                        if name:
                            w.element('bibtex:' + type, name)
                    w.end()
                w.end()
        w = PrettyTreeBuilder()
        bibtex_file = w.start('bibtex:file', {'xmlns:bibtex': 'http://bibtexml.sf.net/'})
        w.newline()
        for key, entry in bib_data.entries.iteritems():
            w.start('bibtex:entry', dict(id=key))
            w.start('bibtex:' + entry.type)
            for field_name, field_value in entry.fields.iteritems():
                w.element('bibtex:' + field_name, field_value)
            for role, persons in entry.persons.iteritems():
                write_persons(persons, role)
            w.end()
            w.end()
            w.newline()
        w.end()
        tree = ET.ElementTree(w.close())
        tree.write(stream, self.encoding)
        stream.write(b'\n')
|
Thraxis/pymedusa
|
sickbeard/blackandwhitelist.py
|
Python
|
gpl-3.0
| 6,272
| 0.001435
|
# coding=utf-8
# Author: Dennis Lutter <lad1337@gmail.com>
#
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from adba.aniDBerrors import AniDBCommandTimeoutError
import sickbeard
from sickbeard import db, logger, helpers
class BlackAndWhiteList(object):
    """Per-show release-group black/whitelist persisted in the main DB."""
    # NOTE(review): class-level defaults; load() rebinds per-instance lists in
    # __init__, so these shared lists are never mutated in place -- confirm.
    blacklist = []
    whitelist = []
    def __init__(self, show_id):
        # a falsy show_id (None, 0, '') is rejected up front
        if not show_id:
            raise BlackWhitelistNoShowIDException()
        self.show_id = show_id
        self.load()
    def load(self):
        """
        Builds black and whitelist
        """
        logger.log('Building black and white list for {id}'.format
                   (id=self.show_id), logger.DEBUG)
        # each list lives in its own table, keyed by show_id
        self.blacklist = self._load_list(b'blacklist')
        self.whitelist = self._load_list(b'whitelist')
    def _add_keywords(self, table, values):
        """
        DB: Adds keywords into database for current show

        :param table: SQL table to add keywords to
        :param values: Values to be inserted in table
        """
        # NOTE(review): b''.format() only exists on Python 2; this module
        # (unicode_literals + byte-string SQL) presumably targets py2 -- verify.
        main_db_con = db.DBConnection()
        for value in values:
            main_db_con.action(
                b'INSERT INTO [{table}] (show_id, keyword) '
                b'VALUES (?,?)'.format(table=table),
                [self.show_id, value]
            )
    def set_black_keywords(self, values):
        """
        Sets blacklist to new value

        :param values: Complete list of keywords to be set as blacklist
        """
        # replace-all semantics: wipe the table, then insert the new set
        self._del_all_keywords(b'blacklist')
        self._add_keywords(b'blacklist', values)
        self.blacklist = values
        logger.log('Blacklist set to: {blacklist}'.format
                   (blacklist=self.blacklist), logger.DEBUG)
    def set_white_keywords(self, values):
        """
        Sets whitelist to new value

        :param values: Complete list of keywords to be set as whitelist
        """
        # replace-all semantics, mirroring set_black_keywords
        self._del_all_keywords(b'whitelist')
        self._add_keywords(b'whitelist', values)
        self.whitelist = values
        logger.log('Whitelist set to: {whitelist}'.format(whitelist=self.whitelist), logger.DEBUG)
    def _del_all_keywords(self, table):
        """
        DB: Remove all keywords for current show

        :param table: SQL table remove keywords from
        """
        main_db_con = db.DBConnection()
        main_db_con.action(
            b'DELETE FROM [{table}] '
            b'WHERE show_id = ?'.format(table=table),
            [self.show_id]
        )
    def _load_list(self, table):
        """
        DB: Fetch keywords for current show

        :param table: Table to fetch list of keywords from
        :return: keywords in list
        """
        main_db_con = db.DBConnection()
        sql_results = main_db_con.select(
            b'SELECT keyword '
            b'FROM [{table}] '
            b'WHERE show_id = ?'.format(table=table),
            [self.show_id]
        )
        groups = [result[b'keyword']
                  for result in sql_results
                  ] if sql_results else []
        if groups:
            logger.log('BWL: {id} loaded keywords from {table}: {groups}'.format
                       (id=self.show_id, table=table, groups=groups), logger.DEBUG)
        return groups
    def is_valid(self, result):
        """
        Check if result is valid according to white/blacklist for current show

        :param result: Result to analyse
        :return: False if result is not allowed in white/blacklist, True if it is
        """
        if not (self.whitelist or self.blacklist):
            logger.log(u'No Whitelist and Blacklist defined, check passed.', logger.DEBUG)
            return True
        elif not result.release_group:
            logger.log('Invalid result, no release group detected', logger.DEBUG)
            return False
        # case-insensitive membership; an empty list means "no filter" for
        # that side, so it passes by default
        whitelist = [x.lower() for x in self.whitelist]
        white_result = result.release_group.lower() in whitelist if self.whitelist else True
        blacklist = [x.lower() for x in self.blacklist]
        black_result = result.release_group.lower() not in blacklist if self.blacklist else True
        logger.log('Whitelist check: {white}. Blacklist check: {black}'.format
                   (white='Passed' if white_result else 'Failed',
                    black='Passed' if black_result else 'Failed'), logger.DEBUG)
        return white_result and black_result
# Raised by BlackAndWhiteList.__init__ when constructed without a show_id.
class BlackWhitelistNoShowIDException(Exception):
    """No show_id was given"""
def short_group_names(groups):
    """
    Find AniDB short group names for release groups

    :param groups: comma-separated string of group names to shorten
    :return: list of shortened group names (the input names themselves
        when no AniDB connection is available)
    """
    groups = groups.split(',')
    short_group_list = []
    if helpers.set_up_anidb_connection():
        for group_name in groups:
            try:
                group = sickbeard.ADBA_CONNECTION.group(gname=group_name)
            except AniDBCommandTimeoutError:
                # best-effort: skip this group and move on
                logger.log('Timeout while loading group from AniDB. '
                           'Trying next group', logger.DEBUG)
            except Exception:
                logger.log('Failed while loading group from AniDB. '
                           'Trying next group', logger.DEBUG)
            else:
                for line in group.datalines:
                    if line[b'shortname']:
                        short_group_list.append(line[b'shortname'])
                    else:
                        # no short name known: fall back to the original
                        # name, avoiding duplicates
                        if group_name not in short_group_list:
                            short_group_list.append(group_name)
    else:
        # no AniDB connection: return the names unchanged
        short_group_list = groups
    return short_group_list
|
asraf209/leetcode
|
src/3SumClosest/main.py
|
Python
|
gpl-3.0
| 2,156
| 0.005102
|
# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0?
# Find all unique triplets in the array which gives the sum of zero.
# Elements in a triplet (a,b,c) must be in non-descending order. (ie, a <= b <= c)
# The solution set must not contain duplicate triplets.
# Solution:
# This problem is the extension of the problem below:
# Given a set S of n integers, find all pairs of integers of a and b in S
# such that a + b = k?
# The above problem can be solved in O(n) time, assuming that the set S is already sorted. By
# using two index first and last, each pointing to the first and last element, we look at the
# element pointed by first, which we call A. We know that we need to find B = k - A, the
# complement of A. If the element pointed by last is less than B, we know that the choice is
# to increment pointer first by one step. Similarly, if the element pointed by last is greater
# than B, we decrement pointer last by one step. We are progressively refining the sum step by
# step. Since each step we move a pointer one step, there are at most n steps, which gives the
# complexity of O(n).
# By incorporating the solution above, we can solve the 3sum problem in O(n^2) time, which is
# a straight forward extension.
'''
Created on 2013-5-19
@author: Yubin Bai
'''
class Solution:
    # @return an integer
    def threeSumClosest(self, num, target):
        """Return the sum of the triplet in *num* closest to *target*.

        Sorts *num* in place, then for each leftmost index uses the
        classic two-pointer sweep, keeping the triplet whose sum has the
        smallest absolute distance to target.  O(n^2) time, O(1) extra space.
        """
        num.sort()
        size = len(num)
        result = [1 << 33, -1, -1, -1]  # [distance, a, b, c]; seed with a huge distance
        for first in range(size - 2):
            left = first + 1
            right = size - 1
            while left < right:
                curr = num[first] + num[left] + num[right]
                distance = abs(curr - target)
                if distance < result[0]:
                    result = [distance, num[first], num[left], num[right]]
                # move the pointer that brings the sum toward target
                if curr < target:
                    left += 1
                else:
                    right -= 1
        return result[1] + result[2] + result[3]
# if __name__ == '__main__':
# data = [0,0,0]
# target = 1
# s = Solution()
# print(s.threeSumClosest(data, target))
|
djanowski/pygmentize
|
vendor/pygments/tests/test_util.py
|
Python
|
mit
| 5,303
| 0.001131
|
# -*- coding: utf-8 -*-
"""
Test suite for the util module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import unittest
from pygments import util
# Minimal stand-in lexer: its analyse() parses the input as a float and is
# wrapped by util.make_analysator exactly like a real lexer's analyser.
class FakeLexer(object):
    def analyse(text):
        # NOTE(review): deliberately no `self` -- make_analysator wraps the
        # plain function before it is ever called as a method
        return float(text)
    analyse = util.make_analysator(analyse)
class UtilTest(unittest.TestCase):
def test_getoptions(self):
raises = self.assertRaises
equals = self.assertEqual
equals(util.get_bool_opt({}, 'a', True), True)
equals(util.get_bool_opt({}, 'a', 1), True)
equals(util.get_bool_o
|
pt({}, 'a', 'true'), True)
equals(util.get_bool_opt({}, 'a', 'no'), False)
raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
equals(util.get_int_opt({}, 'a', 1), 1)
raises(util.OptionError, util.get_int_opt, {}, 'a', [])
raises(util.OptionError,
|
util.get_int_opt, {}, 'a', 'bar')
equals(util.get_list_opt({}, 'a', [1]), [1])
equals(util.get_list_opt({}, 'a', '1 2'), ['1', '2'])
raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
def test_docstring_headline(self):
def f1():
"""
docstring headline
other text
"""
def f2():
"""
docstring
headline
other text
"""
self.assertEqual(util.docstring_headline(f1), "docstring headline")
self.assertEqual(util.docstring_headline(f2), "docstring headline")
def test_analysator_returns_float(self):
    # A wrapped analysator that returns a float passes it through unchanged.
    self.assertEqual(FakeLexer.analyse('0.5'), 0.5)

def test_analysator_returns_boolean(self):
    # Booleans are mapped by the make_analysator wrapper: True -> 1.0,
    # False -> 0.0.
    for flag, expected in ((True, 1.0), (False, 0.0)):
        self.assertEqual(FakeLexer.analyse(flag), expected)

def test_analysator_raises_exception(self):
    # Any exception raised inside the analysator is swallowed by the
    # wrapper, which then returns 0.0.
    class ErrorLexer(object):
        def analyse(text):
            raise RuntimeError('something bad happened')
        analyse = util.make_analysator(analyse)
    self.assertEqual(ErrorLexer.analyse(''), 0.0)

def test_analysator_value_error(self):
    # A ValueError while converting the return value to float yields 0.0.
    self.assertEqual(FakeLexer.analyse('bad input'), 0.0)

def test_analysator_type_error(self):
    # A TypeError while converting the return value to float yields 0.0.
    self.assertEqual(FakeLexer.analyse(None), 0.0)
def test_shebang_matches(self):
    """shebang_matches() against a "python, optionally 2.x" pattern."""
    pattern = r'python(2\.\d)?'
    matching = (
        '#!/usr/bin/env python',
        '#!/usr/bin/python2.4',
        '#!/usr/bin/startsomethingwith python',
        '#!C:\\Python2.4\\Python.exe',
    )
    non_matching = (
        '#!/usr/bin/python-ruby',
        '#!/usr/bin/python/ruby',
    )
    for shebang in matching:
        self.assertTrue(util.shebang_matches(shebang, pattern))
    for shebang in non_matching:
        self.assertFalse(util.shebang_matches(shebang, pattern))
    # A bare '#!' carries no interpreter at all.
    self.assertFalse(util.shebang_matches('#!', r'python'))
def test_doctype_matches(self):
    """doctype_matches()/html_doctype_matches() locate DOCTYPE declarations."""
    self.assertTrue(util.doctype_matches(
        '<!DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
    # A (broken) XML declaration before a non-'!' DOCTYPE must not match.
    self.assertFalse(util.doctype_matches(
        '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
    self.assertTrue(util.html_doctype_matches(
        '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">'))

def test_xml(self):
    """looks_like_xml() heuristics."""
    xhtml = '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">'
    self.assertTrue(util.looks_like_xml(xhtml))
    self.assertTrue(util.looks_like_xml('<html xmlns>abc</html>'))
    # A lone tag without an XML prolog or xmlns attribute is not enough.
    self.assertFalse(util.looks_like_xml('<html>'))
def test_unirange(self):
    """unirange() builds a regex matching an inclusive codepoint range,
    also on narrow builds where non-BMP chars are surrogate pairs."""
    first_non_bmp = u'\U00010000'
    r = re.compile(util.unirange(0x10000, 0x20000))
    m = r.match(first_non_bmp)
    self.assertTrue(m)
    # assertEqual, not the deprecated assertEquals alias used before.
    self.assertEqual(m.end(), len(first_non_bmp))
    self.assertFalse(r.match(u'\uffff'))
    self.assertFalse(r.match(u'xxx'))
    # Tests that end is inclusive
    r = re.compile(util.unirange(0x10000, 0x10000) + '+')
    # Tests that the plus works for the entire unicode point, if narrow
    # build
    m = r.match(first_non_bmp * 2)
    self.assertTrue(m)
    self.assertEqual(m.end(), len(first_non_bmp) * 2)
|
iproduct/course-social-robotics
|
09-image-recognition-opencv-dnn/cv2_haar_cascade_demo.py
|
Python
|
gpl-2.0
| 1,513
| 0.001983
|
# import numpy as np
import cv2 as cv

if __name__ == "__main__":
    # Haar cascade classifiers shipped with OpenCV: frontal faces and eyes.
    faceCascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
    eyeCascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_eye.xml')
    # img = cv.imread('sachin.jpg')
    video_capture = cv.VideoCapture(0)
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if not ret:
            # Camera gone or frame grab failed; cvtColor(None) would crash.
            break
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces, then look for eyes inside
        # each face region only.
        for (x, y, w, h) in faces:
            cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eyeCascade.detectMultiScale(
                roi_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                maxSize=(50, 40),
                flags=cv.CASCADE_SCALE_IMAGE)
            for (ex, ey, ew, eh) in eyes:
                cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 0), 2)
        # Display the resulting frame
        cv.imshow('Video', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv.destroyAllWindows()
|
simpeg/discretize
|
tutorials/inner_products/4_advanced.py
|
Python
|
mit
| 6,674
| 0.002248
|
"""
Advanced Examples
=================
In this section, we demonstrate how to go from the inner product to the
discrete approximation for some special cases. We also show how all
necessary operators are constructed for each case.
"""
####################################################
#
# Import Packages
# ---------------
#
# Here we import the packages required for this tutorial
#
from discretize.utils import sdiag
from discretize import TensorMesh
import numpy as np
import matplotlib.pyplot as plt
#####################################################
# Constitive Relations and Differential Operators
# -----------------------------------------------
#
# Where :math:`\psi` and :math:`\phi` are scalar quantities,
# :math:`\vec{u}` and :math:`\vec{v}` are vector quantities, and
# :math:`\sigma` defines a constitutive relationship, we may need to derive
# discrete approximations for the following inner products:
#
# 1. :math:`(\vec{u} , \sigma \nabla \phi)`
# 2. :math:`(\psi , \sigma \nabla \cdot \vec{v})`
# 3. :math:`(\vec{u} , \sigma \nabla \times \vec{v})`
#
# These cases effectively combine what was learned in the previous two
# tutorials. For each case, we must:
#
# - Define discretized quantities at the appropriate mesh locations
# - Define an inner product matrix that depends on a single constitutive parameter (:math:`\sigma`) or a tensor (:math:`\Sigma`)
# - Construct differential operators that may require you to define boundary conditions
#
# Where :math:`\mathbf{M_e}(\sigma)` is the property dependent inner-product
# matrix for quantities on cell edges, :math:`\mathbf{M_f}(\sigma)` is the
# property dependent inner-product matrix for quantities on cell faces,
# :math:`\mathbf{G_{ne}}` is the nodes to edges gradient operator and
# :math:`\mathbf{G_{cf}}` is the centers to faces gradient operator:
#
# .. math::
# (\vec{u} , \sigma \nabla \phi) &= \mathbf{u_f^T M_f}(\sigma) \mathbf{ G_{cf} \, \phi_c} \;\;\;\;\; (\vec{u} \;\textrm{on faces and} \; \phi \; \textrm{at centers}) \\
# &= \mathbf{u_e^T M_e}(\sigma) \mathbf{ G_{ne} \, \phi_n} \;\;\;\; (\vec{u} \;\textrm{on edges and} \; \phi \; \textrm{on nodes})
#
# Where :math:`\mathbf{M_c}(\sigma)` is the property dependent inner-product
# matrix for quantities at cell centers and :math:`\mathbf{D}` is the faces
# to centers divergence operator:
#
# .. math::
#     (\psi , \sigma \nabla \cdot \vec{v}) = \mathbf{\psi_c^T M_c} (\sigma)\mathbf{ D v_f} \;\;\;\; (\psi \;\textrm{at centers and} \; \vec{v} \; \textrm{on faces} )
#
# Where :math:`\mathbf{C_{ef}}` is the edges to faces curl operator and
# :math:`\mathbf{C_{fe}}` is the faces to edges curl operator:
#
# .. math::
#     (\vec{u} , \sigma \nabla \times \vec{v}) &= \mathbf{u_f^T M_f} (\sigma) \mathbf{ C_{ef} \, v_e} \;\;\;\; (\vec{u} \;\textrm{on edges and} \; \vec{v} \; \textrm{on faces} )\\
# &= \mathbf{u_e^T M_e} (\sigma) \mathbf{ C_{fe} \, v_f} \;\;\;\; (\vec{u} \;\textrm{on faces and} \; \vec{v} \; \textrm{on edges} )
#
# **With the operators constructed below, you can compute all of the
# aforementioned inner products.**
#
# Make basic mesh (10x10x10 unit cells)
h = np.ones(10)
mesh = TensorMesh([h, h, h])
sig = np.random.rand(mesh.nC)  # isotropic: one value per cell
Sig = np.random.rand(mesh.nC, 6)  # anisotropic: 6 independent tensor entries per cell
# Inner product matrices
Mc = sdiag(mesh.vol * sig)  # Inner product matrix (centers)
# Mn = mesh.getNodalInnerProduct(sig)  # Inner product matrix (nodes) (*functionality pending*)
Me = mesh.getEdgeInnerProduct(sig)  # Inner product matrix (edges)
Mf = mesh.getFaceInnerProduct(sig)  # Inner product matrix for tensor (faces)
# Differential operators
Gne = mesh.nodalGrad  # Nodes to edges gradient
mesh.setCellGradBC(["neumann", "dirichlet", "neumann"])  # Set boundary conditions (x, y, z)
Gcf = mesh.cellGrad  # Cells to faces gradient
D = mesh.faceDiv  # Faces to centers divergence
Cef = mesh.edgeCurl  # Edges to faces curl
Cfe = mesh.edgeCurl.T  # Faces to edges curl (transpose of the above)
# EXAMPLE: (u, sig*Curl*v)
# Spy plots of the combined operator sparsity pattern, isotropic vs
# anisotropic sigma.
fig = plt.figure(figsize=(9, 5))
ax1 = fig.add_subplot(121)
ax1.spy(Mf * Cef, markersize=0.5)
ax1.set_title("Me(sig)*Cef (Isotropic)", pad=10)
Mf_tensor = mesh.getFaceInnerProduct(Sig)  # inner product matrix for tensor
ax2 = fig.add_subplot(122)
ax2.spy(Mf_tensor * Cef, markersize=0.5)
ax2.set_title("Me(sig)*Cef (Anisotropic)", pad=10)
#####################################################
# Divergence of a Scalar and a Vector Field
# -----------------------------------------
#
# Where :math:`\psi` and :math:`\phi` are scalar quantities, and
# :math:`\vec{u}` is a known vector field, we may need to derive
# a discrete approximation for the following inner product:
#
# .. math::
# (\psi , \nabla \cdot \phi \vec{u})
#
# Scalar and vector quantities are generally discretized to lie on
# different locations on the mesh. As result, it is better to use the
# identity :math:`\nabla \cdot \phi \vec{u} = \phi \nabla \cdot \vec{u} + \vec{u} \cdot \nabla \phi`
# and separate the inner product into two parts:
#
# .. math::
# (\psi , \phi \nabla \cdot \vec{u} ) + (\psi , \vec{u} \cdot \nabla \phi)
#
# **Term 1:**
#
# If the vector field :math:`\vec{u}` is divergence free, there is no need
# to evaluate the first inner product term. This is the case for advection when
# the fluid is incompressible.
#
# Where :math:`\mathbf{D_{fc}}` is the faces to centers divergence operator, and
# :math:`\mathbf{M_c}` is the basic inner product matrix for cell centered
# quantities, we can approximate this inner product as:
#
# .. math::
# (\psi , \phi \nabla \cdot \vec{u} ) = \mathbf{\psi_c^T M_c} \textrm{diag} (\mathbf{D_{fc} u_f} ) \, \mathbf{\phi_c}
#
# **Term 2:**
#
# Let :math:`\mathbf{G_{cf}}` be the cell centers to faces gradient operator,
# :math:`\mathbf{M_c}` be the basic inner product matrix for cell centered
# quantities, and :math:`\mathbf{\tilde{A}_{fc}}` and averages *and* sums the
# cartesian contributions of :math:`\vec{u} \cdot \nabla \phi`, we can
# approximate the inner product as:
#
# .. math::
# (\psi , \vec{u} \cdot \nabla \phi) = \mathbf{\psi_c^T M_c \tilde A_{fc}} \text{diag} (\mathbf{u_f} ) \mathbf{G_{cf} \, \phi_c}
#
# **With the operators constructed below, you can compute all of the
# inner products.**
# Make basic mesh
h = np.ones(10)
mesh = TensorMesh([h, h, h])
# Inner product matrices
# NOTE(review): `sig` is reused from the previous section; it only has the
# right length because both meshes are 10x10x10 — confirm intended.
Mc = sdiag(mesh.vol * sig)  # Inner product matrix (centers)
# Differential operators
mesh.setCellGradBC(["neumann", "dirichlet", "neumann"])  # Set boundary conditions
Gcf = mesh.cellGrad  # Cells to faces gradient
Dfc = mesh.faceDiv  # Faces to centers divergence
# Averaging and summing matrix (sums the Cartesian contributions)
Afc = mesh.dim * mesh.aveF2CC
|
developerQuinnZ/this_will_work
|
student-work/JacobJanak/class_lessons/zoo.py
|
Python
|
mit
| 1,060
| 0.004717
|
import random
class Zoo:
    """A collection of animal names with a toy 'mating' operation that
    splices two names together."""

    # Class-wide tallies shared by every Zoo instance.
    noOfAnimals = 0
    noOfZoos = 0

    def __init__(self, animals, ticketPrice):
        Zoo.noOfAnimals += len(animals)
        Zoo.noOfZoos += 1
        self.ticketPrice = ticketPrice
        # NOTE: keeps a reference to the caller's list (not a copy).
        self.animals = animals

    def __iter__(self):
        """Iterate over the animal names."""
        for animal in self.animals:
            yield animal

    def __str__(self):
        return " ".join(self.animals)

    def randomMate(self):
        """Mate two randomly chosen animals (possibly the same one twice)."""
        num = random.randrange(0, len(self.animals))
        num2 = random.randrange(0, len(self.animals))
        self.mate(self.animals[num], self.animals[num2])

    def mate(self, male, female):
        """Append a new animal named from the first two letters of *male*
        plus the tail of *female*, and print its name."""
        newAnimal = male[0:2] + female[2:]
        self.animals.append(newAnimal)
        Zoo.noOfAnimals += 1
        print(newAnimal)
if __name__ == '__main__':
    # Demo: two zoos whose animal lists start from the same base names.
    pdxAnimals = ['bat', 'lion', 'goats', 'elephant']
    pdxZoo = Zoo(pdxAnimals, 20)
    vanAnimals = pdxAnimals + ['chimp', 'snake', 'beaver']
    vanZoo = Zoo(vanAnimals, 25)
    vanZoo.randomMate()
    # noOfAnimals is a class attribute, so both lookups print the same total.
    print(pdxZoo.noOfAnimals)
    print(Zoo.noOfAnimals)
|
desirable-objects/hotwire-shell
|
hotwire/builtins/pyeval.py
|
Python
|
gpl-2.0
| 4,083
| 0.008327
|
# This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Standard library (Python 2: `sha` is the deprecated predecessor of hashlib).
import os
import sys
import re
import subprocess
import sha
import tempfile
import symbol
import parser
import code
import threading

from hotwire.builtin import builtin_hotwire, InputStreamSchema, OutputStreamSchema
from hotwire.fs import path_join
from hotwire.sysdep.fs import Filesystem
from hotwire.externals.rewrite import rewrite_and_compile
@builtin_hotwire(singlevalue=True,
input=InputStreamSchema('any', optional=True),
output=OutputStreamSchema('any'),
options=[['-f', '--file']])
def py_eval(context, *args):
_("""Compile and execute Python expression.
Iterable return values (define __iter__) are expanded. Other values are
expressed as an iterable which yielded a single object.""")
if len(args) < 1:
raise Valu
|
eError(_("Too few arguments specified"))
locals = {'hot_context': context}
if context.current_output_metadata and context.current_output_metadata.type is not None:
if context.current_output_metadata.single:
try:
locals['it'] = context.snapshot_current_output()
except ValueError, e:
locals['it'] = None
else:
locals['current'] = lambda: context.snapshot_current_output()
locals['selected'] = lambda: context.snapshot_current_selected_output(selected=True)
last_value = None
if '-f' in context.options:
fpath = path_join(context.cwd, args[0])
# Do we assume locale encoding or UTF-8 here?
# We probably need to scan for a -*- coding -*-
f = open(fpath)
compiled = compile(f.read(), fpath, 'exec')
f.close()
exec compiled in locals
try:
mainfunc = locals['main']
except KeyError, e:
return None
if not hasattr(mainfunc, '__call__'):
return None
return mainfunc(*(args[1:]))
else:
if len(args) > 1:
raise ValueError(_("Too many arguments specified"))
# We want to actually get the object that results from a user typing
# input such as "20" or "import os; os". The CPython interpreter
# has some deep hackery inside which transforms "single" input styles
# into "print <input>". That's not suitable for us, because we don't
# want to spew objects onto stdout; we want to actually get the object
# itself. Thus we use some code from Reinteract to rewrite the
# Python AST to call custom functions.
# Yes, it's lame.
def handle_output(myself, *args):
myself['result'] = args[-1]
locals['_hotwire_handle_output'] = handle_output
locals['_hotwire_handle_output_self'] = {'result': None}
(compiled, mutated) = rewrite_and_compile(args[0], output_func_name='_hotwire_handle_output', output_func_self='_hotwire_handle_output_self')
exec compiled in locals
return locals['_hotwire_handle_output_self']['result']
|
kmaehashi/jubamgr
|
controller/lib/jubamgr/controller/config.py
|
Python
|
lgpl-2.1
| 1,500
| 0.009333
|
# -*- coding: utf-8 -*-
import json
from .entity import *
class JubaManagerConfig(object):
    """In-memory view of a jubamgr configuration file."""

    def __init__(self):
        self._global_zookeeper = ''
        self._visors = []
        self._clusters = []
        self._servers = []
        self._proxies = []

    @classmethod
    def from_json(cls, data):
        """Build a config object from JSON text *data*."""
        # TODO handle errors
        cfg = json.loads(data)
        obj = cls()
        # TODO assert values of config file
        obj._global_zookeeper = cfg['global']['zookeeper']
        # List comprehensions instead of bare map(): the original relied on
        # Python 2's list-returning map(); these work identically on 2 and 3.
        obj._visors = [JubaVisor.create(x) for x in cfg['visors']]
        obj._clusters = [JubaCluster.create(x) for x in cfg['clusters']]
        obj._servers = [JubaServer.create(x) for x in cfg['servers']]
        obj._proxies = [JubaProxy.create(x) for x in cfg['proxies']]
        return obj

    def lookup(self, process_type, query_id):
        """Return the first entity of *process_type* whose id is *query_id*.

        Raises IndexError when nothing matches; returns None for an unknown
        *process_type* (both unchanged from the original behaviour).
        """
        entities = self.get_all(process_type)
        if entities is None:
            return None
        return [x for x in entities if x.get_id() == query_id][0]

    def get_all(self, process_type):
        """Return the list of entities for *process_type*, or None if unknown."""
        return {
            'server': self._servers,
            'proxy': self._proxies,
            'visor': self._visors,
            'cluster': self._clusters,
        }.get(process_type)
|
trhongbinwang/data_science_journey
|
deep_learning/keras/keras_as_tf_api.py
|
Python
|
apache-2.0
| 2,218
| 0.006763
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 17 10:48:09 2017
keras as tf api
always use as an tf api
In this case, we use Keras only as a syntactical shortcut to generate an op
that maps some tensor(s) input to some tensor(s) output, and that's it.
@author: hongbin
"""
import tensorflow as tf
from keras import backend as K
from keras.layers import Dense
from keras.objectives import categorical_crossentropy
from tensorflow.examples.tutorials.mnist import input_data
from keras.metrics import categorical_accuracy as accuracy # just a plain function
# Initialize a TF1-style session and hand it to Keras.
sess = tf.Session()
K.set_session(sess) # no need. directly use with tf.Session() as sess:
# this placeholder will contain our input digits, as flat vectors
img = tf.placeholder(tf.float32, shape=(None, 784))
labels = tf.placeholder(tf.float32, shape=(None, 10))
# ----- use Keras layers to speed up the model definition process!!!
# Keras layers can be called on TensorFlow tensors:
x = Dense(128, activation='relu')(img) # fully-connected layer with 128 units and ReLU activation
x = Dense(128, activation='relu')(x)
preds = Dense(10, activation='softmax')(x) # output layer with 10 units and a softmax activation
loss = tf.reduce_mean(categorical_crossentropy(labels, preds))
# load data
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
# train with plain TensorFlow SGD; Keras is only used to build the graph
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
with sess.as_default():
    for i in range(100):
        batch = mnist_data.train.next_batch(50)
        train_op.run(feed_dict={img: batch[0],
                                labels: batch[1]})
# evaluate on the test split
acc_value = accuracy(labels, preds)
with sess.as_default():
    print( acc_value.eval(feed_dict={img: mnist_data.test.images,
                                    labels: mnist_data.test.labels}))
# Calling a Keras model on a TensorFlow tensor
#
# A Keras model acts the same as a layer, and thus can be called on TensorFlow tensors:
from keras.models import Sequential

# A Sequential model is itself callable, just like a single layer.
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=784))
model.add(Dense(10, activation='softmax'))
# this works! Calling the model on a placeholder yields its output tensor.
x = tf.placeholder(tf.float32, shape=(None, 784))
y = model(x)
|
gcmalloc/screen-calendar
|
screen.py
|
Python
|
gpl-3.0
| 5,346
| 0.001122
|
#!/usr/bin/env python2
# -*- coding: utf8 -*-
# from __future__ import unicode_literals
'''
This file is part of FIXME Screen Calendar.
FIXME Events is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FIXME Events is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FIXME Events. If not, see <http://www.gnu.org/licenses/>.
'''
from flask import Flask, render_template, request, Response, redirect, session
import requests
import icalendar
import arrow
import random
import sys
import types
import config as cfg
app = Flask(__name__)

if cfg.secret_key == '':
    # Refuse to start without a configured secret key; exit non-zero so
    # service managers and scripts see the failure (previously exited 0).
    print('configure secret_key!')
    sys.exit(1)

app.debug = True  # FIXME: remove on production
app.secret_key = cfg.secret_key
#
# Functions
#
def get_recurrences(cal_name, dt_start, dt_end, evt):
    """Expand a recurring VEVENT into a list of concrete event dicts.

    Supports WEEKLY/MONTHLY/YEARLY frequencies; any other FREQ falls back
    to a daily step.  Expansion stops at the rule's UNTIL date, or one
    month from now for open-ended rules.
    NOTE(review): the rule's BYDAY part is ignored — confirm acceptable.
    """
    rrule = evt.get('rrule')
    freq = rrule.get('freq')[0]
    until = rrule.get('until')
    if until is not None:
        rec_end = arrow.get(until[0])
    else:
        # Open-ended recurrence: only expand one month into the future.
        rec_end = arrow.get().replace(months=+1)
    events = []
    while dt_start < rec_end:
        events.append(get_event(cal_name, dt_start, dt_end, evt))
        # Advance both start and end by one recurrence period.
        if freq == 'WEEKLY':
            dt_start = dt_start.replace(days=+7)
            dt_end = dt_end.replace(days=+7)
        elif freq == 'MONTHLY':
            dt_start = dt_start.replace(months=+1)
            dt_end = dt_end.replace(months=+1)
        elif freq == 'YEARLY':
            dt_start = dt_start.replace(years=+1)
            dt_end = dt_end.replace(years=+1)
        else:
            dt_start = dt_start.replace(days=+1)
            dt_end = dt_end.replace(days=+1)
    return events
def get_event(cal_name, dt_start, dt_end, evt):
    """Flatten one VEVENT occurrence into the plain dict the templates use."""
    event = {
        'cal': cal_name,
        'name': evt.get('summary'),
        'location': evt.get('location'),
        'description': evt.get('description'),
        # Day-of-week comes from the underlying datetime object.
        'dow': dt_start.datetime.strftime('%A'),
        'timestamp': dt_start.timestamp,
    }
    # Formatted start ('s_') and end ('e_') components.
    for prefix, moment in (('s', dt_start), ('e', dt_end)):
        event[prefix + '_day'] = moment.format('DD')
        event[prefix + '_month'] = moment.format('MMM')
        event[prefix + '_year'] = moment.format('YYYY')
        event[prefix + '_time'] = moment.format('HH:mm')
    # Only the start carries a numeric month.
    event['s_month_num'] = dt_start.format('MM')
    return event
def get_calendar(url):
    """Download an iCalendar feed and return its recent + future events.

    On parse failure an error pseudo-event is appended so the page still
    renders something.
    """
    events = []
    # Initialized before the try so the except handler never hits a
    # NameError when from_ical() itself fails.
    cal_name = ''
    cal_data = requests.get(url, headers={'User-Agent': cfg.user_agent}).content
    try:
        cal_obj = icalendar.Calendar.from_ical(cal_data)
        cal_name = cal_obj.get('x-wr-calname')
        recent = arrow.now().floor('day').replace(days=-10)  # FIXME: must be yesterday ?
        for e in cal_obj.walk():
            if e.name != 'VEVENT':
                continue
            # Get dates; all-day events carry date objects, not datetimes,
            # which makes arrow.get() raise TypeError.
            try:
                dt_start = arrow.get(e.get('dtstart').dt)
                dt_end = arrow.get(e.get('dtend').dt)
            except TypeError:
                dt_start = arrow.Arrow.fromdate(e.get('dtstart').dt)
                dt_end = arrow.Arrow.fromdate(e.get('dtend').dt)
            # Only future or recent events
            if dt_start < recent:
                continue
            # Create and add event(s); recurring events are expanded.
            # (`is None` replaces the Python-2-only types.NoneType check.)
            if e.get('rrule') is None:
                evt = get_event(cal_name, dt_start, dt_end, e)
                if evt not in events:
                    events.append(evt)
            else:
                for f in get_recurrences(cal_name, dt_start, dt_end, e):
                    if f not in events:
                        events.append(f)
    except IOError as e:
        events.append({'name': e, 'cal': cal_name})
    return events
#
# PAGES
#
@app.route('/')
def home():
    """Render all events grouped into past / today / this week / later."""
    # session['username'] = random.getrandbits(32)
    # Gather events from every configured calendar feed, oldest first.
    events = []
    for cal in cfg.calendars:
        events += get_calendar(cal)
    events = sorted(events, key=lambda i: i['timestamp'])
    # Split according to date range
    today_start = arrow.get().replace(hour=0, minute=0, second=0)
    today_end = today_start.replace(days=+1)
    week_end = today_start.replace(days=+7)
    past_events = [x for x in events if x['timestamp'] <= today_start.timestamp]
    today_events = [x for x in events if x['timestamp'] > today_start.timestamp and x['timestamp'] <= today_end.timestamp]
    week_events = [x for x in events if x['timestamp'] > today_end.timestamp and x['timestamp'] <= week_end.timestamp]
    future_events = [x for x in events if x['timestamp'] > week_end.timestamp]
    return render_template('list.html', data={
        'all_events': events,
        'past_events': past_events,
        'today_events': today_events,
        'week_events': week_events,
        'future_events': future_events,
    })
#
# MAIN
#
if __name__ == '__main__':
    # Development server only; run behind a real WSGI server in production.
    app.run()
|
ctrl-alt-d/fpuf
|
fpuf/utils/urls.py
|
Python
|
gpl-3.0
| 381
| 0.041995
|
from django.conf.urls import patterns, url

# Legacy URLconf: patterns() was deprecated in Django 1.8 and removed in 1.10.
# NOTE(review): '^missatges/$' routes to the 'about' view — confirm intended.
urlpatterns = patterns(
    'fpuf.utils.views',
    url(r'^about/$', 'about', name="varis_about_about"),
    url(r'^missatges/$', 'about', name="varis_missatges_veure"),
    url(r'^condicions/$', 'condicions', name="varis_condicions_condicions"),
)
|
amtriathlon/GoldenCheetah
|
src/Resources/python/library.py
|
Python
|
gpl-2.0
| 3,658
| 0.030618
|
#
# Python class library loaded when the interpreter
# is installed by PythonEmbed
#--------------------------------------------------
# API for accessing GC data
#--------------------------------------------------
# basic activity data
def __GCactivity(join="repeat", activity=None):
    """Collect every present data series plus all XDATA series for an
    activity into one dict keyed by series name.

    NOTE(review): the GC object is injected by the embedding GoldenCheetah
    runtime (see PythonEmbed header above) — not importable standalone.
    """
    rd={}
    for x in range(0,GC.seriesLast()):
        if (GC.seriesPresent(x, activity)):
            rd[GC.seriesName(x)] = GC.series(x, activity)
    for name in GC.xdataNames("", activity):
        for serie in GC.xdataNames(name, activity):
            xd = GC.xdata(name, serie, join, activity)
            rd[str(xd)] = xd
    return rd

# xdata
def __GCactivityXdata(name="", activity=None):
    """Return XDATA group names when *name* is empty, otherwise a dict of
    that group's series."""
    if not name:
        return GC.xdataNames("")
    rd={}
    for serie in GC.xdataNames(name, activity):
        xd = GC.xdataSeries(name, serie, activity)
        rd[str(xd)] = xd
    return rd

# setting up the chart
def __GCsetChart(title="",type=1,animate=False,legpos=2,stack=False,orientation=2):
    """Thin keyword-friendly wrapper over GC.configChart."""
    GC.configChart(title,type,animate,legpos,stack,orientation)

# add a curve
def __GCsetCurve(name="",x=list(),y=list(),f=list(),xaxis="x",yaxis="y", labels=list(), colors=list(),line=1,symbol=1,size=15,color="cyan",opacity=0,opengl=True,legend=True,datalabels=False,fill=False):
    """Add a named curve to the chart; *name* must be set and unique."""
    if (name == ""):
        raise ValueError("curve 'name' must be set and unique.")
    GC.setCurve(name,list(x),list(y),list(f),xaxis,yaxis,list(labels),list(colors),line,symbol,size,color,opacity,opengl,legend,datalabels,fill)

# setting the axis
def __GCconfigAxis(name,visible=True,align=-1,min=-1,max=-1,type=-1,labelcolor="",color="",log=False,categories=list()):
    """Configure a chart axis; -1 values mean "leave at default"."""
    if (name == ""):
        raise ValueError("axis 'name' must be passed.")
    GC.configAxis(name, visible, align, min, max, type, labelcolor, color, log, categories)

# adding annotations
def __GCannotate(type="label", series="", label="", value=0):
    """Add a chart annotation (currently label-style only)."""
    GC.addAnnotation(type,series,label,value)
# add to main GC entrypoint: expose the helpers under friendly names
GC.activity=__GCactivity
GC.activityXdata=__GCactivityXdata
GC.setChart=__GCsetChart
GC.addCurve=__GCsetCurve
GC.setAxis=__GCconfigAxis
GC.annotate=__GCannotate
# orientation
GC_HORIZONTAL=1
GC_VERTICAL=2
# line style
GC_LINE_NONE=0
GC_LINE_SOLID=1
GC_LINE_DASH=2
GC_LINE_DOT=3
GC_LINE_DASHDOT=4
# alignment constants
GC_ALIGN_BOTTOM=0
GC_ALIGN_LEFT=1
GC_ALIGN_TOP=2
GC_ALIGN_RIGHT=3
# chart types; 0 reserved for uninitialised
GC.CHART_LINE=1
GC.CHART_SCATTER=2
GC.CHART_BAR=3
GC.CHART_PIE=4
GC.CHART_STACK=5
GC.CHART_PERCENT=6
# Axis type
GC.AXIS_CONTINUOUS=0
GC.AXIS_DATE=1
GC.AXIS_TIME=2
GC.AXIS_CATEGORY=3
# Data series identifiers (mirror GoldenCheetah's internal series ids)
GC.SERIES_SECS = 0
GC.SERIES_CAD = 1
GC.SERIES_CADD = 2
GC.SERIES_HR = 3
GC.SERIES_HRD = 4
GC.SERIES_KM = 5
GC.SERIES_KPH = 6
GC.SERIES_KPHD = 7
GC.SERIES_NM = 8
GC.SERIES_NMD = 9
GC.SERIES_WATTS = 10
GC.SERIES_WATTSD = 11
GC.SERIES_ALT = 12
GC.SERIES_LON = 13
GC.SERIES_LAT = 14
GC.SERIES_HEADWIND = 15
GC.SERIES_SLOPE = 16
GC.SERIES_TEMP = 17
GC.SERIES_INTERVAL = 18
GC.SERIES_NP = 19
GC.SERIES_XPOWER = 20
GC.SERIES_VAM = 21
GC.SERIES_WATTSKG = 22
GC.SERIES_LRBALANCE = 23
GC.SERIES_LTE = 24
GC.SERIES_RTE = 25
GC.SERIES_LPS = 26
GC.SERIES_RPS = 27
GC.SERIES_APOWER = 28
GC.SERIES_WPRIME = 29
GC.SERIES_ATISS = 30
GC.SERIES_ANTISS = 31
GC.SERIES_SMO2 = 32
GC.SERIES_THB = 33
GC.SERIES_RVERT = 34
GC.SERIES_RCAD = 35
GC.SERIES_RCONTACT = 36
GC.SERIES_GEAR = 37
GC.SERIES_O2HB = 38
GC.SERIES_HHB = 39
GC.SERIES_LPCO = 40
GC.SERIES_RPCO = 41
GC.SERIES_LPPB = 42
GC.SERIES_RPPB = 43
GC.SERIES_LPPE = 44
GC.SERIES_RPPE = 45
GC.SERIES_LPPPB = 46
GC.SERIES_RPPPB = 47
GC.SERIES_LPPPE = 48
GC.SERIES_RPPPE = 49
GC.SERIES_WBAL = 50
GC.SERIES_TCORE = 51
GC.SERIES_CLENGTH = 52
GC.SERIES_APOWERKG = 53
GC.SERIES_INDEX = 54
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/test/functional/test_compile.py
|
Python
|
apache-2.0
| 214
| 0.004673
|
# pylint: disable=missing-docstring, unused-variable, pointless-statement, too-few-public-methods
class WrapperClass(object):
    # Pylint functional-test fixture: the unused variable and pointless
    # statement below are intentional (see the disable pragma above).
    def method(self):
        var = +4294967296
        self.method.__code__.co_consts
|
uclouvain/OSIS-Louvain
|
attribution/tests/views/charge_repartition/common.py
|
Python
|
agpl-3.0
| 4,717
| 0.00212
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import uuid
from attribution.models.attribution_new import AttributionNew
from attribution.tests.factories.attribution_charge_new import AttributionChargeNewFactory
from attribution.tests.factories.attribution_new import AttributionNewFactory
from base.models.enums.learning_container_year_types import MASTER_THESIS
from base.tests.factories.academic_calendar import generate_learning_unit_edition_calendars
from base.tests.factories.entity import EntityWithVersionFactory
from base.tests.factories.learning_component_year import LecturingLearningComponentYearFactory, \
PracticalLearningComponentYearFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFullFactory, LearningUnitYearPartimFactory
from learning_unit.tests.factories.central_manager import CentralManagerFactory
class TestChargeRepartitionMixin:
    """Shared fixtures for charge-repartition view tests: a partim learning
    unit year, its full counterpart, and their lecturing/practical charges."""

    @classmethod
    def setUpTestData(cls):
        cls.entity = EntityWithVersionFactory()
        cls.learning_unit_year = LearningUnitYearPartimFactory(
            learning_container_year__container_type=MASTER_THESIS,
            learning_container_year__requirement_entity=cls.entity
        )
        cls.lecturing_component = LecturingLearningComponentYearFactory(learning_unit_year=cls.learning_unit_year)
        cls.practical_component = PracticalLearningComponentYearFactory(learning_unit_year=cls.learning_unit_year)
        # The full UE shares the partim's learning container and year.
        cls.full_learning_unit_year = LearningUnitYearFullFactory(
            learning_container_year=cls.learning_unit_year.learning_container_year,
            academic_year=cls.learning_unit_year.academic_year
        )
        cls.lecturing_component_full = LecturingLearningComponentYearFactory(
            learning_unit_year=cls.full_learning_unit_year
        )
        cls.practical_component_full = PracticalLearningComponentYearFactory(
            learning_unit_year=cls.full_learning_unit_year
        )
        cls.person = CentralManagerFactory(entity=cls.entity).person
        generate_learning_unit_edition_calendars([cls.learning_unit_year.academic_year])

    def setUp(self):
        self.attribution = AttributionNewFactory(
            learning_container_year=self.learning_unit_year.learning_container_year,
        )
        attribution_id = self.attribution.id
        self.charge_lecturing = AttributionChargeNewFactory(
            attribution=self.attribution,
            learning_component_year=self.lecturing_component
        )
        self.charge_practical = AttributionChargeNewFactory(
            attribution=self.attribution,
            learning_component_year=self.practical_component
        )
        # Clone the attribution (reset pk/uuid, save) so the full UE gets its
        # own attribution row with identical data.
        self.attribution_full = self.attribution
        self.attribution_full.id = None
        self.attribution_full.uuid = uuid.uuid4()
        self.attribution_full.save()
        self.charge_lecturing_full = AttributionChargeNewFactory(
            attribution=self.attribution_full,
            learning_component_year=self.lecturing_component_full
        )
        self.charge_practical_full = AttributionChargeNewFactory(
            attribution=self.attribution_full,
            learning_component_year=self.practical_component_full
        )
        # Re-fetch the original row: `self.attribution` was mutated above.
        self.attribution = AttributionNew.objects.get(id=attribution_id)
        self.client.force_login(self.person.user)

    def clean_partim_charges(self):
        """Remove the partim's charges and attribution created in setUp."""
        self.charge_practical.delete()
        self.charge_lecturing.delete()
        self.attribution.delete()
finnurtorfa/aflafrettir.is
|
app/models.py
|
Python
|
mit
| 8,023
| 0.018576
|
import hashlib
from datetime import datetime
from flask import request
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from . import db, login_manager
#pylint: disable-msg=E1101
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: resolve the id stored in the session cookie
    # to a User row (the id arrives as a string).
    return User.query.get(int(user_id))
#pylint: disable-msg=E1101
class User(UserMixin, db.Model):
    """Registered account.

    ``UserMixin`` supplies the default Flask-Login behaviour; the
    password is only ever stored as a Werkzeug hash.
    """

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(64),
                      nullable=False,
                      unique=True,
                      index=True)
    username = db.Column(db.String(64),
                         nullable=False,
                         unique=True,
                         index=True)
    is_admin = db.Column(db.Boolean)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    bio = db.Column(db.Text)
    password_hash = db.Column(db.String(128))
    # Cached MD5 of the e-mail address, used to build Gravatar URLs.
    avatar_hash = db.Column(db.String(32))
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    fb_token = db.Column(db.Text)
    posts = db.relationship('Post', backref='author', lazy='dynamic')

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Pre-compute the Gravatar hash once so request handlers don't have to.
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()

    #pylint: disable-msg=R0201
    @property
    def password(self):
        """Write-only attribute: the plain password can never be read back."""
        raise AttributeError('Password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL for this user; scheme follows the request."""
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        h = self.avatar_hash or \
            hashlib.md5(self.email.encode('utf-8')).hexdigest()
        return '{u}/{h}?s={s}&d={d}&r={r}'.format(u=url,
                                                  h=h,
                                                  s=size,
                                                  d=default,
                                                  r=rating)
class Post(db.Model):
    """A news article, stored per language ('is' by default)."""

    __tablename__ = 'posts'
    __searchable__ = ['title', 'body_html']

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String(64))
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    language = db.Column(db.String(4), default='is')
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))

    def __init__(self, **kwargs):
        super(Post, self).__init__(**kwargs)

    @classmethod
    def _chronological(cls, descending):
        # Shared ordering expression: newest first when *descending*.
        return cls.timestamp.desc() if descending else cls.timestamp

    @classmethod
    def get_all(cls, descending=True):
        """Every post, ordered by timestamp."""
        return cls.query.order_by(cls._chronological(descending)).all()

    @classmethod
    def get_all_by_lang(cls, descending=True, lang='is'):
        """Every post written in *lang*, ordered by timestamp."""
        return cls.query.filter_by(language=lang)\
                        .order_by(cls._chronological(descending))\
                        .all()

    @classmethod
    def get_per_page(cls, page, per_page=5, descending=True, lang='is'):
        """One page of the *lang* posts (returns a Pagination object)."""
        return cls.query.filter_by(language=lang)\
                        .order_by(cls._chronological(descending))\
                        .paginate(page, per_page, False)

    @classmethod
    def get_by_id(cls, aid):
        """The post with primary key *aid*, or abort with 404."""
        return cls.query.filter_by(id=aid).first_or_404()

    #pylint: disable-msg=R0913
    @classmethod
    def get_by_category(cls, cid, page, per_page=5, descending=True, lang='is'):
        """One page of the *lang* posts belonging to category *cid*."""
        return cls.query.filter(cls.category_id == cid)\
                        .filter_by(language=lang)\
                        .order_by(cls._chronological(descending))\
                        .paginate(page, per_page, False)

    @classmethod
    def search(cls, query, page, per_page=4, descending=True):
        """One page of the posts matching the Whoosh full-text *query*."""
        return cls.query.whoosh_search(query)\
                        .order_by(cls._chronological(descending))\
                        .paginate(page, per_page, False)
class Category(db.Model):
    """Post category with an Icelandic and an English name.

    The category 'Almenn frétt' is excluded from the listings below.
    """

    __tablename__ = 'categories'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(64), nullable=False, unique=True)
    name_en = db.Column(db.String(64), nullable=False, unique=True)
    active = db.Column(db.Boolean, nullable=False, default=False)
    posts = db.relationship('Post', backref='category', lazy='dynamic')

    def __init__(self, **kwargs):
        super(Category, self).__init__(**kwargs)

    @classmethod
    def get_all_active(cls, active=True):
        """Categories whose active flag matches *active* (sans 'Almenn frétt')."""
        wanted = True if active else False
        return cls.query.filter_by(active=wanted)\
                        .filter(cls.name != 'Almenn frétt')\
                        .all()

    @classmethod
    def get_by_name(cls, name):
        """The category called *name*, or None."""
        return cls.query.filter_by(name=name).first()
class Image(db.Model):
    """Uploaded asset.

    ``type`` partitions the table: values >= 10 are listed by
    get_all_imgs(), smaller values are the advertisements returned by
    get_all_ads().
    """

    __tablename__ = 'images'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    filename = db.Column(db.String(120), nullable=True)
    ad_html = db.Column(db.Text)
    location = db.Column(db.String(120), nullable=False)
    type = db.Column(db.Integer, nullable=False)
    url = db.Column(db.String(120))
    active = db.Column(db.Boolean, default=False)
    timestamp = db.Column(db.DateTime,
                          nullable=False,
                          default=datetime.utcnow)

    def __init__(self, **kwargs):
        super(Image, self).__init__(**kwargs)

    @classmethod
    def _by_time(cls, descending):
        # Shared ordering expression for the listing helpers.
        return cls.timestamp.desc() if descending else cls.timestamp

    @classmethod
    def get_all_imgs(cls, descending=True):
        """Every image entry (type >= 10) ordered by upload time."""
        return cls.query.filter(cls.type >= 10)\
                        .order_by(cls._by_time(descending)).all()

    @classmethod
    def get_all_ads(cls, descending=True, only_active=True):
        """Advertisement entries (type < 10), optionally only active ones."""
        query = cls.query.filter(cls.type < 10)
        if only_active:
            query = query.filter(cls.active == True)
        return query.order_by(cls._by_time(descending)).all()

    @classmethod
    def get_by_id(cls, aid):
        """The image with primary key *aid*, or None."""
        return cls.query.filter_by(id=aid).first()
#pylint: disable-msg=R0903
class About(db.Model):
    """Content of the site's "about" page (one body text per row)."""

    __tablename__ = 'about'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    body = db.Column(db.Text)  # page content
    timestamp = db.Column(db.DateTime,
                          nullable=False,
                          default=datetime.utcnow)

    def __init__(self, **kwargs):
        super(About, self).__init__(**kwargs)
|
TextusData/Mover
|
thirdparty/git-1.7.11.3/git-p4.py
|
Python
|
gpl-3.0
| 110,533
| 0.003655
|
#!/usr/bin/env python
#
# git-p4.py -- A tool for bidirectional operation between a Perforce depot and git.
#
# Author: Simon Hausmann <simon@lst.de>
# Copyright: 2007 Simon Hausmann <simon@lst.de>
# 2007 Trolltech ASA
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
#
import optparse, sys, os, marshal, subprocess, shelve
import tempfile, getopt, os.path, time, platform
import re, shutil
verbose = False
# Only labels/tags matching this will be imported/exported
defaultLabelRegexp = r'[a-zA-Z0-9_\-.]+$'
def p4_build_cmd(cmd):
    """Build a suitable p4 command line.

    This consolidates building and returning a p4 command line into one
    location. It means that hooking into the environment, or other
    configuration can be done more easily.
    """
    real_cmd = ["p4"]

    # Each git-p4.* config value, when set, contributes one p4 flag.
    for key, flag in [("git-p4.user", "-u"),
                      ("git-p4.password", "-P"),
                      ("git-p4.port", "-p"),
                      ("git-p4.host", "-H"),
                      ("git-p4.client", "-c")]:
        value = gitConfig(key)
        if len(value) > 0:
            real_cmd += [flag, value]

    # A string command is joined for shell execution; a list is extended.
    if isinstance(cmd, basestring):
        real_cmd = ' '.join(real_cmd) + ' ' + cmd
    else:
        real_cmd += cmd
    return real_cmd
def chdir(dir):
    """Change directory and keep the PWD environment variable in sync.

    P4 consults $PWD rather than calling getcwd(), and since we do not go
    through a shell nothing maintains it for us.  *dir* may be relative,
    so change directory first, then record wherever we actually landed.
    """
    os.chdir(dir)
    os.environ['PWD'] = os.getcwd()
def die(msg):
    """Abort with *msg*: raise when the module-level ``verbose`` flag is
    set (keeps the traceback), otherwise print to stderr and exit(1)."""
    if verbose:
        raise Exception(msg)
    else:
        sys.stderr.write(msg + "\n")
        sys.exit(1)
def write_pipe(c, stdin):
    """Run command *c* feeding it *stdin*; die() on a non-zero exit.

    *c* may be a shell string or an argv list.  Returns whatever the
    pipe's write() returned.
    """
    if verbose:
        sys.stderr.write('Writing pipe: %s\n' % str(c))

    # A plain string runs through the shell; a list is exec'd directly.
    expand = isinstance(c, basestring)
    p = subprocess.Popen(c, stdin=subprocess.PIPE, shell=expand)
    pipe = p.stdin
    val = pipe.write(stdin)
    pipe.close()
    if p.wait():
        die('Command failed: %s' % str(c))

    return val

def p4_write_pipe(c, stdin):
    """Like write_pipe(), with *c* wrapped into a full p4 command line."""
    real_cmd = p4_build_cmd(c)
    return write_pipe(real_cmd, stdin)
def read_pipe(c, ignore_error=False):
    """Run command *c* and return its whole stdout as one string.

    Dies on a non-zero exit status unless *ignore_error* is set.
    """
    if verbose:
        sys.stderr.write('Reading pipe: %s\n' % str(c))

    # A plain string runs through the shell; a list is exec'd directly.
    expand = isinstance(c, basestring)
    p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
    pipe = p.stdout
    val = pipe.read()
    if p.wait() and not ignore_error:
        die('Command failed: %s' % str(c))

    return val

def p4_read_pipe(c, ignore_error=False):
    """Like read_pipe(), with *c* wrapped into a full p4 command line."""
    real_cmd = p4_build_cmd(c)
    return read_pipe(real_cmd, ignore_error)
def read_pipe_lines(c):
    """Run command *c* and return its stdout as a list of lines; die() on
    failure."""
    if verbose:
        sys.stderr.write('Reading pipe: %s\n' % str(c))

    # A plain string runs through the shell; a list is exec'd directly.
    expand = isinstance(c, basestring)
    p = subprocess.Popen(c, stdout=subprocess.PIPE, shell=expand)
    pipe = p.stdout
    val = pipe.readlines()
    if pipe.close() or p.wait():
        die('Command failed: %s' % str(c))

    return val

def p4_read_pipe_lines(c):
    """Specifically invoke p4 on the command supplied. """
    real_cmd = p4_build_cmd(c)
    return read_pipe_lines(real_cmd)
def system(cmd):
    """Run *cmd* (string -> via the shell, list -> directly); raise on a
    non-zero exit through subprocess.check_call()."""
    expand = isinstance(cmd, basestring)
    if verbose:
        sys.stderr.write("executing %s\n" % str(cmd))
    subprocess.check_call(cmd, shell=expand)

def p4_system(cmd):
    """Specifically invoke p4 as the system command. """
    real_cmd = p4_build_cmd(cmd)
    expand = isinstance(real_cmd, basestring)
    subprocess.check_call(real_cmd, shell=expand)
# Thin wrappers around individual p4 subcommands.  File arguments pass
# through wildcard_encode() (defined elsewhere in this file), which
# presumably escapes p4's wildcard characters — confirm there.

def p4_integrate(src, dest):
    p4_system(["integrate", "-Dt", wildcard_encode(src), wildcard_encode(dest)])

def p4_sync(f, *options):
    p4_system(["sync"] + list(options) + [wildcard_encode(f)])

def p4_add(f):
    # forcibly add file names with wildcards
    if wildcard_present(f):
        p4_system(["add", "-f", f])
    else:
        p4_system(["add", f])

def p4_delete(f):
    p4_system(["delete", wildcard_encode(f)])

def p4_edit(f):
    p4_system(["edit", wildcard_encode(f)])

def p4_revert(f):
    p4_system(["revert", wildcard_encode(f)])

def p4_reopen(type, f):
    p4_system(["reopen", "-t", type, wildcard_encode(f)])
#
# Canonicalize the p4 type and return a tuple of the
# base type, plus any modifiers. See "p4 help filetypes"
# for a list and explanation.
#
def split_p4_type(p4type):
    """Split a p4 filetype string into (base, modifiers).

    Historical one-word aliases (e.g. "ktext") are first mapped onto
    their modern "base+mods" spelling, then the part after the first
    '+' (if any) is returned as the modifier string.
    """
    p4_filetypes_historical = {
        "ctempobj": "binary+Sw",
        "ctext": "text+C",
        "cxtext": "text+Cx",
        "ktext": "text+k",
        "kxtext": "text+kx",
        "ltext": "text+F",
        "tempobj": "binary+FSw",
        "ubinary": "binary+F",
        "uresource": "resource+F",
        "uxbinary": "binary+Fx",
        "xbinary": "binary+x",
        "xltext": "text+Fx",
        "xtempobj": "binary+Swx",
        "xtext": "text+x",
        "xunicode": "unicode+x",
        "xutf16": "utf16+x",
    }
    if p4type in p4_filetypes_historical:
        p4type = p4_filetypes_historical[p4type]
    s = p4type.split("+")
    base = s[0]
    mods = ""  # fix: the original assigned this twice (dead first assignment)
    if len(s) > 1:
        mods = s[1]
    return (base, mods)
#
# return the raw p4 type of a file (text, text+ko, etc)
#
def p4_type(file):
    """Ask p4 (via fstat) for the head filetype of *file*."""
    results = p4CmdList(["fstat", "-T", "headType", file])
    return results[0]['headType']
#
# Given a type base and modifier, return a regexp matching
# the keywords that can be expanded in the file
#
def p4_keywords_regexp_for_type(base, type_mods):
    """Verbose-mode regexp source matching the RCS keywords ($Id$, ...)
    that p4 would expand for this filetype, or None when none apply."""
    if base not in ("text", "unicode", "binary"):
        return None
    # +ko expands only the short list; +k expands the full keyword set.
    if "ko" in type_mods:
        kwords = 'Id|Header'
    elif "k" in type_mods:
        kwords = 'Id|Header|Author|Date|DateTime|Change|File|Revision'
    else:
        return None
    return r"""
        \$              # Starts with a dollar, followed by...
        (%s)            # one of the keywords, followed by...
        (:[^$]+)?       # possibly an old expansion, followed by...
        \$              # another dollar
        """ % kwords
#
# Given a file, return a regexp matching the possible
# RCS keywords that will be expanded, or None for files
# with kw expansion turned off.
#
def p4_keywords_regexp_for_file(file):
    """Keyword-expansion regexp for *file*; None for missing files or
    filetypes without keyword expansion."""
    if not os.path.exists(file):
        return None
    (type_base, type_mods) = split_p4_type(p4_type(file))
    return p4_keywords_regexp_for_type(type_base, type_mods)
def setP4ExecBit(file, mode):
    """Reopen *file* with a p4 filetype whose +x modifier matches *mode*."""
    # Reopens an already open file and changes the execute bit to match
    # the execute bit setting in the passed in mode.
    p4Type = "+x"

    if not isModeExec(mode):
        p4Type = getP4OpenedType(file)
        # Strip the x modifier wherever it appears in the current type.
        p4Type = re.sub('^([cku]?)x(.*)', '\\1\\2', p4Type)
        p4Type = re.sub('(.*?\+.*?)x(.*?)', '\\1\\2', p4Type)
        # Drop a now-dangling trailing '+' if x was the only modifier.
        if p4Type[-1] == "+":
            p4Type = p4Type[0:-1]

    p4_reopen(p4Type, file)
def getP4OpenedType(file):
    # Returns the perforce file type for the given file.
    result = p4_read_pipe(["opened", wildcard_encode(file)])
    # Extract the "(type)" suffix from the "p4 opened" report line.
    match = re.match(".*\((.+)\)\r?$", result)
    if match:
        return match.group(1)
    else:
        die("Could not determine file type for %s (result: '%s')" % (file, result))
# Return the set of all p4 labels
def getP4Labels(depotPaths):
    """Set of p4 label names covering any of *depotPaths* (str or list)."""
    labels = set()
    if isinstance(depotPaths, basestring):
        depotPaths = [depotPaths]

    for l in p4CmdList(["labels"] + ["%s..." % p for p in depotPaths]):
        label = l['label']
        labels.add(label)

    return labels
# Return the set of all git tags
def getGitTags():
    """Set of tag names present in the current git repository."""
    return set(line.strip() for line in read_pipe_lines(["git", "tag"]))
def diffTreePattern():
    # This is a simple generator for the diff tree regex pattern. This could be
    # a class variable if this and parseDiffTreeEntry were a part of a class.
    # Groups: two modes, two object names, a status letter with optional
    # score, then one or two tab-separated paths.
    pattern = re.compile(':(\d+) (\d+) (\w+) (\w+) ([A-Z])(\d+)?\t(.*?)((\t(.*))|$)')
    while True:
        yield pattern
def parseDiffTreeEntry(entry)
|
enricopal/snowball_decision
|
decision_algorithm.py
|
Python
|
apache-2.0
| 25,944
| 0.026287
|
import numpy as np
import random
import networkx as nx
from operator import itemgetter
import pandas as pd
import sys
import json
import optparse
###############################################################
#### CONCORDANCE, DISCORDANCE AND CREDIBILITY FUNCTIONS ######
###############################################################
def conc_func(i, j, k):
    """Per-criterion concordance for the pair of alternatives (i, j) on
    criterion k.

    Piecewise linear in the performance difference x: 1 below the
    indifference threshold q, 0 from the preference threshold p upward,
    interpolated in between; a step function when p == q.  Reads the
    module-level ``alternatives``, ``indiff_thresh``, ``pref_thresh``.
    """
    x = float(alternatives[i, k] - alternatives[j, k])
    q = float(indiff_thresh[k])
    p = float(pref_thresh[k])

    if (p != q):  # check that the angular coeff. exists
        if (x < q):
            return 1
        elif (x < p):
            return (-x) / (p - q) + (p) / (p - q)
        elif (x >= p):
            return 0
    else:  # otherwise it is a step function
        if (x <= p):
            return 1
        else:
            return 0
def disc_func(i, j, k):
    """Per-criterion discordance for the pair of alternatives (i, j) on
    criterion k.

    0 up to the preference threshold p, 1 from the veto threshold v
    upward, linear in between; a step function when p == v.  Reads the
    module-level ``alternatives``, ``vetos``, ``pref_thresh``.
    """
    x = float(alternatives[i, k] - alternatives[j, k])
    v = float(vetos[k])
    p = float(pref_thresh[k])

    if (p != v):  # check that the angular coeff. exists
        if (x <= p):
            return 0
        elif (x <= v):
            return (x) / (v - p) - (p) / (v - p)
        elif (x > v):
            return 1
    else:  # otherwise it is a step function
        if (x <= p):
            return 0
        else:
            return 1
# define the concordance and discordance functions
def conc_func_tri(i, j, k):
    """Per-criterion concordance of alternative *i* against profile *j*
    on criterion k; same piecewise-linear shape as conc_func() but the
    comparison value comes from the module-level ``profiles`` table."""
    x = float(alternatives[i, k] - profiles[j, k])
    q = float(indiff_thresh[k])
    p = float(pref_thresh[k])

    if (p != q):  # check that the angular coeff. exists
        if (x < q):
            return 1
        elif (x < p):
            return (-x) / (p - q) + (p) / (p - q)
        elif (x >= p):
            return 0
    else:  # otherwise it is a step function
        if (x <= p):
            return 1
        else:
            return 0
def disc_func_tri(i, j, k):
    """Per-criterion discordance of alternative *i* against profile *j*
    on criterion k; same shape as disc_func() with the comparison value
    taken from the module-level ``profiles`` table."""
    x = float(alternatives[i, k] - profiles[j, k])
    v = float(vetos[k])
    p = float(pref_thresh[k])

    if (p != v):  # check that the angular coeff. exists
        if (x <= p):
            return 0
        elif (x <= v):
            return (x) / (v - p) - (p) / (v - p)
        elif (x > v):
            return 1
    else:  # otherwise it is a step function
        if (x <= p):
            return 0
        else:
            return 1
def concordance_tri(i, j):
    """Weighted sum of the per-criterion concordances of alternative *i*
    against profile *j* (weights from the module-level ``weights``)."""
    c = []
    for k in range(m):  # for each criterion
        c.append(weights[k] * conc_func_tri(i, j, k))
    return sum(c)

# define the credibility of the outranking as a function of concordance and discordance
def credibility_tri(i, j):
    """Credibility that alternative *i* outranks profile *j*: the overall
    concordance, scaled down by every criterion whose discordance
    exceeds it."""
    c = concordance_tri(i, j)
    fact = c
    for k in range(m):  # for each criterion
        d = disc_func_tri(i, j, k)  # just for simplicity of notation
        # A strongly discordant criterion weakens the credibility.
        if (d > c):
            fact = fact * (1 - d) / (1 - c)
    return fact
# define the concordance and discordance for a pair of alternatives
def concordance(i, j):
    """Weighted sum of the per-criterion concordances for the pair of
    alternatives (i, j)."""
    c = []
    for k in range(m):  # for each criterion
        c.append(weights[k] * conc_func(i, j, k))
    return sum(c)
# define the credibility of the outranking as a function of concordance and discordance
def credibility(i, j):
    """Credibility that alternative *i* outranks alternative *j*: the
    overall concordance, scaled down once for every criterion whose
    discordance exceeds it."""
    c = concordance(i, j)
    fact = c
    for k in range(m):
        d = disc_func(i, j, k)
        if d > c:
            fact *= (1 - d) / (1 - c)
    return fact
def discrimination_thresh(x):  # non constant threshold
    """Discrimination threshold s(x) = a - b*x (module-level ``a``, ``b``)."""
    return a - b * x
############################
|
#############
############ ALGORITHMS #################
#########################################
# distillation algorithm
def compute_scores_2(cred_matrix, altern_list):
    """Qualification score of each alternative for one distillation step.

    An alternative gains one point for every other alternative it
    outranks with credibility within the discrimination threshold of the
    matrix maximum, and loses one point for every alternative that
    outranks it the same way.

    ``cred_matrix`` maps (i, j) pairs to credibility values; returns a
    dict alternative -> integer score.  (Removed the original's unused
    ``n`` and ``keys`` locals and hoisted the invariant cutoff.)
    """
    scores = dict.fromkeys(altern_list, 0)

    # Only outrankings close enough to the maximum credibility count.
    l = max(cred_matrix.values())
    cutoff = l - discrimination_thresh(l)

    for i in altern_list:
        for j in altern_list:
            if i != j:  # skip the diagonal
                if cred_matrix[(i, j)] >= cutoff:
                    scores[i] += 1
                if cred_matrix[(j, i)] >= cutoff:
                    scores[i] -= 1
    return scores
# what happens when there are more than two alternatives
def runoff(cred_matrix, maxima_matrix, maxima):
    """Break a tie between the current *maxima* by re-scoring them on the
    restricted credibility matrix *maxima_matrix*, then continue the
    distillation.

    Appends the winner (or the whole ex-aequo group) to the module-level
    ``ranking``, removes it from ``cred_matrix`` / ``altern_list`` in
    place, and recurses into distillation_2().
    """
    scores = {}
    scores = compute_scores_2(maxima_matrix, maxima)  # first step of the algorithm

    # check if there is a unique max
    maxima_run = []
    maximum = max(scores.values())
    for i in scores.keys():  # collect the alternatives with maximum score
        if scores[i] == maximum:
            maxima_run.append(i)

    if len(maxima_run) == 1:  # if there is a unique max
        ranking.append(maxima_run[0])  # select the winner of the competition
        # eliminate the winning alternative from the matrix
        # NOTE(review): deleting while iterating .keys() requires Python 2
        # (where keys() returns a list) — confirm the target version.
        for i, j in cred_matrix.keys():
            if i == maxima_run[0] or j == maxima_run[0]:
                del cred_matrix[(i, j)]
        altern_list.remove(maxima_run[0])
        distillation_2(cred_matrix)
    elif len(maxima_run) > 1:  # otherwise rank them all together, tied
        ranking.append(maxima_run)
        # eliminate the winning alternatives from the matrix
        if len(cred_matrix) > len(maxima_run):  # if other alternatives remain to rank, drop the ones just placed
            for j in maxima_run:
                altern_list.remove(j)
                for i, k in cred_matrix.keys():
                    if i == j or k == j:
                        del cred_matrix[(i, k)]
            distillation_2(cred_matrix)
        else:  # otherwise the algorithm stops
            return ranking
#initializing the variables
def distillation_2(cred_matrix):
#print cred_matrix
if len(cred_matrix) == 1: #there is just one alternative left, the algorithm has to stop
ranking.append(altern_list[0]) #add the last element
if len(cred_matrix) > 1: #are there any more alternatives to rank?
scores = {}
scores = compute_scores_2(cred_matrix,altern_list) #first step of the algorithm
#check if there is a unique max
maxima = []
#index_maxima = []
nonmaxima = []
#nonmaxima_all = []
#index_nonmaxima = []
maxima_matrix = []
maximum = max(scores.values())
for i in scores.keys():#create a list with the alternatives that have maximum score
if scores[i] == maximum:
maxima.append(i)
else:
nonmaxima.append(i)
if len(maxima) == 1: #if there is a unique max
ranking.append(maxima[0]) #select the winner of the competition
#eliminate the winning alternative from the matrix
for i,j in cred_matrix.keys():
if i == maxima[0] or j == maxima[0]:
del cred_matrix[(i,j)]
altern_list.remove(maxima[0])
distillation_2(cred_matrix)
if len(maxima) > 1:
#devo costruire la sottomatrice dei massimi
#rimuovo quelli che non sono massimi dalla matrice di credibilit
maxima_matrix = {}
for i in cred_matrix.keys():
|
Puppet-Finland/trac
|
files/spam-filter/tracspamfilter/filters/registration.py
|
Python
|
bsd-2-clause
| 4,192
| 0.000954
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
from acct_mgr.api import IAccountRegistrationInspector
from acct_mgr.register import RegistrationError
from trac.config import BoolOption, IntOption
from trac.core import Component, ExtensionPoint, implements
from trac.util.html import tag
from tracspamfilter.api import IFilterStrategy, N_
class RegistrationFilterStrategy(Component):
"""Spam filter strategy that calls account manager checks for
account registration.
"""
implements(IFilterStrategy)
karma_points = IntOption('spam-filter', 'account_karma', '0',
"""By how many points a failed registration check impacts
the overall score.""", doc_domain='tracspamfilter')
replace_checks = BoolOption('spam-filter', 'account_replace_checks',
'false',
"""Replace checks in account manager totally.""",
doc_domain='tracspamfilter')
listeners = ExtensionPoint(IAccountRegistrationInspector)
def render_registration_fields(self, req, data, fragments):
self.log.debug("Adding registration check data fields")
if self.replace_checks:
for check in self.listeners:
try:
if check.__class__.__name__ != 'RegistrationFilterAdapter':
self.log.debug("Add registration check data %s", check)
fragment, f_data = \
check.render_registration_fields(req, data)
try:
fragments['optional'] = \
tag(fragments.get('optional', ''),
fragment.get('optional', ''))
fragments['required'] = \
tag(fragments.get('required', ''),
fragment.get('required', ''))
except AttributeError:
if fragment is not None and fragment != '':
|
fragments['required'] = \
tag(fragments.get('required', ''),
fragment)
data.update(f_data)
except Exception, e:
self.log.exception("Adding registration fields failed: %s",
e)
return fragments, data
|
# IFilterStrategy methods
def is_external(self):
return False
def test(self, req, author, content, ip):
if req.path_info == '/register':
karma = 0
checks = []
for check in self.listeners:
try:
if check.__class__.__name__ != 'RegistrationFilterAdapter':
self.log.debug("Try registration check %s", check)
check.validate_registration(req)
except RegistrationError, e:
karma -= abs(self.karma_points)
msg = e.message.replace('\n', '')
args = e.msg_args
if args:
msg = msg % args
msg.replace('<b>', '*').replace('</b>', '*')
self.log.debug("Registration check returned %s", msg)
checks.append('%s: %s' % (check.__class__.__name__, msg))
except Exception, e:
self.log.exception("Registration check %s failed: %s",
check, e)
if karma or checks:
return karma, N_("Account registration failed (%s)"), \
", ".join(checks)
def train(self, req, author, content, ip, spam=True):
return 0
|
jonathanslenders/python-prompt-toolkit
|
prompt_toolkit/output/base.py
|
Python
|
bsd-3-clause
| 7,955
| 0
|
"""
Interface for an output.
"""
from abc import ABCMeta, abstractmethod
from typing import Optional, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.styles import Attrs
from .color_depth import ColorDepth
__all__ = [
"Output",
"DummyOutput",
]
class Output(metaclass=ABCMeta):
    """
    Base class defining the output interface for a
    :class:`~prompt_toolkit.renderer.Renderer`.

    Actual implementations are
    :class:`~prompt_toolkit.output.vt100.Vt100_Output` and
    :class:`~prompt_toolkit.output.win32.Win32Output`.
    """

    # Underlying text stream, when the implementation wraps one.
    stdout: Optional[TextIO] = None

    @abstractmethod
    def fileno(self) -> int:
        " Return the file descriptor to which we can write for the output. "

    @abstractmethod
    def encoding(self) -> str:
        """
        Return the encoding for this output, e.g. 'utf-8'.
        (This is used mainly to know which characters are supported by the
        output the data, so that the UI can provide alternatives, when
        required.)
        """

    @abstractmethod
    def write(self, data: str) -> None:
        " Write text (Terminal escape sequences will be removed/escaped.) "

    @abstractmethod
    def write_raw(self, data: str) -> None:
        " Write text. "

    @abstractmethod
    def set_title(self, title: str) -> None:
        " Set terminal title. "

    @abstractmethod
    def clear_title(self) -> None:
        " Clear title again. (or restore previous title.) "

    @abstractmethod
    def flush(self) -> None:
        " Write to output stream and flush. "

    @abstractmethod
    def erase_screen(self) -> None:
        """
        Erases the screen with the background colour and moves the cursor to
        home.
        """

    @abstractmethod
    def enter_alternate_screen(self) -> None:
        " Go to the alternate screen buffer. (For full screen applications). "

    @abstractmethod
    def quit_alternate_screen(self) -> None:
        " Leave the alternate screen buffer. "

    @abstractmethod
    def enable_mouse_support(self) -> None:
        " Enable mouse. "

    @abstractmethod
    def disable_mouse_support(self) -> None:
        " Disable mouse. "

    @abstractmethod
    def erase_end_of_line(self) -> None:
        """
        Erases from the current cursor position to the end of the current line.
        """

    @abstractmethod
    def erase_down(self) -> None:
        """
        Erases the screen from the current line down to the bottom of the
        screen.
        """

    @abstractmethod
    def reset_attributes(self) -> None:
        " Reset color and styling attributes. "

    @abstractmethod
    def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
        " Set new color and styling attributes. "

    @abstractmethod
    def disable_autowrap(self) -> None:
        " Disable auto line wrapping. "

    @abstractmethod
    def enable_autowrap(self) -> None:
        " Enable auto line wrapping. "

    @abstractmethod
    def cursor_goto(self, row: int = 0, column: int = 0) -> None:
        " Move cursor position. "

    @abstractmethod
    def cursor_up(self, amount: int) -> None:
        " Move cursor `amount` place up. "

    @abstractmethod
    def cursor_down(self, amount: int) -> None:
        " Move cursor `amount` place down. "

    @abstractmethod
    def cursor_forward(self, amount: int) -> None:
        " Move cursor `amount` place forward. "

    @abstractmethod
    def cursor_backward(self, amount: int) -> None:
        " Move cursor `amount` place backward. "

    @abstractmethod
    def hide_cursor(self) -> None:
        " Hide cursor. "

    @abstractmethod
    def show_cursor(self) -> None:
        " Show cursor. "

    # Non-abstract methods below are optional capabilities: the defaults
    # are no-ops so terminals without the feature need not override them.

    def ask_for_cpr(self) -> None:
        """
        Asks for a cursor position report (CPR).
        (VT100 only.)
        """

    @property
    def responds_to_cpr(self) -> bool:
        """
        `True` if the `Application` can expect to receive a CPR response after
        calling `ask_for_cpr` (this will come back through the corresponding
        `Input`).

        This is used to determine the amount of available rows we have below
        the cursor position. In the first place, we have this so that the drop
        down autocompletion menus are sized according to the available space.

        On Windows, we don't need this, there we have
        `get_rows_below_cursor_position`.
        """
        return False

    @abstractmethod
    def get_size(self) -> Size:
        " Return the size of the output window. "

    def bell(self) -> None:
        " Sound bell. "

    def enable_bracketed_paste(self) -> None:
        " For vt100 only. "

    def disable_bracketed_paste(self) -> None:
        " For vt100 only. "

    def reset_cursor_key_mode(self) -> None:
        """
        For vt100 only.
        Put the terminal in normal cursor mode (instead of application mode).

        See: https://vt100.net/docs/vt100-ug/chapter3.html
        """

    def scroll_buffer_to_prompt(self) -> None:
        " For Win32 only. "

    def get_rows_below_cursor_position(self) -> int:
        " For Windows only. "
        raise NotImplementedError

    @abstractmethod
    def get_default_color_depth(self) -> ColorDepth:
        """
        Get default color depth for this output.

        This value will be used if no color depth was explicitely passed to the
        `Application`.

        .. note::

            If the `$PROMPT_TOOLKIT_COLOR_DEPTH` environment variable has been
            set, then `outputs.defaults.create_output` will pass this value to
            the implementation as the default_color_depth, which is returned
            here. (This is not used when the output corresponds to a
            prompt_toolkit SSH/Telnet session.)
        """
class DummyOutput(Output):
    """
    For testing. An output class that doesn't render anything.

    Every rendering operation is a no-op; only the few query methods
    return fixed placeholder values.
    """

    def fileno(self) -> int:
        " There is no sensible default for fileno(). "
        raise NotImplementedError

    def encoding(self) -> str:
        return "utf-8"

    def write(self, data: str) -> None:
        pass

    def write_raw(self, data: str) -> None:
        pass

    def set_title(self, title: str) -> None:
        pass

    def clear_title(self) -> None:
        pass

    def flush(self) -> None:
        pass

    def erase_screen(self) -> None:
        pass

    def enter_alternate_screen(self) -> None:
        pass

    def quit_alternate_screen(self) -> None:
        pass

    def enable_mouse_support(self) -> None:
        pass

    def disable_mouse_support(self) -> None:
        pass

    def erase_end_of_line(self) -> None:
        pass

    def erase_down(self) -> None:
        pass

    def reset_attributes(self) -> None:
        pass

    def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
        pass

    def disable_autowrap(self) -> None:
        pass

    def enable_autowrap(self) -> None:
        pass

    def cursor_goto(self, row: int = 0, column: int = 0) -> None:
        pass

    def cursor_up(self, amount: int) -> None:
        pass

    def cursor_down(self, amount: int) -> None:
        pass

    def cursor_forward(self, amount: int) -> None:
        pass

    def cursor_backward(self, amount: int) -> None:
        pass

    def hide_cursor(self) -> None:
        pass

    def show_cursor(self) -> None:
        pass

    def ask_for_cpr(self) -> None:
        pass

    def bell(self) -> None:
        pass

    def enable_bracketed_paste(self) -> None:
        pass

    def disable_bracketed_paste(self) -> None:
        pass

    def scroll_buffer_to_prompt(self) -> None:
        pass

    def get_size(self) -> Size:
        # Fixed placeholder terminal geometry.
        return Size(rows=40, columns=80)

    def get_rows_below_cursor_position(self) -> int:
        return 40

    def get_default_color_depth(self) -> ColorDepth:
        return ColorDepth.DEPTH_1_BIT
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/identity.py
|
Python
|
mit
| 1,517
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Identity(Model):
    """Identity for the resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar principal_id: The principal ID of resource identity.
    :vartype principal_id: str
    :ivar tenant_id: The tenant ID of resource.
    :vartype tenant_id: str
    :param type: The identity type. Possible values include: 'SystemAssigned'
    :type type: str or
     ~azure.mgmt.resource.resources.v2016_09_01.models.ResourceIdentityType
    """

    # Server-populated fields: msrest must not serialize them in requests.
    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    # msrest attribute map: Python attribute -> wire key and type.
    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'ResourceIdentityType'},
    }

    def __init__(self, type=None):
        super(Identity, self).__init__()
        # Read-only on the service side; kept as None locally.
        self.principal_id = None
        self.tenant_id = None
        self.type = type
|
skosukhin/spack
|
var/spack/repos/builtin/packages/scons/package.py
|
Python
|
lgpl-2.1
| 1,645
| 0.000608
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Scons(PythonPackage):
"""SCons is a software construction tool"""
homepage = "http://scons.org"
url = "https://pypi.io/packages/source/s/scons/scons-2.5.1.tar.gz"
version('2.5.1', '3eac81e5e8206304a9b4683c57665aa4')
version('2.5.0', 'bda5530a70a41a7831d83c8b191c021e')
# Python 3 is not supported
depends_on('python@:2.8', type=('build', 'run'))
|
openstack/vitrage
|
vitrage/tests/unit/datasources/prometheus/test_prometheus_transformer.py
|
Python
|
apache-2.0
| 6,042
| 0
|
# Copyright 2018 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.host.transformer import HostTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources.prometheus import PROMETHEUS_DATASOURCE
from vitrage.datasources.prometheus.properties import get_label
from vitrage.datasources.prometheus.properties import PrometheusAlertLabels \
as PLabels
from vitrage.datasources.prometheus.properties import \
PrometheusAlertProperties as PProps
from vitrage.datasources.prometheus.properties import PrometheusAlertStatus \
as PAlertStatus
from vitrage.datasources.prometheus.transformer import PrometheusTransformer
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.tests.mocks import mock_transformer
from vitrage.tests.unit.datasources.test_alarm_transformer_base import \
BaseAlarmTransformerTest
# noinspection PyProtectedMember
class PrometheusTransformerTest(BaseAlarmTransformerTest):
OPTS = [
cfg.StrOpt(DSOpts.UPDATE_METHOD,
default=UpdateMethod.PUSH),
]
def setUp(self):
super(PrometheusTransformerTest, self).setUp()
self.transformers = {}
self.conf_reregister_opts(self.OPTS, group=PROMETHEUS_DATASOURCE)
self.transformers[NOVA_HOST_DATASOURCE] = \
HostTransformer(self.transformers)
self.transformers[NOVA_INSTANCE_DATASOURCE] = \
InstanceTransformer(self.transformers)
self.transformers[PROMETHEUS_DATASOURCE] = \
PrometheusTransformer(self.transformers)
def test_create_update_entity_vertex(self):
# Test setup
host1 = 'host1'
instance_id = uuid.uuid4().hex
event_on_host = self._generate_event_on_host(host1)
event_on_instance = self._generate_event_on_instance(host1,
instance_id)
self.assertIsNotNone(event_on_host)
self.assertIsNotNone(event_on_instance)
# Test action
transformer = self.transformers[PROMETHEUS_DATASOURCE]
wrapper_for_host = transformer.transform(event_on_host)
wrapper_for_instance = transformer.transform(event_on_instance)
# Test assertions
self._validate_vertex_props(wrapper_for_host.vertex, event_on_host)
self._validate_vertex_props(wrapper_for_instance.vertex,
event_on_instance)
# Validate the neighbors: only one valid host neighbor
host_entity_key = transformer._create_entity_key(event_on_host)
host_entity_uuid = \
transformer.uuid_from_deprecated_vitrage_id(host_entity_key)
instance_entity_key = transformer._create_entity_key(event_on_instance)
instance_entity_uuid = \
transformer.uuid_from_deprecated_vitrage_id(instance_entity_key)
self._validate_host_neighbor(wrapper_for_host,
host_entity_uuid,
host1)
self._validate_instance_neighbor(wrapper_
|
for_instance,
instance_entity_uuid,
instance_id)
# Validate the expected action on the graph - update or delete
self._validate_graph_action(wrapper_for_host)
self._validate_graph_action(wrapper_for_instance)
|
def _validate_vertex_props(self, vertex, event):
self._validate_alarm_vertex_props(
vertex, get_label(event, PLabels.ALERT_NAME),
PROMETHEUS_DATASOURCE, event[DSProps.SAMPLE_DATE])
def _generate_event_on_host(self, hostname):
# fake query result to be used by the transformer for determining
# the neighbor
query_result = [{VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE,
VProps.ID: hostname}]
labels = {PLabels.SEVERITY: 'critical',
PLabels.INSTANCE: hostname}
update_vals = {TransformerBase.QUERY_RESULT: query_result,
PProps.LABELS: labels}
return self._generate_event(update_vals)
def _generate_event_on_instance(self, hostname, instance_name):
# fake query result to be used by the transformer for determining
# the neighbor
query_result = [{VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE,
VProps.ID: instance_name}]
labels = {PLabels.SEVERITY: 'critical',
PLabels.INSTANCE: hostname,
PLabels.DOMAIN: instance_name}
update_vals = {TransformerBase.QUERY_RESULT: query_result,
PProps.LABELS: labels}
return self._generate_event(update_vals)
@staticmethod
def _generate_event(update_vals):
generators = mock_transformer.simple_prometheus_alarm_generators(
update_vals=update_vals)
return mock_transformer.generate_random_events_list(generators)[0]
def _is_erroneous(self, vertex):
return vertex[PProps.STATUS] == PAlertStatus.FIRING
|
wederw/bitcoin
|
qa/rpc-tests/mempool_coinbase_spends.py
|
Python
|
mit
| 3,823
| 0.004708
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
# Mine three blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].generate(1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
|
for node in self.nodes:
node.invalida
|
teblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
brousch/opencraft
|
instance/models/server.py
|
Python
|
agpl-3.0
| 9,912
| 0.001917
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Instance app models - Server
"""
# Imports #####################################################################
import novaclient
import time
from swampdragon.pubsub_providers.data_publisher import publish_data
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django_extensions.db.models import TimeStampedModel
from instance import openstack
from instance.utils import is_port_open, to_json
from instance.models.instance import OpenEdXInstance
from instance.models.logging_mixin import LoggerMixin
from instance.models.utils import ValidateModelMixin
# Exceptions ##################################################################
class ServerNotReady(Exception):
"""
Raised when an action is attempted in a status that doesn't allow it
"""
pass
# Models ######################################################################
cla
|
ss ServerQuerySet(models.QuerySet):
"""
Additional methods for server querysets
Also used as the standard manager for the Server model (`Server.objects`)
"""
def terminate(self, *args, **kwargs):
"""
Terminate the servers from the queryset
"""
qs = self.filter(~Q(status=Server.TERMINATED), *args, **kwargs)
for server in qs:
server.termin
|
ate()
return qs
def exclude_terminated(self):
"""
Filter out terminated servers from the queryset
"""
return self.filter(~Q(status=Server.TERMINATED))
class Server(ValidateModelMixin, TimeStampedModel, LoggerMixin):
"""
A single server VM
"""
NEW = 'new'
STARTED = 'started'
ACTIVE = 'active'
BOOTED = 'booted'
PROVISIONED = 'provisioned'
REBOOTING = 'rebooting'
READY = 'ready'
LIVE = 'live'
STOPPING = 'stopping'
STOPPED = 'stopped'
TERMINATING = 'terminating'
TERMINATED = 'terminated'
STATUS_CHOICES = (
(NEW, 'New - Not yet loaded'),
(STARTED, 'Started - Running but not active yet'),
(ACTIVE, 'Active - Running but not booted yet'),
(BOOTED, 'Booted - Booted but not ready to be added to the application'),
(PROVISIONED, 'Provisioned - Provisioning is completed'),
(REBOOTING, 'Rebooting - Reboot in progress, to apply changes from provisioning'),
(READY, 'Ready - Rebooted and ready to add to the application'),
(LIVE, 'Live - Is actively used in the application and/or accessed by users'),
(STOPPING, 'Stopping - Stopping temporarily'),
(STOPPED, 'Stopped - Stopped temporarily'),
(TERMINATING, 'Terminating - Stopping forever'),
(TERMINATED, 'Terminated - Stopped forever'),
)
instance = models.ForeignKey(OpenEdXInstance, related_name='server_set')
status = models.CharField(max_length=11, default=NEW, choices=STATUS_CHOICES, db_index=True)
objects = ServerQuerySet().as_manager()
class Meta:
abstract = True
def _set_status(self, status):
"""
Update the current status variable, to be called when a status change is detected
"""
if status not in (s[0] for s in self.STATUS_CHOICES):
raise ValueError(status)
self.status = status
self.log('info', 'Changed status for {}: {}'.format(self, self.status))
self.save()
return self.status
def sleep_until_status(self, target_status):
"""
Sleep in a loop until the server reaches one of the specified status
"""
target_status_list = [target_status] if isinstance(target_status, str) else target_status
self.log('info', 'Waiting for server {} to reach status {}...'.format(self, target_status_list))
while True:
self.update_status()
if self.status in target_status:
break
time.sleep(1)
return self.status
@staticmethod
def on_post_save(sender, instance, created, **kwargs):
"""
Called when an instance is saved
"""
publish_data('notification', {
'type': 'server_update',
'server_pk': instance.pk,
})
def update_status(self, provisioned=False, rebooting=False):
"""
Check the current status and update it if it has changed
"""
raise NotImplementedError
class OpenStackServer(Server):
"""
A Server VM hosted on an OpenStack cloud
"""
openstack_id = models.CharField(max_length=250, db_index=True, blank=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.nova = openstack.get_nova_client()
def __str__(self):
if self.openstack_id:
return self.openstack_id
else:
return 'New OpenStack Server'
@property
def os_server(self):
"""
OpenStack nova server API endpoint
"""
if not self.openstack_id:
assert self.status == self.NEW
self.start()
return self.nova.servers.get(self.openstack_id)
@property
def public_ip(self):
"""
Return one of the public address(es)
"""
if not self.openstack_id:
return None
public_addr = openstack.get_server_public_address(self.os_server)
if not public_addr:
return None
return public_addr['addr']
def update_status(self, provisioned=False, rebooting=False):
"""
Refresh the status by querying the openstack server via nova
"""
# TODO: Check when server is stopped or terminated
os_server = self.os_server
self.log('debug', 'Updating status for {} from nova (currently {}):\n{}'.format(
self, self.status, to_json(os_server)))
if self.status == self.STARTED:
self.log('debug', 'Server {}: loaded="{}" status="{}"'.format(
self, os_server._loaded, os_server.status))
if os_server._loaded and os_server.status == 'ACTIVE':
self._set_status(self.ACTIVE)
elif self.status == self.ACTIVE and is_port_open(self.public_ip, 22):
self._set_status(self.BOOTED)
elif self.status == self.BOOTED and provisioned:
self._set_status(self.PROVISIONED)
elif self.status in (self.PROVISIONED, self.READY) and rebooting:
self._set_status(self.REBOOTING)
elif self.status == self.REBOOTING and not rebooting and is_port_open(self.public_ip, 22):
self._set_status(self.READY)
return self.status
def start(self):
"""
Get a server instance started and an openstack_id assigned
TODO: Add handling of quota limitations & waiting list
TODO: Create the key dynamically
"""
self.log('info', 'Starting server {} (status={})...'.format(self, self.status))
if self.status == self.NEW:
os_server = openstack.create_server(
self.nova,
self.instance.sub_domain,
settings.OPENSTACK_SANDBOX_FLAVOR,
settings.OPENSTACK_SANDBOX_BASE_IMAGE,
key_name=settings.OPENSTACK_SANDBOX_SSH_KEYNAME,
)
self.openstack_id = os_server.id
s
|
JHUISI/charm
|
charm/core/engine/protocol.py
|
Python
|
lgpl-3.0
| 10,994
| 0.011825
|
# TODO: provide a transition checker that prevents a feedback loop, inconsistent state.
# in user db that way user can eliminate store step on the receive side.
from charm.core.engine.util import *
from charm.toolbox.enum import Enum
from math import log, ceil
debug = False
# standardize responses between client and server
# code = Enum('Success', 'Fail', 'Repeat', 'StartSubprotocol', 'EndSubprotocol')
class Protocol:
def __init__(self, error_states, max_size=2048): # any init information?
global error
self.p_ID = 0
self.p_ctr =
|
0
error = error_states
# dictionary of party types (each type gets an identifier)
self.partyTypes = {}
self.party = {}
self._serialize = False
self.db = {} # initialize the database
self.max_size = max_size
self.prefix_size = ceil(log(max_size, 256))
|
def setup(self, *args):
# handles the hookup between parties involved
Error = True
for arg in args:
if isinstance(arg, dict):
print("Setup of: ", arg['name'])
if not self.addInstance(arg): Error = False
else:
print(type(arg))
return Error
def addInstance(self, obj):
p_ctr = self.p_ctr
for i in self.partyTypes.keys():
if i == obj['type']: # we find the party type
self.party[p_ctr] = {}
self.party[p_ctr]['name'], self.party[p_ctr]['socket'] = obj['name'], obj['socket']
self.party[p_ctr]['type'], self.party[p_ctr]['states'] = obj['type'], self.partyTypes[i]['states']
self.party[p_ctr]['init'] = self.partyTypes[i]['init']
self.p_ctr += 1
print("Adding party instance w/ id: ", p_ctr)
return True
return None
def addPartyType(self, type, state_map, trans_map, init_state=False):
ExistingTypeFound = False
# see if type already exists. break and return if so
for i in self.partyTypes.keys():
if self.partyTypes[i]['type'] == type:
ExistingTypeFound = True
break
# means we are adding a new type
if not ExistingTypeFound:
p_ID = self.p_ID
party = {'type':type, 'id':p_ID }
if(isinstance(state_map, dict)):
party['states'] = state_map # function pointers for state functions...
if(isinstance(trans_map, dict)):
party['transitions'] = trans_map
party['init'] = init_state # which state initializes the protocol
self.partyTypes[type] = party # makes sure
self.p_ID += 1
return True
return False
#
# def addValidTransitions(self, trans_map):
# if isinstance(trans_map, dict):
# self.trans_map = trans_map
def listStates(self, partyID):
# check if a member parameter is defined
if partyID < self.p_ctr:
return self.party[partyID]['states']
return None
def listParties(self):
return list(self.party.keys())
def listParyTypes(self):
return list(self.partyTypes.keys())
def getInitState(self, _type):
for i in self.listParties():
if self.party[i]['type'] == _type:
self._socket = self.party[i]['socket']
if self.party[i]['init']:
# set current trans starting point
self.cur_state = 1
return (True, self.listStates(i)[1])
else:
self.cur_state = 2
return (False, self.listStates(i)[2])
print("Returning junk!")
return (False, None)
def setState(self, state_num):
# find the corresponding call back based on current party id
self.nextCall = None
if state_num == None: return None
nextPossibleState = self._cur_trans.get(self.cur_state)
if type(nextPossibleState) == list and not state_num in nextPossibleState:
print("Invalid State Transition! Error!")
print("\tCurrent state: ", self.cur_state)
print("\tNext state: ", state_num)
print("Allowed states: ", nextPossibleState)
elif type(nextPossibleState) != list and nextPossibleState != state_num:
print("Invalid State Transition! Error!")
print("\tCurrent state: ", self.cur_state)
print("\tNext state not allowed: ", state_num)
# do not make the transition
return None
for i in self.listParties():
states = self.listStates(i)
if states.get(state_num) != None:
self.nextCall = states.get(state_num)
# preparing for state transition here.
self.cur_state = state_num
break
return None
def send_msg(self, object):
# use socket to send message (check if serializaton is required)
if self._socket != None:
if self._serialize:
result = self._user_serialize(object)
else:
result = self.serialize(object)
#print("DEBUG: send_msg : result =>", result)
if len(result) > self.max_size:
print("Message too long! max_size="+str(self.max_size))
return None
result = len(result).to_bytes(length=self.prefix_size, byteorder='big') + result
self._socket.send(result)
return None
# receives exactly n bytes
def recv_all(self, n):
recvd = 0
res = b''
while recvd < n:
res = res + self._socket.recv(n-recvd)
recvd = len(res)
return res
def recv_msg(self):
# read the socket and return the received message (check if deserialization)
# is necessary
if self._socket != None:
# block until data is available or remote host closes connection
msglen = int.from_bytes(self.recv_all(self.prefix_size), byteorder='big')
result = self.recv_all(msglen)
if result == '': return None
else:
if self._serialize:
return self._user_deserialize(result)
else: # default serialize call
return self.deserialize(result)
return None
# # serialize an object
# def serialize(self, object):
# if type(object) == str:
# return bytes(object, 'utf8')
# return object
#
# def deserialize(self, object):
# if type(object) == bytes:
# return object.decode('utf8')
# return object
def setSubclassVars(self, group, state=None):
if hasattr(group, 'serialize') and hasattr(group, 'deserialize'):
self.group = group
if state != None:
if type(state) == dict:
self.db = state
def get(self, keys, _type=tuple):
if not type(keys) == list: return
if _type == tuple:
ret = []
else: ret = {}
# get the data
for i in keys:
if _type == tuple:
ret.append(self.db[i])
else: # dict
ret[ i ] = self.db[i]
# return data
if _type == tuple:
return tuple(ret)
return ret
def store(self, *args):
for i in args:
if isinstance(i, tuple):
self.db[ i[0] ] = i[1]
return None
def serialize(self, object):
# print("input object... => ", object)
if type(object) == dict:
bytes_object = serializeDict(object, self.group)
return pickleObject(bytes_object)
elif type(object) == str:
return pickleObject(object)
else:
# print("serialize: just =>", object)
return object
def deserialize(self, bytes_object):
# print("deserialize input =>", bytes_object)
if type(bytes_obje
|
svimanet/IRC-Bob
|
modules/reminders.py
|
Python
|
unlicense
| 1,220
| 0.005738
|
import time
import json
import os
# Returns the list of reminders as dict.
# @return reminders - Python Dictionary
def get_reminders():
file = "{}/reminders.json".format(os.path.dirname(os.path.realpath(__file__)))
with open(file, "r") as data_file:
reminders = json.load(data_file)
return reminders
# Creates a reminder and writes it to file.
def set_reminder(nick, msg, date, time):
# Checks if user has existing reminders; if so: append to user reminders.
reminders = get_reminders()
reminder = {}
if nick in
|
reminders.keys():
reminders[nick].append([msg, date, time])
with open(file, "w+") as data_file:
data_file.write(json.dumps(remind
|
ers, indent=4, sort_keys=True))
else:
reminders[nick] = [[msg, date, time]]
with open(file, "w+") as data_file:
data_file.write(json.dumps(reminders, indent=4, sort_keys=True))
#print("After\n{}\n".format(reminders))
# Iterates over reminders and sends PM to users if time and date.
def remind(nick):
reminders = get_reminders()
if reminders.get(nick):
print(reminders.get(nick))
#set_reminder("hm","tannlege elns", "12.01.02","14:00")
remind("svimanet")
|
criteo-forks/graphite-web
|
webapp/graphite/user_util.py
|
Python
|
apache-2.0
| 1,975
| 0
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django import VERSION as DJANGO_VERSION
from django.contrib.auth.models import User
from graphite.account.models import Profile
from graphite.logger import log
def isAuthenticated(user):
# is_authenticated() is changed to a boolean since 1.10, 2.0 removes the
# backwards compatibilty
if DJANGO_VERSION >= (1, 10):
return user.is_authenticated
else:
return user.is_authenticated()
def getProfile(request, allowDefault=True):
if isAuthenticated(request.user):
return Profile.objects.get_or_create(user=request.user)[0]
elif allowDefault:
return default_profile()
def getProfileByUsername(username):
try:
return Profile.objects.get(user__username=username)
except Profile.DoesNotExist:
return None
def default_profile():
# '!' is an unusable password. Since the default user never a
|
uthenticates
# this avoids creating a default (expensive!) passw
|
ord hash at every
# default_profile() call.
user, created = User.objects.get_or_create(
username='default', defaults={'email': 'default@localhost.localdomain',
'password': '!'})
if created:
log.info("Default user didn't exist, created it")
profile, created = Profile.objects.get_or_create(user=user)
if created:
log.info("Default profile didn't exist, created it")
return profile
|
wcmckee/wcmckee-notebook
|
webData.py
|
Python
|
gpl-2.0
| 3,474
| 0.006621
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Intercity Python API Development
# <codecell>
from bs4 import BeautifulSoup
import requests
import pickle
loadSite = requests.get('http://www.intercity.co.nz/')
siteData = loadSite.content
blehData = siteData.split()
blehData[0:20]
siteData.swapcase()
print siteData.find('a')
omgSite = BeautifulSoup(siteData)
linkZite = omgSite.text
# <codecell>
pickle.dump(linkZite, open('outpuz.txt', 'wb'))
# <codecell>
dizTxt = open('outpuz.txt', 'r')
dizTxt.read()
# <codecell>
def save(linkZite):
saveFilz = open('save.txt', 'w')
for linz in linkZite:
values = line.split()
|
savefilz.write(values)
saveFilz.close()
# <codecell>
print linkZite
# <codecell>
print omgSite.unwrap
# <codecell>
omgSite.encode
# <codecell>
savzSite = omgSite.find_all(id=True)
# <codecell>
sortSite = linkSite[0:30]
# <codecell>
print daSite.next_element
# <codecell>
daSite = sortSite[15]
# <codecell>
linkSite = omgSite.find_all('a')
# <codecell>
saveLinkz = open('htmldoc', 'w'
|
)
saveLinkz.write(siteData)
saveLinkz.close()
# <codecell>
openLinkz = open('htmldoc', 'r')
openLinkz.read()
# <codecell>
print omgSite.extract()
# <codecell>
print omgSite.setup
# <codecell>
print omgSite.title
# <codecell>
print omgSite.wrap
# <codecell>
print omgSite.body
# <codecell>
print omgSite.head
# <codecell>
print omgSite.currentTag()
# <codecell>
print omgSite.prettify
# <codecell>
# <codecell>
# <codecell>
print loadSite.url
# <codecell>
beaut = BeautifulSoup(loadSite)
# <codecell>
reTweetz = open('testing.txt', 'w')
reTweetz.write('Fixed request')
reTweetz.close()
# <codecell>
daTweetz = open('testing.txt', 'r')
daTweetz.read()
# <codecell>
print diemLink
# <codecell>
for data in loadSite:
mixData = BeautifulSoup(data)
diemLink = mixData.a
print diemLink
seioLink = mixData.findAll('a')
print seioLink
print(mixData.get_text())
# <codecell>
mixOpen = open('outputz', 'r')
mixOpen.read()
# <codecell>
%%bash
git add .
git commit -m daTweetz
# <codecell>
%%bash
git push https://github.com/wcmckee/intercity
# <codecell>
testing = []
# <codecell>
testing.append(daTweetz)
# <codecell>
print testing
# <codecell>
for site in loadSite:
# <codecell>
for site in loadSite:
daLink = []
dafile = open('output', 'w')
daLink.append(site)
inter = BeautifulSoup(site)
daLink.append(inter)
geter = inter.text
daLink.append(geter)
beuLink = BeautifulSoup(daLink[0])
print beuLink.a
# <codecell>
for site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.titlefor site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.title
# <codecell>
for site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.title
# <codecell>
# <codecell>
print inter
# <codecell>
print inter
# <headingcell level=2>
# Timetable
# <codecell>
loadUrl = requests.get('http://www.intercity.co.nz/travel-info/timetable/')
# <codecell>
for da in loadUrl:
print da.title()
# <codecell>
selz = BeautifulSoup(da)
# <codecell>
print selz.title
# <codecell>
timez = BeautifulSoup(loadUrl)
# <codecell>
nakedSite = requests.get('http://nakedbus.com/nz/bus/')
# <codecell>
for naked in nakedSite:
print naked
# <codecell>
# <codecell>
|
dg7541/MicrolensingLCOGT
|
LIA/training_set.py
|
Python
|
gpl-3.0
| 12,474
| 0.00978
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 20:30:11 2018
@author: danielgodinez
"""
import numpy as np
import random
from astropy.io import fits
from sklearn import decomposition
import os
from LIA import simulate
from LIA import noise_models
from LIA import quality_check
from LIA import extract_features
def create(timestamps, min_mag=14, max_mag=21, noise=None, n_class=500, ml_n1=7, cv_n1=7, cv_n2=1):
"""Creates a training dataset using adaptive cadence.
Simulates each class n_class times, adding errors from
a noise model either defined using the create_noise
function, or Gaussian by default
Parameters
__________
timestamps : array of arrays
Times at which to simulate the different lightcurves.
Must be an array containing all possible timestamps combinations.
min_mag : float, optional
Minimum baseline magnitude for simulating lightcurves.
Defaults to 14.
max_mag : float, optional
Maximum baseline magnitude for simulating lightcurves.
Defaults to 21.
noise : function, optional
Noise model, can be created using the create_noise function.
If None it defaults to adding Gaussian noise.
n_class : int, optional
The amount of lightcurve (per class) to simulate.
Defaults to 500.
ml_n1 : int, optional
The mininum number of measurements that should be within the
microlensing signal when simulating the lightcurves.
cv_n1 : int, optional
The mininum number of measurements that should be within
at least one CV outburst when simulating the lightcurves.
cv_n2 : int, optional
The mininum number of measurements that should be within the
rise or drop of at least one CV outburst when simulating the lightcurves.
Outputs
_______
dataset : FITS
All simulated lightcurves in a FITS file, sorted by class and ID
all_features : txt file
A txt file containing all the features plus class label.
pca_stats : txt file
A txt file containing all PCA features plus class label.
"""
if n_class < 12:
raise ValueError("Parameter n_class must be at least 12 for principal components to be computed.")
while True:
try:
x=len(timestamps[0])
break
except TypeError:
raise ValueError("Incorrect format -- append the timestamps to a list and try again.")
times_list=[]
mag_list=[]
magerr_list=[]
id_list = []
source_class_list=[]
stats_list = []
print("Now simulating variables...")
for k in range(1,n_class+1):
time = random.choice(timestamps)
baseline = np.random.uniform(min_mag,max_mag)
mag, amplitude, period = simulate.variable(time,baseline)
if noise is not None:
mag, magerr = noise_models.add_noise(mag, noise)
if noise is None:
mag, magerr = noise_models.add_gaussian_noise(mag,zp=max_mag+3)
source_class = ['VARIABLE']*len(time)
source_class_list.append(source_class)
id_num = [k]*len(time)
id_list.append(id_num)
times_list.append(time)
mag_list.append(mag)
magerr_list.append(magerr)
stats = extract_features.extract_all(mag,magerr,convert=True)
stats = [i for i in stats]
stats = ['VARIABLE'] + [k] + stats
stats_list.append(stats)
print("Variables successfully simulated")
print("Now simulating constants...")
for k in range(1,n_class+1):
time = random.choice(timestamps)
baseline = np.random.uniform(min_mag,max_mag)
mag = simulate.constant(time, baseline)
if noise is not None:
mag, magerr = noise_models.add_noise(mag, noise)
if noise is None:
mag, magerr = noise_models.add_gaussian_noise(mag,zp=max_mag+3)
source_class = ['CONSTANT']*len(time)
source_class_list.append(source_class)
id_num = [1*n_class+k]*len(time)
id_list.append(id_num)
times_list.append(time)
mag_list.append(mag)
magerr_list.append(magerr)
stats = extract_features.extract_all(mag,magerr,convert=True)
stats = [i for i in stats]
stats = ['CONSTANT'] + [1*n_class+k] + stats
stats_list.append(stats)
print("Constants successfully simulated")
print("Now simulating CV...")
for k in range(1,n_class+1):
for j in range(10000):
time = random.choice(timestamps)
baseline = np.random.uniform(min_mag,max_mag)
mag, burst_start_times, burst_end_times, end_rise_times, end_high_times = simulate.cv(time, baseline)
quality = quality_check.test_cv(time, burst_start_times, burst_end_times, end_rise_times, end_high_times, n1=cv_n1, n2=cv_n2)
if quality is True:
try:
if noise is not None:
mag, magerr = noise_models.add_noise(mag,noise)
if noise is None:
mag, magerr = noise_models.add_gaus
|
sian_noise(mag,zp=max_mag+3)
except ValueError:
continue
source_class = ['CV']*len(time)
source_class_list.append(source_class)
id_num = [2*n_class+k]*len(time)
id_list.append(id_num)
times_list.append(time)
mag_list.append(mag)
magerr_list.append(magerr)
stats = ext
|
ract_features.extract_all(mag,magerr,convert=True)
stats = [i for i in stats]
stats = ['CV'] + [2*n_class+k] + stats
stats_list.append(stats)
break
if j == 9999:
raise RuntimeError('Unable to simulate proper CV in 10k tries with current cadence -- inspect cadence and try again.')
print("CVs successfully simulated")
print ("Now simulating microlensing...")
for k in range(1,n_class+1):
for j in range(10000):
time = random.choice(timestamps)
baseline = np.random.uniform(min_mag,max_mag)
mag, baseline, u_0, t_0, t_e, blend_ratio = simulate.microlensing(time, baseline)
try:
if noise is not None:
mag, magerr = noise_models.add_noise(mag,noise)
if noise is None:
mag, magerr= noise_models.add_gaussian_noise(mag,zp=max_mag+3)
except ValueError:
continue
quality = quality_check.test_microlensing(time, mag, magerr, baseline, u_0, t_0, t_e, blend_ratio, n=ml_n1)
if quality is True:
source_class = ['ML']*len(time)
source_class_list.append(source_class)
id_num = [3*n_class+k]*len(time)
id_list.append(id_num)
times_list.append(time)
mag_list.append(mag)
magerr_list.append(magerr)
stats = extract_features.extract_all(mag,magerr, convert=True)
stats = [i for i in stats]
stats = ['ML'] + [3*n_class+k] + stats
stats_list.append(stats)
break
if j == 9999:
raise RuntimeError('Unable to simulate proper ML in 10k tries with current cadence -- inspect cadence and/or noise model and try again.')
print("Microlensing events successfully simulated")
print("Writing files...")
col0 = fits.Column(name='Class', format='20A', array=np.hstack(source_class_list))
col1 = fits.Column(name='ID', format='E', array=np.hstack(id_list))
col2 = fits.Column(name='time', format='D', array=np.hstack(times_list))
col3 = fits.Column(name='mag', format='E', array=np.hstack(mag_list))
col4 = fits.Column(name='magerr', format='E', array=np.hstack(magerr_list))
cols = fits.Co
|
potash/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
Python
|
bsd-3-clause
| 35,041
| 0.000114
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is what the alias always aliased to.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    for k in range(n_samples - 1):
        # Row k is paired with every later row; fill that slice in one shot.
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
    return D, ij
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class was deprecated in version 0.18 and will be
removed in 0.20. Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like
|
, optional
An array with shape (n_features, ) or (1, ).
|
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/mast
|
ProgVal/pydigmips
|
tests/test_loaders.py
|
Python
|
mit
| 1,378
| 0.006531
|
from unittest import TestCase
from pydigmips import instructions, loaders
class HexaLoaderTestCase(TestCase):
    """Checks that ``loaders.load_hexa`` decodes lists of hex words into
    the expected instruction objects, one test per opcode."""

    def _assert_decodes(self, hex_words, expected):
        # Shared helper: load the hex program and compare instruction lists.
        self.assertEqual(loaders.load_hexa(hex_words), expected)

    def testAdd(self):
        self._assert_decodes(
            ['1510',   # 000 101 010 001 0000
             '1C60',
             '2C60'],  # 001 011 000 110 0000
            [instructions.Add(5, 2, 1),
             instructions.Add(7, 0, 6),
             instructions.Sub(3, 0, 6)])

    def testLd(self):
        self._assert_decodes(
            ['4EAA',   # 010 011 101 0101010
             '6EAA'],  # 011 011 101 0101010
            [instructions.Ld(3, (5, 42)),
             instructions.St(3, (5, 42))])

    def testBle(self):
        self._assert_decodes(
            ['8EAA'],  # 100 011 101 0101010
            [instructions.Ble(3, 5, 42)])

    def testLdi(self):
        self._assert_decodes(
            ['B0AA'],  # 101 100 00 10101010
            [instructions.Ldi(4, 170)])

    def testJa(self):
        self._assert_decodes(
            ['CE80'],  # 110 011 101 0000000
            [instructions.Ja(3, 5)])

    def testJ(self):
        self._assert_decodes(
            ['EAAA'],  # 111 0101010101010
            [instructions.J(2730)])
|
duane-edgington/stoqs
|
stoqs/loaders/MolecularEcology/load_dorado2009.py
|
Python
|
gpl-3.0
| 4,550
| 0.006813
|
#!/usr/bin/env python
'''
Loader for all 2009 Dorado missions written for Monique's notice of bad
depths in Dorado389_2009_084_02_084_02_decim.nc.
Mike McCann
MBARI 15 January 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
# Loader configuration: all 2009 Dorado AUV missions, plus the M1 mooring
# time series (the mooring loads are mostly disabled below).
cl = CANONLoader(
    'stoqs_dorado2009', 'Dorado - All 2009 missions',
    description='In Monterey Bay and Santa Monica Basin - includes processed Gulper Samples',
    x3dTerrains={
        'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
            'position': '-2822317.31255 -4438600.53640 3786150.85474',
            'orientation': '0.89575 -0.31076 -0.31791 1.63772',
            'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
            'VerticalExaggeration': '10',
            'speed': '.1',
        }
    },
    grdTerrain=os.path.join(parentDir, 'Monterey25.grd'),
)

# Dorado surveys in 2009 (decimated netCDF products served via OPeNDAP).
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2009/netcdf/'
cl.dorado_files = [
    'Dorado389_2009_055_05_055_05_decim.nc',
    'Dorado389_2009_084_00_084_00_decim.nc',
    'Dorado389_2009_084_02_084_02_decim.nc',
    'Dorado389_2009_085_02_085_02_decim.nc',
    'Dorado389_2009_111_00_111_00_decim.nc',
    'Dorado389_2009_111_01_111_01_decim.nc',
    'Dorado389_2009_112_07_112_07_decim.nc',
    'Dorado389_2009_113_00_113_00_decim.nc',
    'Dorado389_2009_124_03_124_03_decim.nc',
    'Dorado389_2009_125_00_125_00_decim.nc',
    'Dorado389_2009_126_00_126_00_decim.nc',
    'Dorado389_2009_152_00_152_00_decim.nc',
    'Dorado389_2009_153_01_153_01_decim.nc',
    'Dorado389_2009_154_00_154_00_decim.nc',
    'Dorado389_2009_155_03_155_03_decim.nc',
    'Dorado389_2009_182_01_182_01_decim.nc',
    'Dorado389_2009_272_00_272_00_decim.nc',
    'Dorado389_2009_274_03_274_03_decim.nc',
    'Dorado389_2009_278_01_278_01_decim.nc',
    'Dorado389_2009_278_01_278_02_decim.nc',
    'Dorado389_2009_279_00_279_00_decim.nc',
    'Dorado389_2009_280_00_280_00_decim.nc',
    'Dorado389_2009_281_01_281_01_decim.nc',
    'Dorado389_2009_308_04_308_04_decim.nc',
    'Dorado389_2009_309_00_309_03_decim.nc',
    'Dorado389_2009_313_02_313_02_decim.nc',
    'Dorado389_2009_342_04_342_04_decim.nc',
    'Dorado389_2009_348_05_348_05_decim.nc',
]
cl.dorado_parms = ['temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
                   'fl700_uncorr', 'salinity', 'biolume',
                   'sepCountList', 'mepCountList',
                   'roll', 'pitch', 'yaw']

# Mooring M1ts
cl.m1ts_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1ts_files = ['OS_MBARI-M1_R_TS']
cl.m1ts_parms = ['PSAL', 'TEMP']
cl.m1ts_startDatetime = datetime.datetime(2009, 1, 1)
cl.m1ts_endDatetime = datetime.datetime(2009, 12, 31)

# Mooring M1met
cl.m1met_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1met_files = ['OS_MBARI-M1_R_M']
cl.m1met_parms = ['WSPD', 'WDIR', 'ATMP', 'SW', 'RELH']
cl.m1met_startDatetime = datetime.datetime(2009, 1, 1)
cl.m1met_endDatetime = datetime.datetime(2009, 12, 31)

# Execute the load: stride selection comes from the command line.
cl.process_command_line()

if cl.args.test:
    cl.loadDorado(stride=20)
    ##cl.loadM1ts(stride=10)
    ##cl.loadM1met(stride=10)
elif cl.args.optimal_stride:
    cl.loadDorado(stride=2)
    cl.loadM1ts(stride=1)
    cl.loadM1met(stride=1)
else:
    cl.loadDorado(stride=cl.args.stride)
    ##cl.loadM1ts(stride=cl.args.stride)
    ##cl.loadM1met(stride=cl.args.stride)

# Add any X3D Terrain information specified in the constructor to the
# database - must be done after a load is executed.
cl.addTerrainResources()

print("All Done.")
|
rajalokan/nova
|
nova/policies/console_output.py
|
Python
|
apache-2.0
| 1,106
| 0
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions a
|
nd limitations
# under the License.
from nova.pol
|
icies import base
BASE_POLICY_NAME = 'os_compute_api:os-console-output'

# Single policy: who may invoke the os-getConsoleOutput server action.
console_output_policies = [
    base.create_rule_default(
        BASE_POLICY_NAME,
        base.RULE_ADMIN_OR_OWNER,
        'Show console output for a server',
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (os-getConsoleOutput)'
            }
        ])
]


def list_rules():
    """Return this module's policy defaults for registration."""
    return console_output_policies
|
hack4sec/ws-cli
|
classes/jobs/FormBruterJob.py
|
Python
|
mit
| 479
| 0.006263
|
# -*- co
|
ding: utf-8 -*-
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Job class for FormBruter module
"""
from classes.jobs.GeneratorJob import GeneratorJob
class FormBruterJob(GeneratorJob):
    """Job class for FormBruter module."""

    # Name of the backing collection for this job's data -- presumably
    # consumed by the GeneratorJob machinery; confirm in the base class.
    collection_name = 'form_bruter'
|
petemoore/funsize
|
funsize/balrog.py
|
Python
|
mpl-2.0
| 2,557
| 0
|
import os
import requests
import logging
import json
import redo
log = logging.getLogger(__name__)
PLATFORM_MAP = json.load(open(
os.path.join(os.path.dirname(__file__), 'data', 'platform_map.json')))
def _retry_on_http_errors(url, auth, verify, params, errors):
    """GET ``url``, retrying on selected HTTP error status codes.

    Retries up to 10 times (5-30s sleeps via ``redo.retrier``) when the
    response status is in ``errors``; any other HTTPError propagates
    immediately.

    :param url: URL to fetch.
    :param auth: requests-compatible auth object.
    :param verify: TLS verification flag or CA bundle path.
    :param params: query-string parameters dict, or None.
    :param errors: iterable of retryable HTTP status codes, e.g. [500].
    :return: the successful ``requests`` response object.
    :raises requests.HTTPError: the last retryable error once attempts are
        exhausted, or the first non-retryable one.
    """
    last_error = None
    for _ in redo.retrier(sleeptime=5, max_sleeptime=30, attempts=10):
        try:
            req = requests.get(url, auth=auth, verify=verify, params=params)
            req.raise_for_status()
            return req
        except requests.HTTPError as e:
            if e.response.status_code in errors:
                log.exception("Got HTTP %s trying to reach %s",
                              e.response.status_code, url)
                # Remember the error so we can surface it when we give up.
                last_error = e
            else:
                raise
    # Fix: the original ended with a bare ``raise`` in the loop's ``else``
    # clause; with no active exception that itself blows up with a confusing
    # RuntimeError/TypeError. Re-raise the real last HTTP error instead.
    if last_error is None:
        raise RuntimeError("No attempts made to reach %s" % url)
    raise last_error
class BalrogClient(object):
    """Minimal read-only client for the Balrog update server REST API."""

    def __init__(self, api_root, auth, cert=None):
        """
        :param api_root: base URL of the Balrog API (no trailing slash).
        :param auth: credentials handed straight to ``requests``.
        :param cert: optional CA bundle path; when omitted, default TLS
            verification is used.
        """
        self.api_root = api_root
        self.auth = auth
        # requests' ``verify`` accepts either a CA-bundle path or True.
        if cert:
            self.verify = cert
        else:
            self.verify = True

    def get_releases(self, product, branch):
        """Returns a list of release names from Balrog.

        :param product: product name, AKA appName
        :param branch: branch name, e.g. mozilla-central
        :return: a list of release names, sorted newest-first
        """
        url = "{}/releases".format(self.api_root)
        params = {
            "product": product,
            # Adding -nightly-2 (2 stands for the beginning of build ID
            # based on date) should filter out release and latest blobs.
            # This should be changed to -nightly-3 in 3000 ;)
            "name_prefix": "{}-{}-nightly-2".format(product, branch),
            "names_only": True
        }
        # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
        # Python 3; items() produces the same pairs on both (logging only).
        params_str = "&".join("=".join([k, str(v)])
                              for k, v in params.items())
        log.info("Connecting to %s?%s", url, params_str)
        req = _retry_on_http_errors(
            url=url, auth=self.auth, verify=self.verify, params=params,
            errors=[500])
        releases = req.json()["names"]
        releases = sorted(releases, reverse=True)
        return releases

    def get_build(self, release, platform, locale):
        """Fetch the build blob for (release, platform, locale).

        The first entry of PLATFORM_MAP[platform] is used as the Balrog-side
        update platform name -- presumably per data/platform_map.json;
        confirm against that file.
        """
        update_platform = PLATFORM_MAP[platform][0]
        url = "{}/releases/{}/builds/{}/{}".format(self.api_root, release,
                                                   update_platform, locale)
        log.info("Connecting to %s", url)
        req = _retry_on_http_errors(
            url=url, auth=self.auth, verify=self.verify, params=None,
            errors=[500])
        return req.json()
|
h4ck3rm1k3/FEC-Field-Documentation
|
fec/version/v8_0/F9.py
|
Python
|
unlicense
| 2,852
| 0.000701
|
import fechbase
class Records(fechbase.RecordsBase):
    """Field schema for the FEC "F9" record type.

    Each entry pairs a field name with its position "number" string in the
    filing format; ``fields_names`` is the lookup table built by RecordsBase.
    """

    # (name, number) pairs, in filing order.
    _FIELD_SPECS = (
        ('FORM TYPE', '1'),
        ('FILER COMMITTEE ID NUMBER', '2'),
        ('ENTITY TYPE', '3'),
        ('ORGANIZATION NAME', '4'),
        ('INDIVIDUAL LAST NAME', '5'),
        ('INDIVIDUAL FIRST NAME', '6'),
        ('INDIVIDUAL MIDDLE NAME', '7'),
        ('INDIVIDUAL PREFIX', '8'),
        ('INDIVIDUAL SUFFIX', '9'),
        ('CHANGE OF ADDRESS', '10'),
        ('STREET 1', '11'),
        ('STREET 2', '12'),
        ('CITY', '13'),
        ('STATE', '14'),
        ('ZIP', '15'),
        ('INDIVIDUAL EMPLOYER', '16'),
        ('INDIVIDUAL OCCUPATION', '17'),
        ('COVERAGE FROM DATE', '18'),
        ('COVERAGE THROUGH DATE', '19'),
        ('DATE OF PUBLIC DISTRIBUTION', '20'),
        ('COMMUNICATION TITLE', '21'),
        ('FILER CODE', '22'),
        ('FILER CODE DESCRIPTION', '23'),
        ('SEGREGATED BANK ACCOUNT', '24'),
        ('CUSTODIAN LAST NAME', '25'),
        ('CUSTODIAN FIRST NAME', '26'),
        ('CUSTODIAN MIDDLE NAME', '27'),
        ('CUSTODIAN PREFIX', '28'),
        ('CUSTODIAN SUFFIX', '29'),
        ('CUSTODIAN STREET 1', '30'),
        ('CUSTODIAN STREET 2', '31'),
        ('CUSTODIAN CITY', '32'),
        ('CUSTODIAN STATE', '33'),
        ('CUSTODIAN ZIP', '34'),
        ('CUSTODIAN EMPLOYER', '35'),
        ('CUSTODIAN OCCUPATION', '36'),
        ('TOTAL DONATIONS THIS STATEMENT', '37-9.'),
        ('TOTAL DISB./OBLIG. THIS STATEMENT', '38-10.'),
        ('PERSON COMPLETING LAST NAME', '39'),
        ('PERSON COMPLETING FIRST NAME', '40'),
        ('PERSON COMPLETING MIDDLE NAME', '41'),
        ('PERSON COMPLETING PREFIX', '42'),
        ('PERSON COMPLETING SUFFIX', '43'),
        ('DATE SIGNED', '44'),
    )

    def __init__(self):
        fechbase.RecordsBase.__init__(self)
        # Materialise the spec tuples into the dict layout RecordsBase expects.
        self.fields = [{'name': name, 'number': number}
                       for name, number in self._FIELD_SPECS]
        self.fields_names = self.hash_names(self.fields)
|
dsgouda/autorest
|
Samples/2a-validation/Python/storage/storage_management_client.py
|
Python
|
mit
| 3,693
| 0.000542
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.1.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.storage_accounts_operations import StorageAccountsOperations
from .operations.usage_operations import UsageOperations
from . import models
class StorageManagementClientConfiguration(AzureConfiguration):
    """Configuration for StorageManagementClient.

    Note that all parameters used to create this instance are saved as
    instance attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify Microsoft Azure subscription. The subscription ID forms part of
     the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(self, credentials, subscription_id, base_url=None):
        # Validate required arguments up front, in the same order the
        # original implementation reported failures.
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        if not isinstance(subscription_id, str):
            raise TypeError("Parameter 'subscription_id' must be str.")

        # Fall back to the public Azure management endpoint.
        super(StorageManagementClientConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        self.add_user_agent('storagemanagementclient/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
class StorageManagementClient(object):
    """The Storage Management Client.

    :ivar config: Configuration for client.
    :vartype config: StorageManagementClientConfiguration
    :ivar storage_accounts: StorageAccounts operations
    :vartype storage_accounts: storage.operations.StorageAccountsOperations
    :ivar usage: Usage operations
    :vartype usage: storage.operations.UsageOperations
    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Gets subscription credentials which uniquely
     identify Microsoft Azure subscription. The subscription ID forms part of
     the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(self, credentials, subscription_id, base_url=None):
        self.config = StorageManagementClientConfiguration(
            credentials, subscription_id, base_url)
        self._client = ServiceClient(self.config.credentials, self.config)

        # The (de)serializers need every generated model class by name.
        client_models = {name: cls for name, cls in models.__dict__.items()
                         if isinstance(cls, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self.api_version = '2015-06-15'

        # Operation groups share the single service client and serializers.
        wiring = (self._client, self.config,
                  self._serialize, self._deserialize)
        self.storage_accounts = StorageAccountsOperations(*wiring)
        self.usage = UsageOperations(*wiring)
Metaswitch/calico-nova
|
nova/tests/unit/test_linuxscsi.py
|
Python
|
apache-2.0
| 5,951
| 0
|
# Copyright 2010 OpenStack Foundation
# (c) Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import log as logging
from nova.storage import linuxscsi
from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class StorageLinuxSCSITestCase(test.NoDBTestCase):
def setUp(self):
super(StorageLinuxSCSITestCase, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def test_find_multipath_device_3par(self):
def fake_execute(*cmd, **kwargs):
out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n"
"size=2.0G features='0' hwhandler='0' wp=rw\n"
"`-+- policy='round-robin 0' prio=-1 status=active\n"
" |- 0:0:0:1 sde 8:64 active undef running\n"
" `- 2:0:0:1 sdf 8:80 active undef running\n"
)
return out, None
def fake_execute2(*cmd, **kwargs):
out = ("350002ac20398383d dm-3 3PARdata,VV\n"
"size=2.0G features='0' hwhandler='0' wp=rw\n"
"`-+- policy='round-robin 0' prio=-1 status=active\n"
" |- 0:0:0:1 sde 8:64 active undef running\n"
" `- 2:0:0:1 sdf 8:80 active undef running\n"
)
return out, None
self.stubs.Set(utils, 'execute', fake_execute)
info = linuxscsi.find_multipath_device('/dev/sde')
LOG.error("info = %s" % info)
self.assertEqual("/dev/mapper/350002ac20398383d", info["device"])
self.assertEqual("/dev/sde", info['devices'][0]['device'])
self.assertEqual("0", info['devices'][0]['host'])
self.assertEqual("0", info['devices'][0]['id'])
self.assertEqual("0", info['devices'][0]['channel'])
self.assertEqual("1", info['devices'][0]['lun'])
self.assertEqual("/dev/sdf", info['devices'][1]['device'])
self.assertEqual("2", info['devices'][1]['host'])
self.assertEqual("0", info['devices'][1]['id'])
self.assertEqual("0", info['devices'][1]['channel'])
self.assertEqual("1", info['devices'][1]['lun'])
def test_find_multipath_device_svc(self):
def fake_execute(*cmd, **kwargs):
out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n"
"size=954M features='1 queue_if_no_path' hwhandler='0'"
" wp=rw\n"
"|-+- policy='round-robin 0' prio=-1 status=active\n"
"| |- 6:0:2:0 sde 8:64 active undef running\n"
"| `- 6:0:4:0 sdg 8:96 active undef running\n"
"`-+- policy='round-robin 0' prio=-1 status=enabled\n"
" |- 6:0:3:0 sdf 8:80 active undef running\n"
" `- 6:0:5:0 sdh 8:112 active undef running\n"
)
return out, None
self.stubs.Set(utils, 'execute', fake_execute)
info = linuxscsi.find_multipath_device('/dev/sde')
LOG.error("info = %s" % info)
self.assertEqual("/dev/mapper/36005076da00638089c000000000004d5",
info["device"])
self.assertEqual("/dev/sde", info['devices'][0]['device'])
self.assertEqual("6", info['devices'][0]['host'])
self.assertEqual("0", info['devices'][0]['channel'])
self.assertEqual("2", info['devices'][0]['id'])
self.assertEqual("0", info['devices'][0]['lun'])
self.assertEqual("/dev/sdf", info['devices'][2]['device'])
self.assertEqual("6", info['devices'][2]['host'])
self.assertEqual("0", info['devices'][2]['channel'])
self.assertEqual("3", info['devices'][2]['id'])
self.assertEqual("0", info['devices'][2]['lun'])
def test_find_multipath_device_ds8000(self):
def fake_execute(*cmd, **kwargs):
out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
"size=1.0G features='1 queue_if_no_path' hwhandler='0'"
" wp=rw\n"
"`-+- policy='round-robin 0' prio=-1 status=active\n"
" |- 6:0:2:0 sdd 8:64 active undef running\n"
" `- 6:1:0:3 sdc 8:32 active undef running\n"
)
return out, None
self.stubs.Set(utils, 'execute', fake_execute)
info = linuxscsi.find_multipath_d
|
evice('/dev/sdd')
LOG.error("info = %s" % info)
self.assertEqual("/dev/mapper/36005076303ffc48e0000000000000101",
info["device"])
self.assertEqual("/dev/sdd", info['devices'][0]['device'])
self.assertEqual("6", info['devices'][0]['host'])
self.assertEqual("0", info['devices'][0]['channel'])
self.assertEqual("2", info['devices'][0]['id'])
self.assertEqual("0", info['devices'][0]['lun'])
self.assertEqual(
|
"/dev/sdc", info['devices'][1]['device'])
self.assertEqual("6", info['devices'][1]['host'])
self.assertEqual("1", info['devices'][1]['channel'])
self.assertEqual("0", info['devices'][1]['id'])
self.assertEqual("3", info['devices'][1]['lun'])
|
Kirubaharan/hydrology
|
ch_591/ch_591_stage_area.py
|
Python
|
gpl-3.0
| 18,373
| 0.003973
|
__author__ = 'kiruba'
import pandas as pd
import matplotlib.pyplot as plt
import mpld3 as m
from mpl_toolkits.mplot3d import axes3d, Axes3D
from matplotlib import rc
from scipy.interpolate import griddata
import numpy as np
from matplotlib import cm
from matplotlib.path import *
from matplotlib.collections import PolyCollection
import matplotlib as mpl
from matplotlib.ticker import MultipleLocator
from matplotlib.ticker import MaxNLocator
import checkdam.checkdam as cd
import checkdam.mynormalize as mn
class MyAxes3D(axes3d.Axes3D):
    """Axes3D wrapper that redraws the z axis on chosen sides.

    `sides_to_draw` may contain 'l' and/or 'r' to draw the z axis on the
    left and/or right side of the plot box.

    Defect fixed: an assignment (`tmp_planes = zaxis._PLANES`) and a
    comment were split mid-token by extraction corruption; reconstructed.
    """

    def __init__(self, baseObject, sides_to_draw):
        # Re-class self as a hybrid of this class and the wrapped axes'
        # class so all original Axes3D behaviour is inherited, then steal
        # the wrapped object's state.
        self.__class__ = type(baseObject.__class__.__name__,
                              (self.__class__, baseObject.__class__),
                              {})
        self.__dict__ = baseObject.__dict__
        self.sides_to_draw = list(sides_to_draw)
        self.mouse_init()

    def set_some_features_visibility(self, visible):
        """Show or hide the default z-axis ticks, line, pane and label."""
        for t in self.w_zaxis.get_ticklines() + self.w_zaxis.get_ticklabels():
            t.set_visible(visible)
        self.w_zaxis.line.set_visible(visible)
        self.w_zaxis.pane.set_visible(visible)
        self.w_zaxis.label.set_visible(visible)

    def draw(self, renderer):
        """Draw the axes, then re-draw the z axis on the requested sides."""
        # Hide default z-axis features while the base class draws.
        self.set_some_features_visibility(False)
        # draw the axes
        super(MyAxes3D, self).draw(renderer)
        # Restore visibility of the z-axis features.
        # This could be adapted to set your features to desired visibility,
        # e.g. storing the previous values and restoring the values.
        self.set_some_features_visibility(True)

        zaxis = self.zaxis
        draw_grid_old = zaxis.axes._draw_grid
        # Disable grid drawing: the grid was already drawn above.
        zaxis.axes._draw_grid = False

        tmp_planes = zaxis._PLANES
        if 'l' in self.sides_to_draw:
            # draw zaxis on the left side
            zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
                             tmp_planes[0], tmp_planes[1],
                             tmp_planes[4], tmp_planes[5])
            zaxis.draw(renderer)
        if 'r' in self.sides_to_draw:
            # draw zaxis on the right side
            zaxis._PLANES = (tmp_planes[3], tmp_planes[2],
                             tmp_planes[1], tmp_planes[0],
                             tmp_planes[4], tmp_planes[5])
            zaxis.draw(renderer)
        # Restore the original plane table and grid flag.
        zaxis._PLANES = tmp_planes
        zaxis.axes._draw_grid = draw_grid_old
# print mpl.__version__
base_file = '/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/base_profile_591.csv'
df_base = pd.read_csv(base_file, header=-1)
# print df_base.head()
# print(df_base.ix[1:, 1:])
df_base.ix[1:, 1:] = df_base.ix[1:, 1:].add(0.03)
# raise SystemExit(0)
slope_file = '/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/slope_profile_1.csv'
df_slope = pd.read_csv(slope_file, header=0)
# print df_base
df_base_trans = df_base.T # T refers to transpose
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=36)
# print(df_base_trans)
# print df_slope
# check dam height = 1.9 m
#width - 8.8 m
# # df_base 17(0-16) rows and 47(0-46) columns
# df_base_trans has 47 (0-46) rows and 17(0-16) columns
############################################ In between profiles are filled
# print df_base_trans.ix[1:, 1] # df.ix[row,column]
template_0 = df_base_trans.ix[1:, 1] # template for profile
# print template_0
# print df_base_trans.ix[0:, 0:1]
z1 = df_base_trans.ix[29, 1]
z2 = .21 # elevation value at 9 m
diff = z2 - z1
profile_9 = template_0 + diff
df_base_trans[17] = profile_9
df_base_trans.ix[0, 17] = 9
template_10 = df_base_trans.ix[1:, 2]
z10 = df_base_trans.ix[29, 2]
z11 = .11
diff = z11 - z10
profile_11 = template_10 + diff
df_base_trans[18] = profile_11
df_base_trans.ix[0, 18] = 11
template_20 = df_base_trans.ix[1:, 3]
z20 = df_base_trans.ix[29, 3]
z28 = 0.49
diff = z28 - z20
profile_28 = template_20 + diff
df_base_trans[19] = profile_28
df_base_trans.ix[0, 19] = 28
template_30 = df_base_trans.ix[1:, 4]
z30 = df_base_trans.ix[29, 4]
z38 = .66
diff = z38 - z30
profile_38 = template_30 + diff
df_base_trans[20] = profile_38
df_base_trans.ix[0, 20] = 38
# print df_base_trans.head()
# raise SystemExit(0)
################################################
x1 = df_base_trans.ix[1:, 0] # separate out x, y, z values
y1 = df_base_trans.ix[0, 1:]
z1 = df_base_trans.ix[1:, 1:]
z_array = df_base_trans.ix[1:, 1:].values
columns = list(df_base_trans.ix[0, 1:].values)
index = df_base_trans.ix[1:, 0].values
df = pd.DataFrame(z_array, columns=columns).set_index(index)
#### create x, y, z array for plotting and contour
# print df
data_1 = []
for y, row in df.iteritems():
# print 'i = %s' % y
for x, z in row.iteritems():
data_1.append((x, y, z))
# print 'x = %s and z = %s' % (x,z)
data_1_df = pd.DataFrame(data_1, columns=['x', 'y', 'z'])
# df_base_trans.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/base_trans.csv')
data_1_df.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/cloud.csv')
# print data_1_df.shape
X = data_1_df.x
Y = data_1_df.y
Z = data_1_df.z
#
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1, projection = '3d')
# ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1)
# ax.plot_surface(X,Y,Z, rstride=4, cstride=4, linewidth=2)
# rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# rc('text', usetex=True)
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel(r'\textbf{X} (m)')
# plt.ylabel(r'\textbf{Y} (m)')
# plt.title(r"Profile for 591", fontsize=16)
# plt.show()
# 3d plot for check dam paper
fig = plt.figure(figsize=(12, 7.414), facecolor='white')
ax1 = fig.gca(projection='3d')
xi = np.linspace(X.min(), X.max(), 100)
yi = np.linspace(Y.min(), Y.max(), 100)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid
xig, yig = np.meshgrid(xi, yi)
# surf = ax.plot_surface(xig, yig, zi, rstride=5, cstride=3, linewidth=0, cmap=cm.coolwarm, antialiased=False, rasterized=True) # 3d plot
surf = ax1.plot_surface(xig, yig, zi, rstride=1, cstride=1, cmap='Greys', shade=False, linewidth=0.25) # 3d plot
plt.subplots_adjust(right=0.80, bottom=0, wspace=0.22)
cbar_ax = fig.add_axes([0.85, 0.4, 0.04, 0.5])
# surf.set_edgecolor('k')
ax1.set_xlabel(r'X')
ax1.set_ylabel(r'Y')
ax1.set_zlabel(r'Z')
ax1.xaxis.set_rotate_label(False)
ax1.yaxis.set_rotate_label(False)
ax1.zaxis.set_rotate_label(False)
ax1.view_init(elev=30, azim=-119)
ax1.grid(False)
ax1.xaxis.pane.set_edgecolor('black')
ax1.yaxis.pane.set_edgecolor('black')
ax1.zaxis.pane.set_edgecolor('black')
ax1.xaxis.pane.fill = False
ax1.yaxis.pane.fill = False
ax1.zaxis.pane.fill = False
ax1.xaxis.set_major_locator(MultipleLocator(10))
ax1.yaxis.set_major_locator(MultipleLocator(20))
ax1.zaxis.set_major_locator(MultipleLocator(1))
[t.set_va('center') for t in ax1.get_yticklabels()]
[t.set_ha('left') for t in ax1.get_yticklabels()]
[t.set_va('center') for t in ax1.get_xticklabels()]
[t.set_ha('right') for t in ax1.get_xticklabels()]
[t.set_va('center') for t in ax1.get_zticklabels()]
[t.set_ha('left') for t in ax1.get_zticklabels()]
ax1.xaxis._axinfo['tick']['inward_factor'] = 0
ax1.xaxis._axinfo['tick']['outward_factor'] = 0.4
ax1.yaxis._axinfo['tick']['inward_factor'] = 0
ax1.yaxis._axinfo['tick']['outward_factor'] = 0.4
ax1.zaxis._axinfo['tick']['inward_factor'] = 0
ax1.zaxis._axinfo['tick']['outward_factor'] = 0.4
ax1.zaxis._axinfo['tick']['inward_factor'] = 0
ax = fig.add_axes(MyAxes3D(ax1, 'lr'))
plt.show()
# raise SystemExit(0)
## contour and 3d surface plotting
fig = plt.figure(figsize=(16, 8))
ax = fig.gca(projection='3d')
# ax = fig.add_subplot(1, 2, 1, projection='3d')
xi = np.linspace(X.min(), X.max(), 100)
yi = np.linspace(Y.min(), Y.max(), 100)
# print len(xi)
# print len(yi)
# print len(Z)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid
# print zi.min()
# print zi.max()
# CS_1 = plt.contou
|
turdusmerula/kipartman
|
kipartman/swagger_client/models/part.py
|
Python
|
gpl-3.0
| 13,785
| 0.001378
|
# coding: utf-8
"""
Kipartman
Kipartman api specification
|
s
OpenAPI spec version: 1.0.0
Contact: --
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pf
|
ormat
from six import iteritems
import re
class Part(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'description': 'str',
'comment': 'str',
'octopart': 'str',
'octopart_uid': 'str',
'updated': 'datetime',
'value_parameter': 'str',
'id': 'int',
'category': 'PartCategory',
'has_childs': 'int',
'childs': 'list[Part]',
'footprint': 'VersionedFile',
'symbol': 'VersionedFile',
'parameters': 'list[PartParameter]',
'distributors': 'list[PartDistributor]',
'manufacturers': 'list[PartManufacturer]',
'storages': 'list[PartStorage]',
'attachements': 'list[PartAttachement]'
}
attribute_map = {
'name': 'name',
'description': 'description',
'comment': 'comment',
'octopart': 'octopart',
'octopart_uid': 'octopart_uid',
'updated': 'updated',
'value_parameter': 'value_parameter',
'id': 'id',
'category': 'category',
'has_childs': 'has_childs',
'childs': 'childs',
'footprint': 'footprint',
'symbol': 'symbol',
'parameters': 'parameters',
'distributors': 'distributors',
'manufacturers': 'manufacturers',
'storages': 'storages',
'attachements': 'attachements'
}
def __init__(self, name=None, description=None, comment=None, octopart=None, octopart_uid=None, updated=None, value_parameter=None, id=None, category=None, has_childs=None, childs=None, footprint=None, symbol=None, parameters=None, distributors=None, manufacturers=None, storages=None, attachements=None):
"""
Part - a model defined in Swagger
"""
self._name = None
self._description = None
self._comment = None
self._octopart = None
self._octopart_uid = None
self._updated = None
self._value_parameter = None
self._id = None
self._category = None
self._has_childs = None
self._childs = None
self._footprint = None
self._symbol = None
self._parameters = None
self._distributors = None
self._manufacturers = None
self._storages = None
self._attachements = None
if name is not None:
self.name = name
if description is not None:
self.description = description
if comment is not None:
self.comment = comment
if octopart is not None:
self.octopart = octopart
if octopart_uid is not None:
self.octopart_uid = octopart_uid
if updated is not None:
self.updated = updated
if value_parameter is not None:
self.value_parameter = value_parameter
self.id = id
if category is not None:
self.category = category
if has_childs is not None:
self.has_childs = has_childs
if childs is not None:
self.childs = childs
if footprint is not None:
self.footprint = footprint
if symbol is not None:
self.symbol = symbol
if parameters is not None:
self.parameters = parameters
if distributors is not None:
self.distributors = distributors
if manufacturers is not None:
self.manufacturers = manufacturers
if storages is not None:
self.storages = storages
if attachements is not None:
self.attachements = attachements
@property
def name(self):
"""
Gets the name of this Part.
:return: The name of this Part.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Part.
:param name: The name of this Part.
:type: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this Part.
:return: The description of this Part.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Part.
:param description: The description of this Part.
:type: str
"""
self._description = description
@property
def comment(self):
"""
Gets the comment of this Part.
:return: The comment of this Part.
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""
Sets the comment of this Part.
:param comment: The comment of this Part.
:type: str
"""
self._comment = comment
@property
def octopart(self):
"""
Gets the octopart of this Part.
:return: The octopart of this Part.
:rtype: str
"""
return self._octopart
@octopart.setter
def octopart(self, octopart):
"""
Sets the octopart of this Part.
:param octopart: The octopart of this Part.
:type: str
"""
self._octopart = octopart
@property
def octopart_uid(self):
"""
Gets the octopart_uid of this Part.
:return: The octopart_uid of this Part.
:rtype: str
"""
return self._octopart_uid
@octopart_uid.setter
def octopart_uid(self, octopart_uid):
"""
Sets the octopart_uid of this Part.
:param octopart_uid: The octopart_uid of this Part.
:type: str
"""
self._octopart_uid = octopart_uid
@property
def updated(self):
"""
Gets the updated of this Part.
:return: The updated of this Part.
:rtype: datetime
"""
return self._updated
@updated.setter
def updated(self, updated):
"""
Sets the updated of this Part.
:param updated: The updated of this Part.
:type: datetime
"""
self._updated = updated
@property
def value_parameter(self):
"""
Gets the value_parameter of this Part.
:return: The value_parameter of this Part.
:rtype: str
"""
return self._value_parameter
@value_parameter.setter
def value_parameter(self, value_parameter):
"""
Sets the value_parameter of this Part.
:param value_parameter: The value_parameter of this Part.
:type: str
"""
self._value_parameter = value_parameter
@property
def id(self):
"""
Gets the id of this Part.
:return: The id of this Part.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Part.
:param id: The id of this Part.
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._id = id
@property
def category(self):
"""
Gets the category of this Part.
:return: The category of this Part.
:rtype: PartCategory
"""
return self._category
@category.setter
def category(self, category):
"""
Sets the category of this Part.
:param category: The category of this Part.
:type: PartCategory
"""
self._category = category
@property
def has_childs(self):
"""
|
LLNL/spack
|
var/spack/repos/builtin/packages/xdotool/package.py
|
Python
|
lgpl-2.1
| 1,385
| 0.004332
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xdotool(MakefilePackage):
    """fake keyboard/mouse input, window management, and more"""

    homepage = "https://github.com/jordansissel/xdotool"
    # Defect fixed: the url and one sha256 below were split mid-token by
    # extraction corruption; reconstructed from the two halves.
    url = "https://github.com/jordansissel/xdotool/releases/download/v3.20160805.1/xdotool-3.20160805.1.tar.gz"

    version('3.20160805.1', sha256='35be5ff6edf0c620a0e16f09ea5e101d5173280161772fca18657d83f20fcca8')
    version('3.20160804.2', sha256='2251671c3c3dadab2b70e08bd87f2de6338c7b4e64e7e2d2d881fd13f9bff72c')
    version('3.20160804.1', sha256='7a76ee57515cc767a00a768f1d04c703279d734255a34f8027c29178561fdce9')
    version('3.20150503.1', sha256='e8326883bd5e91bede7336cbee186e6e9143f40b3fb61c84afc9bb31b87e96d1')

    depends_on('libxext')
    depends_on('libxtst')
    depends_on('libxi')
    depends_on('libx11')
    depends_on('inputproto')
    depends_on('libxinerama')
    depends_on('libxkbcommon')

    def edit(self, spec, prefix):
        """Point the Makefile at our install prefix and strip the
        hard-coded rpath linker flags (Spack manages rpaths itself)."""
        env['PREFIX'] = prefix
        makefile = FileFilter('Makefile')
        makefile.filter('xdotool: LDFLAGS+=-Xlinker', '', string=True)
        makefile.filter('xdotool: LDFLAGS+=-rpath $(INSTALLLIB)', '',
                        string=True)
|
vivekhaldar/fetch_rss
|
output_prn.py
|
Python
|
gpl-3.0
| 928
| 0.001078
|
# Copyright (C) 2012 Vivek Haldar
#
# Take in a dict containing fetched RSS data, and output to printable files in
# the
|
current directory.
#
# Dict looks like:
# feed_title -> [list of articles]
# each article has (title, body).
#
# Author: Vivek Haldar <vh@vivekhaldar.com>
import codecs
import escpos
from datetime import datetime
import textwrap
import output
class OutputPrn(output.Output):
    """Writes each feed's articles to a printable .prn file (ESC/POS).

    One file per feed; '/' in the feed title is replaced so the title is a
    valid filename.
    """

    def output(self):
        articles = self._articles
        for f in articles:
            prn = escpos.Escpos('%s.prn' % f.replace('/', '_'))
            for a in articles[f]:
                title, body = a
                # Cut body down to 100 words.
                short_body = ' '.join(body.split()[:100])
                prn.bigtext(f + '\n')
                prn.bigtext(textwrap.fill(title, 32) + '\n')
                # Bug fix: the original computed short_body but printed the
                # full body, so the 100-word cut never took effect.
                # TODO(review): confirm truncation was the intent.
                prn.text(textwrap.fill(short_body, 32))
                prn.text('\n\n\n')
            prn.flush()
|
pku9104038/edx-platform
|
common/lib/xmodule/setup.py
|
Python
|
agpl-3.0
| 3,169
| 0.000947
|
from setuptools import setup, find_packages
# Registry of "<tag> = <module>:<DescriptorClass>" entry points exposed both
# as xblock.v1 and xmodule.v1 plugins (see entry_points in setup() below).
# Two entries ("image" and "sequential") were split mid-token by extraction
# corruption and are reconstructed here.
XMODULES = [
    "abtest = xmodule.abtest_module:ABTestDescriptor",
    "book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "chapter = xmodule.seq_module:SequenceDescriptor",
    "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
    "conditional = xmodule.conditional_module:ConditionalDescriptor",
    "course = xmodule.course_module:CourseDescriptor",
    "customtag = xmodule.template_module:CustomTagDescriptor",
    "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "html = xmodule.html_module:HtmlDescriptor",
    "image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "error = xmodule.error_module:ErrorDescriptor",
    "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
    "poll_question = xmodule.poll_module:PollDescriptor",
    "problem = xmodule.capa_module:CapaDescriptor",
    "problemset = xmodule.seq_module:SequenceDescriptor",
    "randomize = xmodule.randomize_module:RandomizeDescriptor",
    "section = xmodule.backcompat_module:SemanticSectionDescriptor",
    "sequential = xmodule.seq_module:SequenceDescriptor",
    "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "vertical = xmodule.vertical_module:VerticalDescriptor",
    "video = xmodule.video_module:VideoDescriptor",
    "videoalpha = xmodule.video_module:VideoDescriptor",
    "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "videosequence = xmodule.seq_module:SequenceDescriptor",
    "discussion = xmodule.discussion_module:DiscussionDescriptor",
    "course_info = xmodule.html_module:CourseInfoDescriptor",
    "static_tab = xmodule.html_module:StaticTabDescriptor",
    "custom_tag_template = xmodule.raw_module:RawDescriptor",
    "about = xmodule.html_module:AboutDescriptor",
    "wrapper = xmodule.wrapper_module:WrapperDescriptor",
    "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
    "annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
    "textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
    "videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
    "foldit = xmodule.foldit_module:FolditDescriptor",
    "word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
    "hidden = xmodule.hidden_module:HiddenDescriptor",
    "raw = xmodule.raw_module:RawDescriptor",
    "crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
    "lti = xmodule.lti_module:LTIDescriptor",
]
setup(
name="XModule",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'distribute',
'docopt',
'capa',
'path.py',
'webob',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES,
'xmodule.v1': XMODULES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
|
pawciobiel/fgpst-gae
|
fgpst/utils/pubsub.py
|
Python
|
gpl-3.0
| 1,412
| 0.000708
|
import logging
from google.appengine.api import channel
from . import memc
log = logging.getLogger(__name__)
PUBSUB_PREFIX = 'pubsub'
def _build_group_key(channel_id):
return "%s:%s" % (PUBSUB_PREFIX, channel_id)
def add_client(channel_id, client_id, time=channel.MAXIMUM_TOKEN_DURATION_MINUTES * 60):
    """Register client_id on channel_id and return its Channel API token.

    A cached token is reused when present; otherwise a new channel is
    created and the token cached for `time` seconds (defaults to the
    Channel API maximum token lifetime).

    Defect fixed: `if not token:` was split mid-token by extraction
    corruption; reconstructed.
    """
    group_key = _build_group_key(channel_id)
    token = memc.get_from_group(group_key, client_id)
    if not token:
        log.debug("opening channel")
        token = channel.create_channel(client_id)
        memc.add_to_group(group_key, client_id, token, time=time)
    return token
def get_all_clients(channel_id):
    """Return all (client_id, token) pairs registered on channel_id.

    @param channel_id: the channel whose clients to look up
    @return: items like [('client-id', 'token'), ...]
    """
    group_key = _build_group_key(channel_id)
    log.debug("get_all_clients group_key=%s", group_key)
    return memc.get_all_from_group(group_key).items()
def remove_client_from_channel(channel_id, client_id):
    """Unregister client_id from channel_id's memcache group.

    Bug fix: the original passed the raw channel_id to memc, but
    add_client/get_all_clients key the group by _build_group_key(channel_id),
    so removals could never match an existing entry.
    TODO(review): confirm memc.remove_from_group expects the prefixed key.
    """
    return memc.remove_from_group(_build_group_key(channel_id), client_id)
def send_to_client(client_id, msg):
    """Push msg to a single client via the App Engine Channel API."""
    channel.send_message(client_id, msg)
def send_to_channel(channel_id, msg):
    """Broadcast msg to every client registered on channel_id.

    Bug fix: the original used `for ... else`; a loop's else-branch runs
    whenever the loop finishes without `break` -- i.e. on *every* call --
    so "no clients" was logged even after successfully messaging clients.
    """
    log.debug("pubsub.send_to_channel(%s, %s)", channel_id, msg)
    clients = get_all_clients(channel_id)
    if not clients:
        log.debug("no clients in channel `%s`", channel_id)
        return
    # FIXME: threads or bg task or send_async?
    for client_id, token in clients:
        send_to_client(client_id, msg)
|
LeanVel/TakeoutsTimelining
|
SearchesParser.py
|
Python
|
gpl-3.0
| 857
| 0.014002
|
import json
import time


def parseSearches(searchesFile, begintimeframe=0, endtimeframe=None):
    """Parse a Google Takeout "Searches" JSON file into timeline tuples.

    Parameters:
        searchesFile - path to the Takeout searches JSON file.
        begintimeframe - earliest (exclusive) UNIX timestamp to keep.
        endtimeframe - latest (exclusive) UNIX timestamp to keep; defaults
            to "now", evaluated at call time (the original evaluated
            int(time.time()) once at import time, freezing the default).

    Returns:
        List of (timestamp, 'Searches', 'Query Text: <query>') tuples.
    """
    if endtimeframe is None:
        endtimeframe = int(time.time())
    # Context manager closes the file (the original leaked the handle
    # from json.load(open(...))).
    with open(searchesFile, 'r') as fh:
        searches = json.load(fh)
    listOfsearches = []
    for search in searches["event"]:
        # A query can contain several timestamps, so each one becomes its
        # own timeline entry.
        for timeStampDic in search["query"]['id']:
            # timestamp_usec is microseconds; convert to whole seconds.
            timeStamp = int(timeStampDic["timestamp_usec"]) // 1000000
            # Keep only entries strictly inside the requested time frame.
            if begintimeframe < timeStamp < endtimeframe:
                queryText = str(search["query"]["query_text"])
                searchInfo = timeStamp, 'Searches', \
                    "Query Text: " + queryText
                listOfsearches.append(searchInfo)
    return listOfsearches
|
karllessard/tensorflow
|
tensorflow/python/data/kernel_tests/window_test.py
|
Python
|
apache-2.0
| 10,592
| 0.005004
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.window()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class WindowTest(test_base.DatasetTestBase, parameterized.TestCase):
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              count=20,
              size=[10, 14, 17],
              shift=[7, 14],
              stride=[1, 2, 6],
              drop_remainder=[True, False]) + combinations.combine(
                  count=[0, 1],
                  size=10,
                  shift=4,
                  stride=1,
                  drop_remainder=[True, False])))
  def testWindowDataset(self, count, size, shift, stride, drop_remainder):
    """Tests a dataset that slides a window its input elements."""
    # Three components of different ranks, each of length 7.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    def _flat_map_fn(x, y, z):
      # Batch each per-component window dataset back into one tensor.
      return dataset_ops.Dataset.zip((x.batch(batch_size=size),
                                      y.batch(batch_size=size),
                                      z.batch(batch_size=size)))

    dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
        _map_fn).repeat(count).window(
            size=size,
            shift=shift,
            stride=stride,
            drop_remainder=drop_remainder).flat_map(_flat_map_fn)
    get_next = self.getNext(dataset)
    # Window contents have a dynamic (None) leading dimension.
    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [ts.as_list() for ts in nest.flatten(
                         dataset_ops.get_legacy_output_shapes(dataset))])
    # Number of windows that fit entirely inside the repeated input.
    num_full_batches = max(0,
                           (count * 7 - ((size - 1) * stride + 1)) // shift + 1)
    for i in range(num_full_batches):
      result = self.evaluate(get_next())
      for component, result_component in zip(components, result):
        for j in range(size):
          self.assertAllEqual(component[(i * shift + j * stride) % 7]**2,
                              result_component[j])
    if not drop_remainder:
      # Short trailing windows are still produced when not dropping.
      num_partial_batches = (count * 7) // shift + (
          (count * 7) % shift > 0) - num_full_batches
      for i in range(num_partial_batches):
        result = self.evaluate(get_next())
        for component, result_component in zip(components, result):
          remaining = (count * 7) - ((num_full_batches + i) * shift)
          num_elements = remaining // stride + ((remaining % stride) > 0)
          for j in range(num_elements):
            self.assertAllEqual(
                component[((num_full_batches + i) * shift + j * stride) % 7]**2,
                result_component[j])
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(count=20, size=0, shift=3, stride=1) +
          combinations.combine(count=20, size=3, shift=0, stride=1) +
          combinations.combine(count=20, size=3, shift=3, stride=0)))
  def testWindowDatasetInvalid(self, count, size, shift, stride):
    """A zero `size`, `shift`, or `stride` must raise InvalidArgumentError."""
    with self.assertRaises(errors.InvalidArgumentError):
      ds = dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count).window(
          size=size, shift=shift,
          stride=stride).flat_map(lambda x: x.batch(batch_size=size))
      self.evaluate(ds._variant_tensor)
  @combinations.generate(test_base.default_test_combinations())
  def testWindowDifferentNestedStructures(self):
    """window() accepts both tuple- and dict-structured datasets."""
    ds = dataset_ops.Dataset.from_tensor_slices(([1, 2], [3, 4])).window(2)
    self.getNext(ds)
    ds = dataset_ops.Dataset.from_tensor_slices({"a": [1, 2]}).window(2)
    self.getNext(ds)
  @combinations.generate(test_base.default_test_combinations())
  def testWindowSparse(self):
    """window() handles sparse tensors with identical dense shapes."""

    def _sparse(i):
      # Single-entry sparse tensor whose value is the element index i.
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
        size=5, shift=3,
        drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))

    # Full windows over 10 elements with size=5, shift=3.
    num_batches = (10 - 5) // 3 + 1
    expected_output = [
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4],
            dense_shape=[5, 1]) for i in range(num_batches)
    ]
    self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testWindowSparseWithDifferentDenseShapes(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=5, shift=3,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5))
|
expected_output = []
num_batches =
|
(10 - 5) // 3 + 1
for i in range(num_batches):
expected_indices = []
expected_values = []
for j in range(5):
for k in range(i * 3 + j):
expected_indices.append([j, k])
expected_values.append(i * 3 + j)
expected_output.append(
sparse_tensor.SparseTensorValue(
indices=expected_indices,
values=expected_values,
dense_shape=[5, i * 3 + 5 - 1]))
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNestedWindowSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(
size=4, shift=2,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=4)).window(
size=3, shift=1,
drop_remainder=True).flat_map(lambda x: x.batch(batch_size=3))
expected_output = [
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
[2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1]),
sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
[1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0
|
eufarn7sp/egads-eufar
|
egads/algorithms/transforms/__init__.py
|
Python
|
bsd-3-clause
| 547
| 0.003656
|
"""
EGADS transforms algorithms. See EGADS Algorithm Documentation for more info.
"""
__author__ = "mfreer, ohenry"
__date__ = "2017-01-08 11
|
:42"
__version__ = "1.2"
import logging
try:
from interpolation_linear import *
from isotime_to_elements import *
from isotime_to_seconds import *
from seconds_to_isotime import *
from time_to_decimal
|
_year import *
logging.info('egads [transforms] algorithms have been loaded')
except Exception:
logging.error('an error occured during the loading of a [transforms] algorithm')
|
IllusionRom-deprecated/android_platform_tools_idea
|
python/testData/quickFixes/AddFieldQuickFixTest/addFieldFromMethod_after.py
|
Python
|
apache-2.0
| 97
| 0.020619
|
class
|
A:
def __init__(self):
self.x = 1
self.y = None
def foo(self
|
):
a = self.y
|
jncormier24/Templ
|
templ/parser.py
|
Python
|
gpl-3.0
| 4,028
| 0.0072
|
#!/usr/bin/env python3
# This file is a part of Templ
# Copyright (C) 2012 Zachary Dziura
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import yaml
import htmltag
class Parser:
    """Parses .tmpl file to be outputted as an html file."""

    def __init__(self, config_file='templ.conf'):
        """
        Parameters:
            config_file - Path to the YAML configuration file.
        """
        self.config_file = config_file
        self.tree = None
        self.rules = {}
        self._load_config()

    def _load_config(self):
        """Loads the configuration settings from the config_file."""
        with open(self.config_file, 'r') as config:
            # Security fix: yaml.load without an explicit Loader can
            # instantiate arbitrary Python objects from the config and is
            # deprecated; safe_load is the right call for plain config data.
            self.rules = yaml.safe_load(config)

    def parse(self, pray_file):
        """Parse the input file, expanding $variables from the config rules.

        Parameters:
            pray_file - The name of the pray file to be parsed.
        """
        # Hoisted: compile the variable pattern once, not once per line.
        variable_re = re.compile(r'\$\w+')
        with open(pray_file, 'r') as input_file:
            syntax_tree = Tree()  # TODO(review): built but never populated/returned
            for line in input_file:
                line = line.strip('\n')
                # Expand any $variables found in the line using the rules.
                for match in variable_re.findall(line):
                    line = line.replace(match, self.rules[match.strip('$')])
                print(line)
class NodeNotFoundError(Exception):
    """Raised when a named node cannot be located in the tree."""

    def __init__(self, name):
        # Remember which node name was missing so callers can report it.
        self.name = name

    def __str__(self):
        return repr(self.name)
class Tree:
"""A parser syntax tree.
The syntax tree is nothing more than an encapsulated dictionary. When
adding nodes to the tree, the node's name parameter is used as the tree's
key, while the node's dictionary of elements are used as the saved value.
"""
def __init__(self):
self.root_node = None
def add_node(self, node):
"""Add a node to the tree.
Parameters:
Node - The node to be added to the tree.
Returns:
Void
"""
self.nodes[node.name] = node.elements
def remove_node(self, node_name):
"""Remove the first node of a given name from the tree.
If the node does not exist in the tree, return a NodeNotFoundError.
Parameters:
Node_Name - The name of the node to be removed from the tree.
Returns:
Void
"""
i
|
f node_name in self.nodes:
del self.nodes[node.name]
else:
raise NodeNotFoundError(node_name)
def remove_nodes(self, node_name):
"""Remove all nodes of a given
|
name from the tree.
If no nodes exist in the tree, return a NodeNotFoundError.
Parameters:
Node_Name - The name of the notes to be removed from the tree.
"""
class Node:
    """Element found within parser syntax tree.

    Nodes encapsulate 3 values: a name, a dict of attributes, and a
    list of child elements.
    """

    def __init__(self, name, attributes=None, children=None):
        """Create a node.

        Parameters:
            name - The node's name.
            attributes - Optional dict of attributes (defaults to a new dict).
            children - Optional list of child elements (defaults to a new list).
        """
        self.name = name
        # Fresh containers per instance: the original used mutable default
        # arguments, so every Node built with the defaults shared ONE dict
        # and ONE list across all instances.
        self.attributes = {} if attributes is None else attributes
        self.children = [] if children is None else children
|
kailIII/emaresa
|
trunk.pe/report_aeroo_sample/report/parser.py
|
Python
|
agpl-3.0
| 1,951
| 0.003075
|
##############################################################################
#
# Copyright (c) 2008-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <info@alistek.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write
|
to the Free Software
# Foundation, Inc., 59 Tem
|
ple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from report import report_sxw
from report.report_sxw import rml_parse
import lorem
import random
class Parser(report_sxw.rml_parse):
    """Aeroo sample report parser exposing demo helpers to templates."""

    def __init__(self, cr, uid, name, context):
        super(Parser, self).__init__(cr, uid, name, context)
        # Names made available inside the report template's context.
        helpers = {
            'lorem': lorem.do_lorem,
            'random': random,
            'hello_world': self.hello_world,
        }
        self.localcontext.update(helpers)

    def hello_world(self, name):
        # Trivial demo helper callable from the report template.
        return "Hello, %s!" % name
|
mshcruz/LearnPythonTheHardWay
|
ex40.py
|
Python
|
gpl-2.0
| 484
| 0.002066
|
class Song(object):
def __init_
|
_(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print line
happy_bday = Song(["Happy birthday to you",
"I don't want to get sued",
"So I'll stop right here"])
bulls_on_parade = Song(["T
|
hey rally around tha family",
"With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
|
qpfiffer/blackdog
|
home/migrations/0006_textpointofinterest.py
|
Python
|
gpl-2.0
| 745
| 0.002685
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-10-14 01:45
from __future__ import unicode_literals
f
|
rom django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0005_instagrampointofinterest_cached_response'),
]
operations = [
migrations.CreateModel(
name='TextPointOfInterest',
|
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('poi', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.PointOfInterest')),
],
),
]
|
fsxfreak/esys-pbi
|
src/pupil/pupil_src/player/vis_eye_video_overlay.py
|
Python
|
mit
| 12,941
| 0.017232
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import sys, os, platform
from glob import glob
import cv2
import numpy as np
from file_methods import Persistent_Dict
from pyglui import ui
from player_methods import transparent_image_overlay
from plugin import Plugin
from copy import copy
# helpers/utils
from version_utils import VersionFormat
#capture
from video_capture import EndofVideoFileError,FileSeekError,FileCaptureError,File_Source
#mouse
from glfw import glfwGetCursorPos,glfwGetWindowSize,glfwGetCurrentContext
from methods import normalize,denormalize
#logging
import logging
logger = logging.getLogger(__name__)
def get_past_timestamp(idx,timestamps):
"""
recursive function to find the most recent valid timestamp in the past
"""
if idx == 0:
# if at the beginning, we can't go back in time.
return get_future_timestamp(idx,timestamps)
if timestamps[idx]:
res = timestamps[idx][-1]
return res
else:
return get_past_timestamp(idx-1,timestamps)
def get_future_timestamp(idx,timestamps):
"""
recursive function to find most recent valid timestamp in the future
"""
if idx == len(timestamps)-1:
# if at the end, we can't go further into the future.
return get_past_timestamp(idx,timestamps)
elif timestamps[idx]:
return timestamps[idx][0]
else:
idx = min(len(timestamps),idx+1)
return get_future_timestamp(idx,timestamps)
def get_nearest_timestamp(past_timestamp,future_timestamp,world_timestamp):
dt_past = abs(past_timestamp-world_timestamp)
dt_future = abs(future_timestamp-world_timestamp) # abs prob not necessary here, but just for sanity
if dt_past < dt_future:
return past_timestamp
else:
return future_timestamp
def correlate_eye_world(eye_timestamps,world_timestamps):
"""
This function takes a list of eye timestamps and world timestamps
and correlates one eye frame per world frame
Returns a mapping that correlates a single eye frame index with each world frame index.
Up and downsampling is used to achieve this mapping.
"""
# return framewise mapping as a list
e_ts = eye_timestamps
w_ts = list(world_timestamps)
eye_frames_by_timestamp = dict(zip(e_ts,range(len(e_ts))))
eye_timestamps_by_world_index = [[] for i in world_timestamps]
frame_idx = 0
try:
current_e_ts = e_ts.pop(0)
except:
logger.warning("No eye timestamps found.")
return eye_timestamps_by_world_index
while e_ts:
# if the current eye timestamp is before the mean of the current world frame timestamp and the next worldframe timestamp
try:
t_between_frames = ( w_ts[frame_idx]+w_ts[frame_idx+1] ) / 2.
except IndexError:
break
if current_e_ts <= t_between_frames:
eye_timestamps_by_world_index[frame_idx].append(current_e_ts)
current_e_ts = e_ts.pop(0)
else:
frame_idx+=1
idx = 0
eye_world_frame_map = []
# some entiries in the `eye_timestamps_by_world_index` might be empty -- no correlated eye timestamp
# so we will either show the previous frame or next frame - whichever is temporally closest
for candidate,world_ts in zip(eye_timestamps_by_world_index,w_ts):
# if there is no candidate, then assign it to the closest timestamp
if not candidate:
# get most recent timestamp, either in the past or future
e_past_ts = get_past_timestamp(idx,eye_timestamps_by_world_index)
e_future_ts = get_future_timestamp(idx,eye_timestamps_by_world_index)
eye_world_frame_map.append(eye_frames_by_timestamp[get_nearest_timestamp(e_past_ts,e_future_ts,world_ts)])
else:
# TODO - if there is a list of len > 1 - then we should check which is the temporally closest timestamp
eye_world_frame_map.append(eye_frames_by_timestamp[eye_timestamps_by_world_index[idx][-1]])
idx += 1
return eye_world_frame_map
class Vis_Eye_Video_Overlay(Plugin):
"""docstring This plugin allows the user to overlay the eye recording on the recording of his field of vision
Features: flip video across horiz/vert axes, click and drag around interface, scale video size from 20% to 100%,
show only 1 or 2 or both eyes
features updated by Andrew June 2015
"""
def __init__(self,g_pool,alpha=0.6,eye_scale_factor=.5,move_around=0,mirror={'0':False,'1':False}, flip={'0':False,'1':False},pos=[(640,10),(10,10)]):
super().__init__(g_pool)
self.order = .6
self.menu = None
# user controls
self.alpha = alpha #opacity level of eyes
self.eye_scale_factor = eye_scale_factor #scale
self.showeyes = 0,1 #modes: any text containg both means both eye is present, on 'only eye1' if only one eye recording
self.move_around = move_around #boolean whether allow to move clip around screen or not
self.video_size = [0,0] #video_size of recording (bc scaling)
#variables specific to each eye
self.eye_frames = []
self.eye_world_frame_map = []
self.eye_cap = []
self.mirror = mirror #do we horiz flip first eye
self.flip = flip #do we vert flip first eye
self.pos = [list(pos[0]),list(pos[1])] #positions of 2 eyes
self.drag_offset = [None,None]
# load eye videos and eye timestamps
if VersionFormat(self.g_pool.meta_info['Capture Software Version'][1:]) < VersionFormat('0.4'):
eye_video_path = os.path.join(g_pool.rec_dir,'eye.avi'),'None'
eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye_timestamps.npy'),'None'
else:
eye_video_path = os.path.join(g_pool.rec_dir,'eye0.*'),os.path.join(g_pool.rec_dir,'eye1.*')
eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye0_timestamps.npy'),os.path.join(g_pool.rec_dir,'eye1_timestamps.npy')
#try to load eye video and ts for each eye.
for video,ts in zip(eye_video_path,eye_timestamps_path):
try:
self.eye_cap.append(File_Source(self.g_pool,source_path=glob(video)[0],timestamps=np.load(ts)))
except(IndexError,FileCaptureError):
pass
else:
self.eye_frames.
|
append(self.eye_cap[-1].get_frame())
try:
eye_timestamps = list(np.load(ts))
except:
pass
else:
self.eye_world_frame_map.append(correlate_eye_world(eye_timestamps,g_pool.timestamps))
if len(self.eye_cap) == 2:
logger.debug("Loaded binocular eye video data.")
elif len(self.eye_cap) == 1:
logger.debug("Loaded monocular eye video data")
self.showeyes = (0,)
else:
|
logger.error("Could not load eye video.")
self.alive = False
return
def unset_alive(self):
self.alive = False
def init_gui(self):
# initialize the menu
self.menu = ui.Scrolling_Menu('Eye Video Overlay')
self.update_gui()
self.g_pool.gui.append(self.menu)
def update_gui(self):
self.menu.elements[:] = []
self.menu.append(ui.Button('Close',self.unset_alive))
self.menu.append(ui.Info_Text('Show the eye video overlaid on top of the world video. Eye1 is usually the right eye'))
self.menu.append(ui.Slider('alpha',self,min=0.0,step=0.05,max=1.0,label='Opacity'))
self.menu.append(ui.Slider('eye_scale_factor',self,min=0.2,step=0.1,max=1.0,label='Video Scale'))
self.menu.append(ui.Switch('move_around',self,label="Move Overlay"))
if len(self.eye_cap) == 2:
self.menu.append(ui.Selector('showeyes',self,label='Sh
|
asterisk/pjproject
|
tests/pjsua/scripts-sipp/uac-srtp-sdes-reinv-dtls.py
|
Python
|
gpl-2.0
| 258
| 0.01938
|
# $Id$
#
import inc_const a
|
s const
PJSUA = ["--null-audio --max-calls=1 --auto-answer=200 --no-tcp --srtp-secure 0 --use-srtp 2 --srtp-keying=0"]
PJSUA_EXPECTS = [[0, "SR
|
TP uses keying method SDES", ""],
[0, "SRTP uses keying method DTLS-SRTP", ""]
]
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/UpdateQuestionnaireExecutionResult.py
|
Python
|
apache-2.0
| 944
| 0.006356
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the
|
Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
|
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class UpdateQuestionnaireExecutionResult:
    """Result payload for a questionnaire-execution update call.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        # Maps attribute names to their swagger wire types.
        self.swaggerTypes = {'execution_id': 'float'}
        # float; populated from the API response.
        self.execution_id = None
|
GCC15/tcp-chatroom
|
server/python/core/threads.py
|
Python
|
gpl-2.0
| 5,338
| 0.000187
|
"""Threads to be used by the SCRP server"""
# Copyright (C) 2015 Zhang NS, Zifan Li, Zichao Li
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import socket
import threading
from database.dao import Dao
from scrp.request import ScrpRequest, InvalidRequestDictError
from scrp.push import ScrpPush
from scrp.error import *
from network.sockets import *
import env
import logger
class ServerThread(threading.Thread):
    """Main daemon thread: accepts client connections and spawns handlers."""

    def __init__(self):
        super().__init__(name='ServerThread')
        self.__ct = ControlThread(self)
        # Every live ClientHandlerThread.
        self.__cht_set = set()
        # Maps user_id -> ClientHandlerThread.
        self.__id_cht_dict = {}
        # Guards the two containers above.
        self.__lock = threading.RLock()

    def cht_ready(self, cht: 'ClientHandlerThread'):
        """Register a ClientHandlerThread that finished its setup."""
        with self.__lock:
            logger.d(str(cht))
            self.__cht_set.add(cht)

    def run(self):
        logger.i('I started')
        # Bring up the controller listener first.
        self.__ct.start()
        listen_sock = socket.socket()
        server_port = env.get_server_port()
        listen_sock.bind(('', server_port))
        listen_sock.listen(env.get_tcp_listen_backlog())
        logger.i('Listening on server port {}'.format(server_port))
        while True:
            # Accept forever; each connection gets its own handler thread.
            logger.i('Waiting for a new client connection')
            conn, peer = listen_sock.accept()
            logger.i('Connection established with {}'.format(peer))
            handler = ClientHandlerThread(self, conn)
            logger.i('Starting ClientHandlerThread {}'.format(handler.name))
            handler.start()
class ControlThread(threading.Thread):
    """
    Receive control messages from controllers (scrpd.py)
    """

    def __init__(self, st: ServerThread):
        super().__init__(name='ControlThread')
        self.__st = st

    def run(self):
        logger.i('I started')
        ctl_sock = socket.socket()
        ctl_port = env.get_control_port()
        # Bind to localhost only: control messages must never come from
        # remote hosts.
        ctl_sock.bind(('localhost', ctl_port))
        ctl_sock.listen(env.get_tcp_listen_backlog())
        logger.i('Listening on control port {}'.format(ctl_port))
        while True:
            logger.i('Waiting for a new controller connection')
            conn, peer = ctl_sock.accept()
            logger.i('Connection established with controller {}'
                     .format(peer))
            # TODO: actually process controller commands.
class ClientHandlerThread(threading.Thread):
"""Handle a client connection"""
def __init__(self, st: ServerThread, client_sock: socket.socket):
super().__init__()
self.__st = st
self.__client_sock = client_sock
self.__bytes_msg_sock = BytesMessageSocketWrapper(self.__client_sock)
self.__unicode_sock = UnicodeSocketWrapper(self.__bytes_msg_sock)
self.__json_sock = JsonSocketWrapper(self.__unicode_sock)
self.__scrp_sock = ScrpSocketWrapper(self.__json_sock)
# To avoid concurrent writes
self.__send_lock = threading.RLock()
def run(self):
logger.i('I started')
self.__st.cht_ready(self)
while True:
# Main loop for receiving requests
try:
req = self.__scrp_sock.receive_request()
# Dispatch
rht = RequestHandlerThread(self, req)
logger.d('Starting RequestHandlerThread {}'.format(rht.name))
rht.start()
except BytesMessageReceiveError as e:
# Connection is broken
break
except (MessageDecodeError,
JsonParseError,
InvalidRequestDictError) as e:
#
|
Bad request
raise BadRequestError
except ScrpError as e:
logger.e(e)
logger.d('Thread terminate')
def send_response(self, resp: ScrpResponse):
|
with self.__send_lock:
pass
def send_push(self, push: ScrpPush):
with self.__send_lock:
pass
class RequestHandlerThread(threading.Thread):
    """Handles a single request on behalf of a ClientHandlerThread."""

    def __init__(self, cht: ClientHandlerThread, req: ScrpRequest):
        super().__init__()
        self.__cht = cht
        self.__req = req

    def run(self):
        logger.i('I started')
        # Instantiate the handler class registered for this request type,
        # run it, and send the response back through the owning client
        # handler (which serializes writes on the socket).
        handler = self.__req.get_handler_cls()()
        resp = handler.handle_request(self.__req)
        self.__cht.send_response(resp)
|
wq2012/SpectralCluster
|
spectralcluster/spectral_clusterer.py
|
Python
|
apache-2.0
| 8,653
| 0.004969
|
"""A spectral clusterer class to perform clustering."""
import numpy as np
from spectralcluster import constraint
from spectralcluster import custom_distance_kmeans
from spectralcluster import laplacian
from spectralcluster import refinement
from spectralcluster import utils
RefinementName = refinement.RefinementName
LaplacianType = laplacian.LaplacianType
ConstraintName = constraint.ConstraintName
EigenGapType = utils.EigenGapType
class SpectralClusterer:
"""Spectral clustering class."""
def __init__(self,
min_clusters=None,
max_clusters=None,
refinement_options=None,
autotune=None,
laplacian_type=None,
stop_eigenvalue=1e-2,
row_wise_renorm=False,
custom_dist="cosine",
max_iter=300,
constraint_options=None,
eigengap_type=EigenGapType.Ratio,
affinity_function=utils.compute_affinity_matrix,
post_eigen_cluster_function=custom_distance_kmeans.run_kmeans):
|
"""Constructor of the clusterer.
Args:
min_clusters: minimal number of clusters allowed (only effective if not
None)
max_clusters: maximal number of clusters allowed (only effective if not
None), can be used together with min_clusters to fix the number of
clusters
refinement_options: a RefinementOptions object that contains refinement
arguments for the affinity matrix. If None, we will not refine
autotune: an AutoTu
|
ne object to automatically search p_percentile
laplacian_type: a LaplacianType. If None, we do not use a laplacian matrix
stop_eigenvalue: when computing the number of clusters using Eigen Gap, we
do not look at eigen values smaller than this value
row_wise_renorm: if True, perform row-wise re-normalization on the
spectral embeddings
custom_dist: str or callable. custom distance measure for k-means. If a
string, "cosine", "euclidean", "mahalanobis", or any other distance
functions defined in scipy.spatial.distance can be used
max_iter: the maximum number of iterations for the custom k-means
constraint_options: a ConstraintOptions object that contains constraint
arguments
eigengap_type: the type of the eigengap computation
affinity_function: a function to compute the affinity matrix from the
embeddings. This defaults to (cos(x,y)+1)/2
post_eigen_cluster_function: a function to cluster the spectral embeddings
after the eigenvalue computations. This function must have the same
signature as custom_distance_kmeans.run_kmeans
"""
self.min_clusters = min_clusters
self.max_clusters = max_clusters
if not refinement_options:
self.refinement_options = refinement.RefinementOptions()
else:
self.refinement_options = refinement_options
self.autotune = autotune
self.laplacian_type = laplacian_type
self.row_wise_renorm = row_wise_renorm
self.stop_eigenvalue = stop_eigenvalue
self.custom_dist = custom_dist
self.max_iter = max_iter
self.constraint_options = constraint_options
self.eigengap_type = eigengap_type
self.affinity_function = affinity_function
self.post_eigen_cluster_function = post_eigen_cluster_function
  def _compute_eigenvectors_ncluster(self, affinity, constraint_matrix=None):
    """Perform eigen decomposition and estimate the number of clusters.

    Perform affinity refinement, eigen decomposition and sort eigenvectors by
    the real part of eigenvalues. Estimate the number of clusters using the
    EigenGap principle.

    Args:
      affinity: the affinity matrix of input data
      constraint_matrix: numpy array of shape (n_samples, n_samples). The
        constraint matrix with prior information

    Returns:
      eigenvectors: sorted eigenvectors. numpy array of shape
        (n_samples, n_samples)
      n_clusters: number of clusters as an integer
      max_delta_norm: normalized maximum eigen gap
    """
    # Perform refinement operations on the affinity matrix.
    for refinement_name in self.refinement_options.refinement_sequence:
      refinement_operator = self.refinement_options.get_refinement_operator(
          refinement_name)
      affinity = refinement_operator.refine(affinity)
    if (self.constraint_options and
        not self.constraint_options.apply_before_refinement):
      # Perform the constraint operation after refinement (the
      # before-refinement case is handled by the caller, `predict`).
      affinity = self.constraint_options.constraint_operator.adjust_affinity(
          affinity, constraint_matrix)
    if not self.laplacian_type or self.laplacian_type == LaplacianType.Affinity:
      # Perform eigen decomposion.
      # For a plain affinity matrix the informative eigenvalues are the
      # largest ones, hence descending order below.
      (eigenvalues, eigenvectors) = utils.compute_sorted_eigenvectors(affinity)
      # Get number of clusters.
      n_clusters, max_delta_norm = utils.compute_number_of_clusters(
          eigenvalues,
          max_clusters=self.max_clusters,
          stop_eigenvalue=self.stop_eigenvalue,
          eigengap_type=self.eigengap_type,
          descend=True)
    else:
      # Compute Laplacian matrix
      laplacian_norm = laplacian.compute_laplacian(
          affinity, laplacian_type=self.laplacian_type)
      # Perform eigen decomposion. Eigen values are sorted in an ascending
      # order, because for a Laplacian the informative eigenvalues are the
      # smallest ones.
      (eigenvalues, eigenvectors) = utils.compute_sorted_eigenvectors(
          laplacian_norm, descend=False)
      # Get number of clusters. Eigen values are sorted in an ascending order
      n_clusters, max_delta_norm = utils.compute_number_of_clusters(
          eigenvalues,
          max_clusters=self.max_clusters,
          eigengap_type=self.eigengap_type,
          descend=False)
    return eigenvectors, n_clusters, max_delta_norm
def predict(self, embeddings, constraint_matrix=None):
"""Perform spectral clustering on data embeddings.
The spectral clustering is performed on an affinity matrix.
Args:
embeddings: numpy array of shape (n_samples, n_features)
constraint_matrix: numpy array of shape (n_samples, n_samples). The
constraint matrix with prior information
Returns:
labels: numpy array of shape (n_samples,)
Raises:
TypeError: if embeddings has wrong type
ValueError: if embeddings has wrong shape
"""
if not isinstance(embeddings, np.ndarray):
raise TypeError("embeddings must be a numpy array")
if len(embeddings.shape) != 2:
raise ValueError("embeddings must be 2-dimensional")
# Compute affinity matrix.
affinity = self.affinity_function(embeddings)
if (self.constraint_options and
self.constraint_options.apply_before_refinement):
# Perform the constraint operation before refinement
affinity = self.constraint_options.constraint_operator.adjust_affinity(
affinity, constraint_matrix)
if self.autotune:
# Use Auto-tuning method to find a good p_percentile.
if (RefinementName.RowWiseThreshold
not in self.refinement_options.refinement_sequence):
raise ValueError(
"AutoTune is only effective when the refinement sequence"
"contains RowWiseThreshold")
def p_percentile_to_ratio(p_percentile):
"""Compute the `ratio` given a `p_percentile` value."""
self.refinement_options.p_percentile = p_percentile
(eigenvectors, n_clusters,
max_delta_norm) = self._compute_eigenvectors_ncluster(
affinity, constraint_matrix)
ratio = np.sqrt(1 - p_percentile) / max_delta_norm
return ratio, eigenvectors, n_clusters
eigenvectors, n_clusters, _ = self.autotune.tune(p_percentile_to_ratio)
else:
# Do not use Auto-tune.
eigenvectors, n_clusters, _ = self._compute_eigenvectors_ncluster(
affinity, constraint_matrix)
if self.min_clusters is not None:
n_clusters = max(n_clusters, self.min_clusters)
# Get spectral embeddings.
spectral_embeddings = eigenvectors[:, :n_clusters]
if self.row_wise_renorm:
# Perform row wise re-normalization.
rows_norm = np.linalg.
|
williamFalcon/pytorch-lightning
|
tests/accelerators/ddp_model.py
|
Python
|
apache-2.0
| 2,010
| 0.000498
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runs either `.fit()` or `.test()` on a single node across multiple gpus.
"""
import os
from argparse import ArgumentParser
import torch
from pytorch_lightning import seed_everything, Trainer
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.simple_models import ClassificationModel
def main():
|
seed_everything(4321)
parser = ArgumentParser(add_help=False)
parser = Trainer.add_argparse_args(parser)
parser.add_argument("--trainer_method", default="fit")
parser.add_argument("--tmpdir")
parser.add_argument("--workdir")
parser.set_defaults(gpus=2)
parser.set_defaults(accelerator="ddp")
args = parser.parse_args()
dm = ClassifDat
|
aModule()
model = ClassificationModel()
trainer = Trainer.from_argparse_args(args)
if args.trainer_method == "fit":
trainer.fit(model, datamodule=dm)
result = None
elif args.trainer_method == "test":
result = trainer.test(model, datamodule=dm)
elif args.trainer_method == "fit_test":
trainer.fit(model, datamodule=dm)
result = trainer.test(model, datamodule=dm)
else:
raise ValueError(f"Unsupported: {args.trainer_method}")
result_ext = {"status": "complete", "method": args.trainer_method, "result": result}
file_path = os.path.join(args.tmpdir, "ddp.result")
torch.save(result_ext, file_path)
if __name__ == "__main__":
main()
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/db/backends/sqlite3/introspection.py
|
Python
|
mit
| 18,775
| 0.001704
|
import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models.indexes import Index
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """Extract the size number from a "varchar(11)" type name."""
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
    """Dictionary-like mapping from raw SQLite type names to Django fields.

    SQLite does not normalize column types ("varchar(30)", "VarChar", ...),
    so plain dict lookup is not enough; __getitem__ strips any size suffix
    and lowercases before looking the type up.
    """

    # Maps SQL types to Django Field types. Some of the SQL types have
    # multiple entries here because SQLite allows for anything and doesn't
    # normalize the field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'varchar': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        # "VarChar(30)" -> "varchar": lowercase, drop the "(...)" size
        # suffix, trim whitespace.
        base_type = key.lower().split('(', 1)[0].strip()
        return self.base_data_types_reverse[base_type]
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {'BigIntegerField', 'IntegerField'}:
# No support for BigAutoField as SQLite treats all integer primary
# keys as signed 64-bit integers.
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # Each dict from self._table_info (not visible in this module
        # excerpt) is mapped onto the positional FieldInfo tuple declared at
        # module level (BaseFieldInfo._fields plus a trailing 'pk' flag).
        # The three None slots are presumably display_size/precision/scale,
        # which SQLite does not report -- TODO confirm against
        # BaseFieldInfo._fields.
        return [
            FieldInfo(
                info['name'],
                info['type'],
                None,
                info['size'],
                None,
                None,
                info['null_ok'],
                info['default'],
                info['pk'] == 1,  # assumes _table_info reports pk as 1 for the PK column -- TODO confirm
            ) for info in self._table_info(cursor, table_name)
        ]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
create_sql, table_type = cursor.fetchone()
if table_type == 'view':
# It might be a view, then no results will be returned
return relations
results = create_sql[create_sql.index('(') + 1:create_sql.r
|
index(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
f
|
or field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
create_sql, table_type = row
if table_type == 'view':
# Views don't have a primary key.
return None
fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
for field_desc in fields_sql.split(','):
field_desc = field_desc.strip()
m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
if m:
return m.group(1) if m.group(1) else m.group(2)
|
carloderamo/mushroom
|
mushroom_rl/distributions/gaussian.py
|
Python
|
mit
| 14,862
| 0.005517
|
import numpy as np
from .distribution import Distribution
from scipy.stats import multivariate_normal
from scipy.optimize import minimize
class GaussianDistribution(Distribution):
    """
    Gaussian distribution with fixed covariance matrix. The parameters
    vector represents only the mean.
    """
    def __init__(self, mu, sigma):
        """
        Constructor.

        Args:
            mu (np.ndarray): initial mean of the distribution;
            sigma (np.ndarray): covariance matrix of the distribution.
        """
        self._mu = mu
        self._sigma = sigma
        # Covariance is fixed for the lifetime of the distribution, so its
        # inverse can be cached once here.
        self._inv_sigma = np.linalg.inv(sigma)

        self._add_save_attr(
            _mu='numpy',
            _sigma='numpy',
            _inv_sigma='numpy'
        )

    def sample(self):
        """Draw one parameter vector from N(mu, sigma)."""
        return np.random.multivariate_normal(self._mu, self._sigma)

    def log_pdf(self, theta):
        """Return the log-density of ``theta`` under the distribution."""
        return multivariate_normal.logpdf(theta, self._mu, self._sigma)

    def __call__(self, theta):
        """Return the density of ``theta`` under the distribution."""
        return multivariate_normal.pdf(theta, self._mu, self._sigma)

    def entropy(self):
        """Return the differential entropy of the Gaussian."""
        n_dims = len(self._mu)
        # slogdet is used instead of log(det(...)) for numerical stability.
        (sign_sigma, logdet_sigma) = np.linalg.slogdet(self._sigma)
        return GaussianDistribution._entropy(logdet_sigma, n_dims)

    def mle(self, theta, weights=None):
        """(Weighted) maximum-likelihood estimate of the mean.

        Args:
            theta (np.ndarray): samples, one per row;
            weights (np.ndarray, None): optional per-sample weights.
        """
        if weights is None:
            self._mu = np.mean(theta, axis=0)
        else:
            self._mu = weights.dot(theta) / np.sum(weights)

    def con_wmle(self, theta, weights, eps, *args):
        """Weighted MLE of the mean under a KL trust-region constraint.

        The Lagrange multiplier eta of the KL constraint is found
        numerically; the mean is then recomputed in closed form.

        Args:
            theta (np.ndarray): samples, one per row;
            weights (np.ndarray): per-sample weights;
            eps (float): maximum admissible KL divergence between the old
                and the updated distribution.
        """
        n_dims = len(self._mu)
        mu = self._mu
        sigma = self._sigma

        eta_start = np.array([1000])
        res = minimize(GaussianDistribution._lagrangian_eta, eta_start,
                       bounds=((np.finfo(np.float32).eps, np.inf),),
                       args=(weights, theta, mu, sigma, n_dims, eps),
                       method='SLSQP')
        eta_opt = res.x[0]

        self._mu = GaussianDistribution._compute_mu_from_lagrangian(weights, theta, mu, eta_opt)

    def diff_log(self, theta):
        """Gradient of the log-density w.r.t. the parameters (the mean)."""
        delta = theta - self._mu
        g = self._inv_sigma.dot(delta)
        return g

    def get_parameters(self):
        """Return the parameter vector (the mean only)."""
        return self._mu

    def set_parameters(self, rho):
        """Set the parameter vector (the mean only)."""
        self._mu = rho

    @property
    def parameters_size(self):
        # Only the mean is adjustable; the covariance is fixed.
        # (Reconstructed: the return statement was split by dataset
        # separators in the original dump.)
        return len(self._mu)

    @staticmethod
    def _compute_mu_from_lagrangian(weights, theta, mu, eta):
        # Closed-form constrained update: a convex combination of the
        # weighted sample mean and the previous mean, controlled by eta.
        weights_sum = np.sum(weights)
        mu_new = (weights @ theta + eta * mu) / (weights_sum + eta)
        return mu_new

    @staticmethod
    def _kl_constraint(mu, mu_new, sigma, sigma_new, sigma_inv, sigma_new_inv, logdet_sigma, logdet_sigma_new, n_dims):
        # KL divergence between two Gaussians in terms of precomputed
        # inverses and log-determinants. (Signature reconstructed: it was
        # split by dataset separators in the original dump.)
        return 0.5*(np.trace(sigma_new_inv@sigma) - n_dims + logdet_sigma_new - logdet_sigma + (mu_new - mu).T @ sigma_new_inv @ (mu_new - mu))

    @staticmethod
    def _entropy(logdet_sigma, n_dims):
        # Differential entropy of an n-dimensional Gaussian given log|sigma|.
        c = n_dims * np.log(2*np.pi)
        return 0.5 * (logdet_sigma + c + n_dims)

    @staticmethod
    def _lagrangian_eta(lag_array, weights, theta, mu, sigma, n_dims, eps):
        # Dual objective evaluated at eta = lag_array[0]; minimized by SLSQP
        # in con_wmle above.
        eta = lag_array[0]

        mu_new = GaussianDistribution._compute_mu_from_lagrangian(weights, theta, mu, eta)

        sigma_inv = np.linalg.inv(sigma)
        (sign_sigma, logdet_sigma) = np.linalg.slogdet(sigma)
        c = n_dims * np.log(2*np.pi)

        # Weighted log-likelihood of the samples under the candidate mean.
        sum1 = np.sum([w_i * (-0.5 * (theta_i - mu_new)[:,np.newaxis].T @ sigma_inv @ (theta_i - mu_new)[:,np.newaxis] - 0.5 * logdet_sigma - 0.5 * c) for w_i, theta_i in zip(weights, theta)])

        # KL penalty term; sigma is unchanged, so only the mean contributes.
        sum2 = eta * (eps - GaussianDistribution._kl_constraint(mu, mu_new, sigma, sigma, sigma_inv, sigma_inv, logdet_sigma, logdet_sigma, n_dims))

        return sum1 + sum2
class GaussianDiagonalDistribution(Distribution):
"""
Gaussian distribution with diagonal covariance matrix. The parameters
vector represents the mean and the standard deviation for each dimension.
"""
    def __init__(self, mu, std):
        """
        Constructor.
        Args:
            mu (np.ndarray): initial mean of the distribution;
            std (np.ndarray): initial vector of standard deviations for each
                variable of the distribution.
        """
        # Only a 1-D vector of per-dimension standard deviations is accepted.
        assert(len(std.shape) == 1)
        self._mu = mu
        self._std = std
        # Register attributes for (de)serialization by the base class.
        self._add_save_attr(
            _mu='numpy',
            _std='numpy'
        )
def sample(self):
sigma = np.diag(self._std**2)
return np.random.multivariate_normal(self._mu, sigma)
def log_pdf(self, theta):
sigma = np.diag(self._std ** 2)
return multivariate_normal.logpdf(theta, self._mu, sigma)
def __call__(self, theta):
sigma = np.diag(self._std ** 2)
return multivariate_normal.pdf(theta, self._mu, sigma)
    def entropy(self):
        """Return the differential entropy of the diagonal Gaussian."""
        n_dims = len(self._mu)
        # Build the full covariance from the stored std vector; slogdet is
        # preferred over log(det(...)) for numerical stability.
        sigma = np.diag(self._std**2)
        (sign_sigma, logdet_sigma) = np.linalg.slogdet(sigma)
        return GaussianDiagonalDistribution._entropy(logdet_sigma, n_dims)
    def mle(self, theta, weights=None):
        """(Weighted) maximum-likelihood estimate of mean and std.

        Args:
            theta (np.ndarray): samples, one per row;
            weights (np.ndarray, None): optional per-sample weights.
        """
        if weights is None:
            self._mu = np.mean(theta, axis=0)
            self._std = np.std(theta, axis=0)
        else:
            sumD = np.sum(weights)
            sumD2 = np.sum(weights**2)
            # Bias-correction denominator based on the effective sample size
            # of the weights (weighted analogue of Bessel's correction).
            Z = sumD - sumD2 / sumD
            self._mu = weights.dot(theta) / sumD
            delta2 = (theta - self._mu)**2
            self._std = np.sqrt(weights.dot(delta2) / Z)
    def con_wmle(self, theta, weights, eps, kappa):
        """Constrained weighted MLE of mean and standard deviations.

        The two Lagrange multipliers (eta for the KL constraint, omega,
        presumably for an entropy constraint given ``kappa`` — TODO confirm
        against ``_lagrangian_eta_omega``) are found numerically, then the
        parameters are recomputed in closed form.

        Args:
            theta (np.ndarray): samples, one per row;
            weights (np.ndarray): per-sample weights;
            eps (float): bound passed to the dual for the KL constraint;
            kappa (float): additional bound forwarded to the dual.
        """
        n_dims = len(self._mu)
        mu = self._mu
        sigma = self._std
        # Initial guess for (eta, omega); both are kept strictly positive
        # through the bounds below.
        eta_omg_start = np.array([1000, 0])
        res = minimize(GaussianDiagonalDistribution._lagrangian_eta_omega, eta_omg_start,
                       bounds=((np.finfo(np.float32).eps, np.inf),(np.finfo(np.float32).eps, np.inf)),
                       args=(weights, theta, mu, sigma, n_dims, eps, kappa),
                       method='SLSQP')
        eta_opt, omg_opt  = res.x[0], res.x[1]
        self._mu, self._std = GaussianDiagonalDistribution._compute_mu_sigma_from_lagrangian(weights, theta, mu, sigma, eta_opt, omg_opt)
def diff_log(self, theta):
n_dims = len(self._mu)
sigma = self._std**2
g = np.empty(self.parameters_size)
delta = theta - self._mu
g_mean = delta / sigma
g_std = delta**2 / (self._std**3) - 1 / self._std
g[:n_dims] = g_mean
g[n_dims:] = g_std
return g
def get_parameters(self):
rho = np.empty(self.parameters_size)
n_dims = len(self._mu)
rho[:n_dims] = self._mu
rho[n_dims:] = self._std
return rho
def set_parameters(self, rho):
n_dims = len(self._mu)
self._mu = rho[:n_dims]
self._std = rho[n_dims:]
    @property
    def parameters_size(self):
        # One mean and one standard deviation per dimension.
        return 2 * len(self._mu)
    @staticmethod
    def _compute_mu_sigma_from_lagrangian(weights, theta, mu, sigma, eta, omg):
        # Closed-form constrained update of mean and std given the dual
        # multipliers eta and omg found by con_wmle.
        weights_sum = np.sum(weights)
        # Mean: convex combination of weighted sample mean and previous mean.
        mu_new = (weights @ theta + eta * mu) / (weights_sum + eta)
        # Std: weighted scatter around the new mean, regularized towards the
        # previous covariance; omg only rescales the denominator.
        sigma_new = np.sqrt( ( np.sum([w_i * (theta_i-mu_new)**2 for theta_i, w_i in zip(theta, weights)], axis=0) + eta*sigma**2 + eta*(mu_new - mu)**2 ) / ( weights_sum + eta - omg ) )
        return mu_new, sigma_new
    @staticmethod
    def _kl_constraint(mu, mu_new, sigma, sigma_new, sigma_inv, sigma_new_inv, logdet_sigma, logdet_sigma_new, n_dims):
        # KL divergence between two n-dimensional Gaussians, expressed in
        # terms of precomputed inverses and log-determinants.
        return 0.5*(np.trace(sigma_new_inv@sigma) - n_dims + logdet_sigma_new - logdet_sigma + (mu_new - mu).T @ sigma_new_inv @ (mu_new - mu))
@staticmethod
def _entropy(logdet_sigma, n_dims):
c = n_dims * np.log(2*np.pi)
return 0.5 * (logdet_sigma + c + n_dims)
@staticmethod
def _lagrangian_eta_omega(lag_array, weights, theta, mu, sigma, n_dims, eps, kappa):
eta, omg = lag_array[0], lag_array[1]
mu_new, sigma_new = GaussianDiagonalDistribution._compute_mu_sigma_from_lagrangian(weights, theta, mu, sigma, eta, omg)
sigma = np.diag(sigma**2)
sigma_new = np.diag(sigma_new**2)
sigma_inv = np.linalg.inv(sigma)
sigma
|
flux3dp/fluxghost
|
fluxghost/api/stl_slicing_parser.py
|
Python
|
agpl-3.0
| 13,536
| 0.002512
|
import subprocess
import logging
import os, sys, traceback
import urllib
from fluxclient import check_platform
from .misc import BinaryUploadHelper, BinaryHelperMixin, OnTextMessageMixin
logger = logging.getLogger("API.SLICING")
StlSlicer = None
StlSlicerCura = None
def get_default_cura():
    """Return the value of the ``cura`` environment variable.

    Raises KeyError when the variable is not set.
    """
    cura_path = os.environ["cura"]
    return cura_path
def get_default_cura2():
    """Return the value of the ``cura2`` environment variable.

    Raises KeyError when the variable is not set.
    """
    cura2_path = os.environ["cura2"]
    return cura2_path
def stl_slicing_parser_api_mixin(cls):
class StlSlicingParserApi(BinaryHelperMixin, OnTextMessageMixin, cls):
"""
This websocket is use to slice a stl/obj model
"""
POOL_TIME = 30.0
def __init__(self, *args):
super().__init__(*args)
try:
global StlSlicer
global StlSlicerCura
if StlSlicer is None:
from fluxclient.printer.stl_slicer import (
StlSlicer as _StlSlicer,
StlSlicerCura as _StlSlicerCura)
StlSlicer = _StlSlicer
StlSlicerCura = _StlSlicerCura
except ImportError as e:
logger.exception("Slicer error")
self.send_fatal("LIBRARY_NOT_FOUND")
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stderr)
return
self.m_stl_slicer = StlSlicer('')
self._change_engine('slic3r', 'default')
# self.change_engine('cura default')
self.cmd_mapping = {
'upload': [self.begin_recv_stl, 'upload'],
'upload_image': [self.begin_recv_stl, 'upload_image'],
'load_stl_from_path': [self.load_stl_from_path],
'set': [self.set],
'go': [self.gcode_generate],
'delete': [self.delete],
'advanced_setting': [self.advanced_setting],
'get_path': [self.get_path],
'get_path_async': [self.get_path_async],
'duplicate': [self.duplicate],
'meta_option': [self.meta_option],
'begin_slicing': [self.begin_slicing],
'end_slicing': [self.end_slicing],
'report_slicing': [self.report_slicing],
'get_result': [self.get_result],
'change_engine': [self.change_engine],
'check_engine': [self.check_engine],
}
self.ext_metadata = {}
def begin_recv_stl(self, params, flag):
if flag == 'upload':
params = params.split()
if len(params) == 2:
name, file_length = params
buf_type = 'stl'
elif len(params) == 3:
name, file_length, buf_type = params
elif flag == 'upload_image':
# useless
name = ''
buf_type = ''
file_length = params
logger.debug('upload_image {}'.format(file_length))
if int(file_length) == 0:
self.send_error('12', info='empty file!')
else:
helper = BinaryUploadHelper(int(file_length), self.end_recv_stl, name, flag, buf_type)
self.set_binary_helper(helper)
self.send_continue()
def end_recv_stl(self, buf, *args):
if args[1] == 'upload':
logger.debug('upload ' + args[0] + args[2])
ret = self.m_stl_slicer.upload(args[0], buf, args[2])
if ret:
self.send_ok()
else:
self.send_error('15', info="File parsing fail")
return
elif args[1] == 'upload_image':
ret = self.m_stl_slicer.upload_image(buf)
self.send_ok()
def load_stl_from_path(self, params):
logger.debug('load_stl_from_path ' + params)
params = params.split()
if len(params) == 2:
name, path = params
buf_type = 'stl'
elif len(params) == 3:
name, path, buf_type = params
#buf_type could be stl or obj
ret = self.m_stl_slicer.upload(name, urllib.parse.unquote(path), buf_type)
if ret:
self.send_ok()
else:
self.send_error('15', info="File parsing fail")
return
def duplicate(self, params):
logger.debug('duplicate ' + params)
name_in, name_out = params.split()
flag = self.m_stl_slicer.duplicate(name_in, name_out)
if flag:
self.send_ok()
else:
self.send_error('13', info='{} not exist'.format(name_in))
def set(self, params):
params = params.split()
assert len(params) == 10, 'wrong number of parameters %d' % len(params)
name = params[0]
position_x = float(params[1])
position_y = float(params[2])
position_z = float(params[3])
rotation_x = float(params[4])
rotation_y = float(params[5])
rotation_z = float(params[6])
scale_x = float(params[7])
scale_y = float(params[8])
scale_z = float(params[9])
logger.debug('set {} {} {} {} {} {} {} {} {} {}'.format(name, position_x, position_y, position_z, rotation_x, rotation_y, rotation_z, scale_x, scale_y, scale_z))
set_result = self.m_stl_slicer.set(name, [position_x, position_y, position_z, rotation_x, rotation_y, rotation_z, scale_x, scale_y, scale_z])
if set_result == 'ok':
self.send_ok()
|
else:
self.send_error('14', info=set_result)
def adva
|
nced_setting(self, params):
bad_lines = self.m_stl_slicer.advanced_setting(params)
if bad_lines != []:
for line_num, err_msg in bad_lines:
self.send_error('7', info='line %d: %s' % (line_num, err_msg))
logger.debug('line %d: %s' % (line_num, err_msg))
self.send_ok()
def gcode_generate(self, params):
raise RuntimeError('is this still working?')
names = params.split()
if names[-1] == '-g':
output_type = '-g'
names = names[:-1]
elif names[-1] == '-f':
output_type = '-f'
names = names[:-1]
else:
output_type = '-f'
output, metadata = self.m_stl_slicer.gcode_generate(names, self, output_type)
# self.send_progress('finishing', 1.0)
if output:
self.send_text('{"slice_status": "complete", "length": %d, "time": %.3f, "filament_length": %.2f}' % (len(output), metadata[0], metadata[1]))
self.send_binary(output)
logger.debug('slicing finish')
else:
self.send_error(metadata)
logger.debug('slicing fail')
def begin_slicing(self, params):
names = params.split()
self.path_bytes = None
if names[-1] == '-g':
output_type = '-g'
names = names[:-1]
elif names[-1] == '-f':
output_type = '-f'
names = names[:-1]
else:
output_type = '-f'
ret, msg = self.m_stl_slicer.begin_slicing(names, self, output_type)
if ret:
self.send_ok()
else:
self.send_error('16', info=msg)
def end_slicing(self, *args):
self.m_stl_slicer.end_slicing()
self.send_ok()
def report_slicing(self, *args):
for m in self.m_stl_slicer.report_slicing():
self.send_text(m)
self.send_ok()
def get_result(self, *args):
if self.m_stl_slicer.output:
self.send_ok(info=str(len(self.m_stl_sli
|
andrewyoung1991/django-restframework-stripe
|
tests/test_customer.py
|
Python
|
bsd-2-clause
| 4,147
| 0.001929
|
from unittest import mock
import pytest
import stripe
from model_mommy import mommy
from rest_framework.reverse import reverse
from restframework_stripe import models
from restframework_stripe.test import get_mock_resource
@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_update_bank_acct(customer_retrieve, customer_update, customer,
        bank_account, api_client):
    # mock.patch decorators map bottom-up: ``retrieve`` -> customer_retrieve,
    # ``save`` -> customer_update.
    # The bank account must belong to the same owner as the customer for the
    # endpoint to accept it as a default source.
    bank_account.owner = customer.owner
    bank_account.save()
    api_client.force_authenticate(customer.owner)
    data = {
        "default_source": bank_account.id,
        "default_source_type": "bank_account"
        }
    # Stub the Stripe round-trip with canned resources so no network is hit.
    customer_retrieve.return_value = get_mock_resource("Customer")
    customer_update.return_value = get_mock_resource("Customer", default_source=bank_account.source)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert customer.default_source.id == bank_account.id
@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_update_card(customer_retrieve, customer_update, customer, card, api_client):
    # mock.patch decorators map bottom-up: ``retrieve`` -> customer_retrieve,
    # ``save`` -> customer_update.
    # The card must belong to the same owner as the customer for the endpoint
    # to accept it as a default source.
    card.owner = customer.owner
    card.save()
    api_client.force_authenticate(customer.owner)
    data = {
        "default_source": card.id,
        "default_source_type": "card"
        }
    # Stub the Stripe round-trip with canned resources so no network is hit.
    customer_retrieve.return_value = get_mock_resource("Customer")
    customer_update.return_value = get_mock_resource("Customer", default_source=card.source)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert customer.default_source.id == card.id
@pytest.mark.django_db
def test_customer_to_record_with_card_as_source(card):
    """A Customer whose default_source is a card resolves to the Card record."""
    mocked = get_mock_resource("Customer", default_source=card.source)
    parsed = models.Customer.stripe_object_to_record(mocked)
    assert parsed["default_source"].id == card.id
@pytest.mark.django_db
def test_customer_to_record_with_bank_account_as_source(bank_account):
    """A Customer whose default_source is a bank account resolves to the
    BankAccount record."""
    mocked = get_mock_resource("Customer", default_source=bank_account.source)
    parsed = models.Customer.stripe_object_to_record(mocked)
    assert parsed["default_source"].id == bank_account.id
@pytest.mark.django_db
def test_customer_to_record_with_string_as_source():
    """An opaque string source cannot be resolved, so the key is omitted."""
    mocked = get_mock_resource("Customer", default_source="bjkldjkfd532")
    parsed = models.Customer.stripe_object_to_record(mocked)
    assert parsed.get("default_source") is None
@mock.patch("stripe.ListObject.create")
@mock.patch("stripe.Customer.save")
@mock.patch("stripe.Customer.retrieve")
@pytest.mark.django_db
def test_customer_add_payment_method(a_retrieve, a_update, l_create, customer, api_client):
    # mock.patch decorators map bottom-up: retrieve -> a_retrieve,
    # save -> a_update, ListObject.create -> l_create.
    # (Two statements in this test were split by dataset separators in the
    # original dump; reconstructed below.)
    api_client.force_authenticate(customer.owner)
    data = {
        "source": "fkdsla;jfioewni3o2ndsa",
        "email": "test@test.com",
        }
    new_card = get_mock_resource("Card")
    # After the PATCH, the customer should come back with the token consumed
    # and the new card installed as the default source.
    updated_data = data.copy()
    updated_data.pop("source")
    updated_data["default_source"] = new_card
    a_retrieve.return_value = get_mock_resource("Customer")
    l_create.return_value = new_card
    a_update.return_value = get_mock_resource("Customer", **updated_data)
    uri = reverse("rf_stripe:customer-detail", kwargs={"pk": customer.pk})
    response = api_client.patch(uri, data=data, format="json")
    customer.refresh_from_db()
    assert response.status_code == 200, response.data
    assert 0 < models.Card.objects.filter(owner=customer.owner).count()
    assert customer.source["email"] == data["email"]
@pytest.mark.django_db
def test_options(customer, api_client):
    """OPTIONS on the customer list endpoint succeeds for the owner."""
    api_client.force_authenticate(customer.owner)
    endpoint = reverse("rf_stripe:customer-list")
    response = api_client.options(endpoint)
    assert response.status_code == 200, response.data
|
diofeher/django-nfa
|
django/core/management/commands/compilemessages.py
|
Python
|
bsd-3-clause
| 2,462
| 0.004468
|
import os
import sys
from optparse import make_option

from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
try:
set
except NameError:
from sets import Set as set # For Python 2.3
def compile_messages(locale=None):
    """Compile every .po catalog under the locale directories into a .mo
    file using GNU msgfmt.

    Args:
        locale: optional locale code; when given, only that locale's
            LC_MESSAGES directory of each base dir is processed.

    Raises:
        CommandError: when no existing locale base directory is found.
    """
    basedirs = (os.path.join('conf', 'locale'), 'locale')
    if os.environ.get('DJANGO_SETTINGS_MODULE'):
        from django.conf import settings
        basedirs += settings.LOCALE_PATHS

    # Gather existing directories.
    basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))

    if not basedirs:
        # NOTE: CommandError must be imported from django.core.management.base
        # (previously missing, which turned this error path into a NameError).
        # The message text was reconstructed from the corrupted original.
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")

    for basedir in basedirs:
        if locale:
            basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
        for dirpath, dirnames, filenames in os.walk(basedir):
            for f in filenames:
                if f.endswith('.po'):
                    sys.stderr.write('processing file %s in %s\n' % (f, dirpath))
                    pf = os.path.splitext(os.path.join(dirpath, f))[0]
                    # Store the names of the .mo and .po files in an environment
                    # variable, rather than doing a string replacement into the
                    # command, so that we can take advantage of shell quoting, to
                    # quote any malicious characters/escaping.
                    # See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
                    os.environ['djangocompilemo'] = pf + '.mo'
                    os.environ['djangocompilepo'] = pf + '.po'
                    if sys.platform == 'win32':  # Different shell-variable syntax
                        cmd = 'msgfmt --check-format -o "%djangocompilemo%" "%djangocompilepo%"'
                    else:
                        cmd = 'msgfmt --check-format -o "$djangocompilemo" "$djangocompilepo"'
                    os.system(cmd)
class Command(BaseCommand):
    # Extend the inherited options with an optional --locale/-l filter.
    option_list = BaseCommand.option_list + (
        make_option('--locale', '-l', dest='locale',
            help='The locale to process. Default is to process all.'),
    )
    help = 'Compiles .po files to .mo files for use with builtin gettext support.'
    # Catalog compilation touches neither models nor settings, so both
    # framework checks are skipped.
    requires_model_validation = False
    can_import_settings = False

    def handle(self, **options):
        """Entry point: delegate to compile_messages() with the parsed locale."""
        locale = options.get('locale')
        compile_messages(locale)
|
gpotter2/scapy
|
scapy/contrib/isotp/isotp_native_socket.py
|
Python
|
gpl-2.0
| 14,939
| 0
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <nils@we155.de>
# This program is published under a GPLv2 license
# scapy.contrib.description = ISO-TP (ISO 15765-2) Native Socket Library
# scapy.contrib.status = library
import ctypes
from ctypes.util import find_library
import struct
import socket
from scapy.compat import Optional, Union, Tuple, Type, cast
from scapy.packet import Packet
import scapy.modules.six as six
from scapy.error import Scapy_Exception, warning
from scapy.supersocket import SuperSocket
from scapy.data import SO_TIMESTAMPNS
from scapy.config import conf
from scapy.arch.linux import get_last_packet_timestamp, SIOCGIFINDEX
from scapy.contrib.isotp.isotp_packet import ISOTP
from scapy.layers.can import CAN_MTU, CAN_MAX_DLEN
LIBC = ctypes.cdll.LoadLibrary(find_library("c")) # type: ignore
CAN_ISOTP = 6 # ISO 15765-2 Transport Protocol
SOL_CAN_BASE = 100 # from can.h
SOL_CAN_ISOTP = SOL_CAN_BASE + CAN_ISOTP
# /* for socket options affecting the socket (not the global system) */
CAN_ISOTP_OPTS = 1 # /* pass struct can_isotp_options */
CAN_ISOTP_RECV_FC = 2 # /* pass struct can_isotp_fc_options */
# /* sockopts to force stmin timer values for protocol regression tests */
CAN_ISOTP_TX_STMIN = 3 # /* pass __u32 value in nano secs */
CAN_ISOTP_RX_STMIN = 4 # /* pass __u32 value in nano secs */
CAN_ISOTP_LL_OPTS = 5 # /* pass struct can_isotp_ll_options */
CAN_ISOTP_LISTEN_MODE = 0x001 # /* listen only (do not send FC) */
CAN_ISOTP_EXTEND_ADDR = 0x002 # /* enable extended addressing */
CAN_ISOTP_TX_PADDING = 0x004 # /* enable CAN frame padding tx path */
CAN_ISOTP_RX_PADDING = 0x008 # /* enable CAN frame padding rx path */
CAN_ISOTP_CHK_PAD_LEN = 0x010 # /* check received CAN frame padding */
CAN_ISOTP_CHK_PAD_DATA = 0x020 # /* check received CAN frame padding */
CAN_ISOTP_HALF_DUPLEX = 0x040 # /* half duplex error state handling */
CAN_ISOTP_FORCE_TXSTMIN = 0x080 # /* ignore stmin from received FC */
CAN_ISOTP_FORCE_RXSTMIN = 0x100 # /* ignore CFs depending on rx stmin */
CAN_ISOTP_RX_EXT_ADDR = 0x200 # /* different rx extended addressing */
# /* default values */
CAN_ISOTP_DEFAULT_FLAGS = 0
CAN_ISOTP_DEFAULT_EXT_ADDRESS = 0x00
CAN_ISOTP_DEFAULT_PAD_CONTENT = 0xCC # /* prevent bit-stuffing */
CAN_ISOTP_DEFAULT_FRAME_TXTIME = 0
CAN_ISOTP_DEFAULT_RECV_BS = 0
CAN_ISOTP_DEFAULT_RECV_STMIN = 0x00
CAN_ISOTP_DEFAULT_RECV_WFTMAX = 0
CAN_ISOTP_DEFAULT_LL_MTU = CAN_MTU
CAN_ISOTP_DEFAULT_LL_TX_DL = CAN_MAX_DLEN
CAN_ISOTP_DEFAULT_LL_TX_FLAGS = 0
class tp(ctypes.Structure):
    # This struct is only used within the sockaddr_can struct
    # Field order and widths mirror the kernel ABI; do not reorder.
    _fields_ = [("rx_id", ctypes.c_uint32),
                ("tx_id", ctypes.c_uint32)]
class addr_info(ctypes.Union):
    # This struct is only used within the sockaddr_can struct
    # This union is to future proof for future can address information
    # Currently the only member is the ISO-TP rx/tx id pair.
    _fields_ = [("tp", tp)]
class sockaddr_can(ctypes.Structure):
    # See /usr/include/linux/can.h for original struct
    # Field order mirrors the kernel ABI; do not reorder.
    _fields_ = [("can_family", ctypes.c_uint16),
                ("can_ifindex", ctypes.c_int),
                ("can_addr", addr_info)]
class ifreq(ctypes.Structure):
    # The two fields in this struct were originally unions.
    # See /usr/include/net/if.h for original struct
    # The 16-byte name buffer corresponds to IFNAMSIZ on Linux.
    _fields_ = [("ifr_name", ctypes.c_char * 16),
                ("ifr_ifindex", ctypes.c_int)]
class ISOTPNativeSocket(SuperSocket):
desc = "read/write packets at a given CAN interface using CAN_ISOTP socket " # noqa: E501
can_isotp_options_fmt = "@2I4B"
can_isotp_fc_options_fmt = "@3B"
can_isotp_ll_options_fmt = "@3B"
sockaddr_can_fmt = "@H3I"
auxdata_available = True
def __build_can_isotp_options(
self,
flags=CAN_ISOTP_DEFAULT_FLAGS,
frame_txtime=CAN_ISOTP_DEFAULT_FRAME_TXTIME,
ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS,
txpad_content=CAN_ISOTP_DEFAULT_PAD_CONTENT,
rxpad_content=CAN_ISOTP_DEFAULT_PAD_CONTENT,
rx_ext_address=CAN_ISOTP_DEFAULT_EXT_ADDRESS):
# type: (int, int, int, int, int, int) -> bytes
return struct.pack(self.can_isotp_options_fmt,
flags,
frame_txtime,
ext_address,
txpad_content,
rxpad_content,
rx_ext_address)
# == Must use native not standard types for packing ==
# struct can_isotp_options {
# __u32 flags; /* set flags for isotp behaviour. */
# /* __u32 value : flags see below */
#
# __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */
# /* __u32 value : time in nano secs */
#
# __u8 ext_address; /* set address for extended addressing */
# /* __u8 value : extended address */
#
# __u8 txpad_content; /* set content of padding byte (tx) */
# /* __u8 value : content on tx path */
#
# __u8 rxpad_content; /* set content of padding byte (rx) */
# /* __u8 value : content on rx path */
#
# __u8 rx_ext_address; /* set address for extended addressing */
# /* __u8 value : extended address (rx) */
# };
def __build_can_isotp_fc_options(self,
bs=CAN_ISOTP_DEFAULT_RECV_BS,
stmin=CAN_ISOTP_DEFAULT_RECV_STMIN,
wftmax=CAN_ISOTP_DEFAULT_RECV_WFTMAX):
# type: (int, int, int) -> bytes
return struct.pack(self.can_isotp_fc_options_fmt,
bs,
stmin,
wftmax)
# == Must use native not standard types for packing ==
# struct can_isotp_fc_options {
#
# __u8 bs; /* blocksize provided in FC frame */
# /* __u8 value : blocksize. 0 = off */
#
# __u8 stmin; /* separation time provided in FC frame */
# /* __u8 value : */
# /* 0x00 - 0x7F : 0 - 127 ms */
# /* 0x80 - 0xF0 : reserved */
# /* 0xF1 - 0xF9 : 100 us - 900 us */
# /* 0xFA - 0xFF : reserved */
#
# __u8 wftmax; /* max. number of wait frame transmiss. */
# /* __u8 value : 0 = omit FC N_PDU WT */
# };
def __build_can_isotp_ll_options(self,
mtu=CAN_ISOTP_DEFAULT_LL_MTU,
tx_dl=CAN_ISOTP_DEFAULT_LL_TX_DL,
tx_flags=CAN_ISOTP_DEFAULT_LL_TX_FLAGS
):
# type: (int, int, int) -> bytes
return struct.pack(self.can_isotp_ll_options_fmt,
|
mtu,
tx_dl,
tx_flags)
# == Must use native not standard types for packing ==
# struct can_isotp_ll_options {
#
# __u8 mtu; /* generated & accepted CAN frame type */
# /* __u8 value :
|
*/
# /* CAN_MTU (16) -> standard CAN 2.0 */
# /* CANFD_MTU (72) -> CAN FD frame */
#
# __u8 tx_dl; /* tx link layer data length in bytes */
# /* (configured maximum payload length) */
# /* __u8 value : 8,12,16,20,24,32,48,64 */
# /* => rx path supports all LL_DL values */
#
# __u8 tx_flags; /* set into struct canfd_frame.flags */
# /* at frame creation: e.g. CANFD_BRS */
# /* Ob
|
MicBrain/Python-algo-Module
|
setup.py
|
Python
|
apache-2.0
| 2,077
| 0.029851
|
##############################
### ALGO MODULE SETUP TOOL ###
##############################
from distutils.core import setup
setup(name='algo',
      version='1.0',
      # ``py_modules`` is the distutils keyword; ``python_modules`` was
      # silently ignored, so the module was never actually packaged.
      py_modules=['algo'],
      # ``description`` replaces the invalid ``short_description`` keyword.
      description="Different Algorithms.",
      author="Rafayel Mkrtchyan",
      author_email="rafamian@berkeley.edu",
      download_url="http://github.com",
      keywords=["Different Algorithms"],
      # Non-standard metadata previously passed as (ignored) setup() keywords:
      #   date_created = "6/22/2014", country = "Armenia"
      # Classifiers must use the '<Category> :: <Value>' syntax of the
      # official PyPI trove-classifier list.
      classifiers=[
          "Programming Language :: Python",
          "Operating System :: OS Independent",
          "License :: OSI Approved :: MIT License",
          "Intended Audience :: Developers",
          "Development Status :: 5 - Production/Stable",
          "Topic :: Software Development :: Libraries :: Python Modules",
      ],
      long_description="""
      ####################################
      ### Python Module for Algorithms ###
      ####################################
      DESCRIPTION
      This library was created in order to help Python developers easily
      implement some very important algorithms. Algorithms include topics from
      various subfields of Computer Science. Module contains 50 algorithms. For
      more information, read the DOCUMENTATION of the module. You can find the
      documentaion of the module in the same Github repository.
      This is the First version of the module and in the future new updates
      will occur.
      CREATOR
      Algorithms were implemented by the author - Rafayel Mkrtchyan.
      I don't claim to be perfect. For suggestions or improvements contact:
      rafamian@berkeley.edu
      LICENSE
      License for Algo Module is distributed under the MIT License.
      ---------------------------------------------------------------------------
      ---------------------------------------------------------------------------
      """
      )
|
JudTown17/solutions-geoprocessing-toolbox
|
capability/test/test_erg_tools/TestUtilities.py
|
Python
|
apache-2.0
| 1,365
| 0.003663
|
#------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# TestUtilities.py
# Description: Common objects/methods used by test scripts
# Requirements: ArcGIS Desktop Standard
# ----------------------------------------------------------------------------
import arcpy
import os
import sys
# Resolve all toolbox/test resources relative to this file so the tests can
# run from any working directory.
currentPath = os.path.dirname(__file__)
toolboxesPath = os.path.normpath(os.path.join(currentPath, r"../../../capability/toolboxes/"))
toolDataPath = os.path.join(toolboxesPath, "tooldata")
layerPath = os.path.join(toolboxesPath, "layers")
layerDataPath = os.path.join(layerPath, "layerdata")
templateGDB = os.path.join(toolDataPath, "Templates.gdb")
# Toolbox under test.
toolbox = os.path.join(toolboxesPath, "ERG Tools.pyt")
jthurst3/newspeeches
|
parser2.py
|
Python
|
mit
| 1,487
| 0.019502
|
# parser2.py
# parses sentences from the CSV files
# J. Hassler Thurston
# RocHack Hackathon December 7, 2013
# Modified December 11, 2013
import nltk
from random import choice
cfg_file = 'upenn_grammar.cfg'
tbank_productions = []
nonterminals = []
rightside = []
def get_initial_rules():
    """Load production rules from the Penn Treebank corpus, populate the
    module-level rule lists, and print a sample sentence generated from the
    induced grammar.
    """
    # NOTE(review): ``rightside`` was previously assigned as a local,
    # shadowing the module-level list; it is added to the global declaration
    # so all three module-level lists are populated consistently.
    global tbank_productions, nonterminals, rightside
    # from http://stackoverflow.com/questions/7056996/how-do-i-get-a-set-of-grammar-rules-from-penn-treebank-using-python-nltk
    tbank_productions = [production for sent in nltk.corpus.treebank.parsed_sents() for production in sent.productions()]
    nonterminals = [production.lhs().__str__() for production in tbank_productions]
    rightside = [production.rhs().__str__() for production in tbank_productions]
    # (This statement was split by dataset separators; reconstructed.)
    tbank_grammar = nltk.grammar.ContextFreeGrammar(nltk.grammar.Nonterminal('S'), tbank_productions)
    # Single-argument print() form works on both Python 2 and 3.
    print(generate_sample(tbank_grammar))
# modified from http://stackoverflow.com/questions/15009656/how-to-use-nltk-to-generate-sentences-from-an-induced-grammar
def generate_sample(grammar, items=[nltk.grammar.Nonterminal('S')]):
    """Recursively expand *items* into a nested list of fragments by
    randomly choosing productions from *grammar*.

    Note: the mutable default argument is acceptable here because ``items``
    is only read, never mutated.
    """
    frags = []
    if len(items) == 1:
        # Single-argument print() form works on both Python 2 and 3
        # (replaces the former Python-2-only print statements).
        print(items)
        if isinstance(items[0], nltk.grammar.Nonterminal):
            # Expand the nonterminal through one of its productions.
            frags.append(generate_sample(grammar, grammar.productions(lhs=items[0])))
        else:
            # Terminal symbol: emit as-is.
            frags.append(items[0])
    else:
        print(items[:2])
        # This is where we need to make our changes
        chosen_expansion = choice(items)
        #print type(chosen_expansion)
        frags.append(generate_sample(grammar, [chosen_expansion]))
    return frags
|
CompassionCH/compassion-switzerland
|
website_compassion/controllers/cms_form.py
|
Python
|
agpl-3.0
| 1,098
| 0
|
##############################################################################
#
# Copyright (C) 2019-2020 Compassion CH (http://www.compassion.ch)
# @author: Christopher Meier <dev@c-meier.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
"""
This file blocks all the rou
|
tes defined automatically by cms_form.
"""
from odoo import http
from odoo.addons.cms_form.controllers.main import (
CMSFormController,
CMSWizardFormController,
CMSSearchFormController,
)
class UwantedCMSFormController(CMSFormController):
@http.route()
def cms_form(self, model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSWizardFormController(CMSWizardFormController):
@http.route()
def cms_wiz
|
(self, wiz_model, model_id=None, **kw):
return http.request.render("website.404")
class UnwantedCMSSearchFormController(CMSSearchFormController):
@http.route()
def cms_form(self, model, **kw):
return http.request.render("website.404")
|
Reagankm/KnockKnock
|
venv/lib/python3.4/site-packages/mpl_toolkits/axes_grid1/axes_grid.py
|
Python
|
gpl-2.0
| 31,905
| 0.001254
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator = kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need" +
" to be given, not both")
else:
kwargs["ticks"] = locator
self.hold(True)
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(
# int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation = kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
coordinates. This may not be ideal for images that needs to be
displayed with a given aspect ratio. For example, displaying
images of a same size with some fixed padding between them cannot
be easily done in matplotlib. AxesGrid is used in such case.
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True
|
| False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
|
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
|
fujy/ROS-Project
|
src/rbx2/rbx2_arm_nav/scripts/moveit_attached_object_demo.py
|
Python
|
mit
| 4,719
| 0.008264
|
#!/usr/bin/env python
"""
moveit_attached_object_demo.py - Version 0.1 2014-01-14
Attach an object to the end-effector and then move the arm to test collision avoidance.
Created for the Pi Robot Project: http://www.pirob
|
ot.org
Copyright (c) 2014 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be us
|
eful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy, sys
import thread, copy
import moveit_commander
from moveit_commander import RobotCommander, MoveGroupCommander, PlanningSceneInterface
from geometry_msgs.msg import PoseStamped, Pose
from moveit_msgs.msg import CollisionObject, AttachedCollisionObject, PlanningScene
from math import radians
from copy import deepcopy
class MoveItDemo:
def __init__(self):
# Initialize the move_group API
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('moveit_demo')
# Construct the initial scene object
scene = PlanningSceneInterface()
# Pause for the scene to get ready
rospy.sleep(1)
# Initialize the MoveIt! commander for the right arm
right_arm = MoveGroupCommander('right_arm')
# Initialize the MoveIt! commander for the gripper
right_gripper = MoveGroupCommander('right_gripper')
# Get the name of the end-effector link
end_effector_link = right_arm.get_end_effector_link()
# Allow some leeway in position (meters) and orientation (radians)
right_arm.set_goal_position_tolerance(0.01)
right_arm.set_goal_orientation_tolerance(0.05)
# Allow replanning to increase the odds of a solution
right_arm.allow_replanning(True)
# Allow 5 seconds per planning attempt
right_arm.set_planning_time(5)
# Remove leftover objects from a previous run
scene.remove_attached_object(end_effector_link, 'tool')
scene.remove_world_object('table')
scene.remove_world_object('box1')
scene.remove_world_object('box2')
scene.remove_world_object('target')
# Set the height of the table off the ground
table_ground = 0.75
# Set the length, width and height of the table
table_size = [0.2, 0.7, 0.01]
# Set the length, width and height of the object to attach
tool_size = [0.3, 0.02, 0.02]
# Create a pose for the tool relative to the end-effector
p = PoseStamped()
p.header.frame_id = end_effector_link
scene.attach_mesh
# Place the end of the object within the grasp of the gripper
p.pose.position.x = tool_size[0] / 2.0 - 0.025
p.pose.position.y = 0.0
p.pose.position.z = 0.0
# Align the object with the gripper (straight out)
p.pose.orientation.x = 0
p.pose.orientation.y = 0
p.pose.orientation.z = 0
p.pose.orientation.w = 1
# Attach the tool to the end-effector
scene.attach_box(end_effector_link, 'tool', p, tool_size)
# Add a floating table top
table_pose = PoseStamped()
table_pose.header.frame_id = 'base_footprint'
table_pose.pose.position.x = 0.35
table_pose.pose.position.y = 0.0
table_pose.pose.position.z = table_ground + table_size[2] / 2.0
table_pose.pose.orientation.w = 1.0
scene.add_box('table', table_pose, table_size)
# Update the current state
right_arm.set_start_state_to_current_state()
# Move the arm with the attached object to the 'straight_forward' position
right_arm.set_named_target('straight_forward')
right_arm.go()
rospy.sleep(2)
# Return the arm in the "resting" pose stored in the SRDF file
right_arm.set_named_target('resting')
right_arm.go()
rospy.sleep(2)
scene.remove_attached_object(end_effector_link, 'tool')
moveit_commander.roscpp_shutdown()
moveit_commander.os._exit(0)
if __name__ == "__main__":
MoveItDemo()
|
ccd-utexas/ProEMOnline
|
layout.py
|
Python
|
mit
| 7,341
| 0.016347
|
# -*- coding: utf-8 -*-
"""
This scripts sets an initial layout for the ProEMOnline software. It uses the
PyQtGraph dockarea system and was designed from the dockarea.py example.
Contains:
Left column: Observing Log
Center column: Plots
Right column: Images and Process Log
Menu bar
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.console
import numpy as np
import math
import astropy.io.fits as fits
from pyqtgraph.dockarea import *
#This program operates in four stages.
#Stage 0 - Program Initialized, waiting to open SPE file.
#Stage 1 - SPE file open, stars are being selected
#Stage 2 - Online data reduction and aperture photometry/plotting is done.
#Stage 3 - End of data acquisition detected. Final data written to file. Timestamps verified. Log saved. Weather/time log data saved.
# -> revert back to Stage 0.
stage=0 #start at 0
#Keep track of the current frame:
#One version that we do science on
#One version for display purposes
def newframe(fitsfile):
"""For given filename, return science and display images.
"""
img = fits.getdata(fitsfile)[0]
displayimg = np.copy(img)
#replace everything above 99%tile
#don't do calulcations on this adjusted array!!!
imgvals = displayimg.flatten()
img99percentile = np.percentile(imgvals,99)
displayimg[displayimg > img99percentile] = img99percentile
#make color
displayimg=np.array([displayimg,displayimg,displayimg]).transpose()
return img,displayimg
#Start with some initial example file
fitsfile = 'ProEMExample.fits' #initial file
img,displayimg = newframe(fitsfile)
#Use a function to display a new image
#Autoscaling levels optional
def displayframe(displayimg,autoscale=False):
"""Display an RBG image
Autoscale optional.
Return nothing.
"""
if autoscale:
w5.setImage(displayimg,autoRange=True,levels=[np.min(displayimg),np.max(displayimg)-1])
else:
w5.setImage(displayimg,autoRange=False,autoLevels=False)
#Set up a list to keep track of star positions
starpos=[]
#Open File functionality
class WithMenu(QtGui.QMainWindow):
def __init__(self):
super(WithMenu, self).__init__()
self.initUI()
def initUI(self):
#Note: Exit is protected on Mac. This may work on Windows.
exitAction = QtGui.QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.showDialog)
openFile = QtGui.QAction('Open', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open new File')
openFile.triggered.connect(self.showDialog)
menubar = self.menuBar()
fileMenu = menubar.addMenu('File')
fileMenu.addAction(openFile)
fileMenu.addAction(exitAction)
def showDialog(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
'/home')
#print str(fname)
img = fits.getdata(str(fname))[0]
w5.setImage(img)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Really quit?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
app = QtGui.QApplication([])
win = WithMenu()
area = DockArea()
win.setCentralWidget(area)
win.resize(1200,600)
win.setWindowTitle('ProEM Online Data Analysis Demo')
## Create docks, place them into the window one at a time.
## Note that size arguments are only a suggestion; docks will still have to
## fill the entire dock area and obey the limits of their internal widgets.
d1 = Dock("Dock1 - Observing Log", size=(500,300))
d2 = Dock("Dock2 - Process Log", size=(500,300))
d3 = Dock("Dock3 - Fourier Transform", size=(500,400))
d4 = Dock("Dock4 (tabbed) - Smoothed", size=(500,200))
d5 = Dock("Dock5 - Image", size=(500,200))
d6 = Dock("Dock6 (tabbed) - Light Curve", size=(500,200))
d7 = Dock("Dock7 (tabbed) - Comparison Counts", size=(500,200))
d8 = Dock("Dock8 (tabbed) - Seeing", size=(500,200))
area.addDock(d1, 'left') ## place d1 at left edge of dock area (it will fill the whole space since there are no other docks yet)
area.addDock(d2, 'right') ## place d2 at right edge of dock area
area.addDock(d3, 'left', d2)## place d3 at the left edge of d2
area.addDock(d4, 'top',d3) ## place d4 on top d3
area.addDock(d5, 'top',d2) ## place d5 on top d2
area.addDock(d6, 'above', d4) ## place d6 above d4
area.addDock(d7, 'top', d3)
area.addDock(d8, 'above', d7)
## Add widgets into each dock
## First dock holds the Observing Log
w1 = pg.LayoutWidget()
o
|
bserver = QtGui.QLabel('Observer')
target = QtGui.QLabel('Target')
filt = QtGui.QLabel('Filter')
log = QtGui.QLabel('Log')
observerEdit = QtGui.QLineEdit()
targetEdit = QtGui.QLineEdit()
filtEdit = QtGui.QComboBox()
filtEdit.addItems(["BG40","u'","g'","r'","i'","z'","Other"])
|
logEdit = QtGui.QTextEdit()
w1.addWidget(observer, 1, 0)
w1.addWidget(observerEdit, 1, 1)
w1.addWidget(target, 2, 0)
w1.addWidget(targetEdit, 2, 1)
w1.addWidget(filt, 3, 0)
w1.addWidget(filtEdit, 3, 1)
w1.addWidget(log, 4, 0)
w1.addWidget(logEdit, 4, 1, 6, 1)
d1.addWidget(w1)
## Process Log
w2 = pg.LayoutWidget()
processLog = QtGui.QTextEdit()
processLog.setReadOnly(True)
#processLog.setTextBackgroundColor(QtGui.QColor("black"))
w2.addWidget(processLog, 0, 0, 6, 1)
d2.addWidget(w2)
## Fourier Transform - Just shows random updating noise for now
w3 = pg.PlotWidget(title="Fourier Transform")
curve = w3.plot(pen='y')
data = np.random.normal(size=(10,1000))
ptr = 0
def update():
global curve, data, ptr, w3
curve.setData(data[ptr%10])
if ptr == 0:
w3.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted
ptr += 1
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
d3.addWidget(w3)
## Smoothed Light Curve
w4 = pg.PlotWidget(title="Dock 4 plot")
w4.plot(np.random.normal(size=100))
d4.addWidget(w4)
## Image
w5 = pg.ImageView()
w5.ui.roiBtn.hide()
w5.ui.normBtn.hide()
displayframe(displayimg,autoscale=True)
def click(event):
event.accept()
pos = event.pos()
#check if we're marking or unmarking a star
#if pos.
starpos.append([pos.x(),pos.y()])
#img[pos.x(),pos.y()]=[255,255-img[pos.x(),pos.y(),1],255-img[pos.x(),pos.y(),1]]
#w5.setImage(img,autoRange=False)
processLog.append("Star selected at "+str( (int(pos.x()),int(pos.y())) ))
w5.getImageItem().mouseClickEvent = click
d5.addWidget(w5)
## Light Curve
w6 = pg.PlotWidget(title="Dock 6 plot")
w6.plot(np.random.normal(size=100))
d6.addWidget(w6)
## Smoothed Light Curve
w7 = pg.PlotWidget(title="Dock 7 plot")
w7.plot(np.random.normal(size=100))
d7.addWidget(w7)
## Smoothed Light Curve
w8 = pg.PlotWidget(title="Dock 8 plot")
w8.plot(np.random.normal(size=100))
d8.addWidget(w8)
win.show()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
hnarayanan/twist
|
demo/static/twist.py
|
Python
|
gpl-3.0
| 2,606
| 0.02878
|
__author__ = "Harish Narayanan"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from cbc.twist import *
from sys import argv
""" DEMO - Twisting of a hyperelastic cube """
class Twist(StaticHyperelasticity):
""" Definition of the hyperelastic problem """
def mesh(self):
n = 8
return UnitCubeMesh(n, n, n)
# Setting up dirichlet conditions and boundaries
def dirichlet_values(self):
clamp = Expression(("0.0", "0.0", "0.0"))
twist = Expression(("0.0",
"y0 + (x[1] - y0) * cos(theta) - (x[2] - z0) * sin(theta) - x[1]",
"z0 + (x[1] - y0) * sin(theta) + (x[2] - z0) * cos(theta) - x[2]"),
y0=0.5, z0=0.5, theta=pi/6)
return [clamp, twist]
def dirichlet_boundaries(self):
left = "x[0] == 0.0"
right = "x[0] == 1.0"
return [left, right]
# List of material models
def material_model(self):
# Material parameters can either be numbers or spatially
# varying fields. For example,
mu = 3.8461
lmbda = Expression("x[0]*5.8 + (1 - x[0])*5.7")
C10 = 0.171; C01 = 4.89e-3; C20 = -2.4e-4; C30 = 5.e-4
delka = 1.0/sqrt(2.0)
M = Constant((0.0,1.0,0.0))
k1 = 1e2; k2 = 1e1
|
materials = []
materials.append(MooneyRivlin({'C1':mu/2, 'C2':mu/2, 'bulk':lmbda}))
materials.append(StVenantKirchhoff({'mu':mu, 'bulk':lmbda}))
materials.append(neoHookean({'half_nkT':mu, 'bulk':lmbda}))
materials.append(Isihara({'C10':C10,'C01':C01,'C20':C20,'bulk':lmbda}))
materials.append(Biderman({'C10':C10,'C01':C01,'C20':C20,'C30':C30,'bulk':lmbda}))
materials.a
|
ppend(AnisoTest({'mu1':mu,'mu2':2*mu,'M':M,'bulk':lmbda}))
materials.append(GasserHolzapfelOgden({'mu':mu,'k1':k1,'k2':k2,'M':M,'bulk':lmbda}))
materials.append(Ogden({'alpha1':1.3,'alpha2':5.0,'alpha3':-2.0,\
'mu1':6.3e5,'mu2':0.012e5,'mu3':-0.1e5}))
try:
index = int(argv[1])
except:
index = 2
print str(materials[index])
return materials[index]
def name_method(self, method):
self.method = method
def __str__(self):
return "A hyperelastic cube twisted by 30 degrees solved by " + self.method
# Setup the problem
twist = Twist()
twist.name_method("DISPLACEMENT BASED FORMULATION")
# Solve the problem
print twist
twist.solve()
|
Zashel/zrest
|
zrest/__init__.py
|
Python
|
apache-2.0
| 80
| 0
|
from . import
|
statuscodes
fr
|
om .exceptions import *
from . import basedatamodel
|
samdowd/drumm-farm
|
pages/migrations/0003_auto_20160515_0618.py
|
Python
|
mit
| 2,802
| 0.002855
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-15 06:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_auto_20160512_0259'),
]
operations = [
migrations.CreateModel(
name='BoardMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('title', models.CharField(blank=True, max_length=200)),
('month_appointed', models.CharField(max_length=20)),
('year_appointed', models.CharField(max_length=20)),
('job', models.CharField(max_length=200)),
],
),
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'categories'},
),
migrations.AlterModelOptions(
name='listpage',
options={'verbose_name': 'page that lists things', 'verbose_name_plural': 'pages that list things'},
),
migrations.AlterModelOptions(
name='staticpage',
options={'verbose_name': 'page that displays set information', 'verbose_name_plural': 'pages that display set information'},
),
migrations.AlterField(
model_name='category',
name='blurb',
field=models.CharField(help_text='Small amount of text that will be displayed on the learn more page.', max_length=1000),
),
migrations.AlterField(
model_name='category',
name='url',
field=models.CharField(help_text="Input a computer-friendly version of the title. Example: 'Who We Are' would have the URL 'who-we-are'.", max_length=20, unique=True, verbose_name='URL'),
),
migrations.AlterField(
model_name='listpage',
name='listObjectName',
field=models.CharField(help_text="The name of the model you are listing. For example if you are listing staff
|
members, 'StaffMember'. You probably need Sam's help if you have a new type of thing to list.", max_length=200, verbose_name=
|
'name of object listed'),
),
migrations.AlterField(
model_name='page',
name='public',
field=models.BooleanField(default=True, verbose_name='Visible to Public'),
),
migrations.AlterField(
model_name='page',
name='url',
field=models.CharField(help_text="Input a computer-friendly version of the title. Example: 'Our Staff' would have the URL 'our-staff'.", max_length=20, unique=True, verbose_name='URL'),
),
]
|
CSIOM/piPi
|
src/managers/models.py
|
Python
|
gpl-3.0
| 2,864
| 0.005587
|
#!/usr/bin/env python
"""
This file is part of piPi project.
Copyright (C) 2015 CSIOM, http://www.csiom.com
Authors: The Csiom Team
piPi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Foobar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Foobar. If not, see <http://www.gnu.org/licenses/>.
"""
"""
%% src/mamagers/models.py %%
This file contains django model classes that will be used for ORM with database..
"""
from django.db import models
from django.contrib.auth.models import User
class ProjectDetails(models.Model):
"""Model class for project."""
title = models.CharField(max_length=100, verbose_name="Project Title")
project_code = models.CharField(max_length=20, blank=True, null=True)
technologies_required = models.CharField(max_length=100)
start_date = models.DateField(blank=True, null=True)
end_date = models.DateField(blank=True, null=True)
estimated_time = models.CharField(max_length=10)
is_inactive = models.BooleanField(default=False)
manager = models.ForeignKey(User)
project_summary = models.TextField()
class Meta:
verbose_name_plural="Project Details"
def __unicode__(self):
return self.title
class TeamMembers(models.Model):
"""Model class for team members of the project."""
project = models.ForeignKey(ProjectDetails)
user = models.ForeignKey(User)
is_active = models.BooleanField(default=True)
class Meta:
verbose_name_plural = "Team Members"
def __unicode__(self):
return unicode(self.project)
class Status(models.Model):
"""Model class for listing states of projects."""
state = models.Ch
|
arField(max_length=200,help_text="Name of status field")
make_active_tag = models.BooleanField(default=False,
help_text="Mark me if this state makes project active(started/resumed etc)")
make_inactive_tag = models.BooleanField(default=False,
help_text="Mark
|
me if this state makes project inactive(completed/cancelled etc)")
class Meta:
verbose_name_plural = "Status"
def __unicode__(self):
return self.state
class StatusOfProject(models.Model):
"""Model class to store status of projects."""
project = models.ForeignKey(ProjectDetails)
status = models.ForeignKey(Status)
remarks = models.TextField(default=None)
datetime = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.project
|
tazle/pik-laskutin
|
import-flights.py
|
Python
|
gpl-2.0
| 151
| 0
|
from pik.flights import Flight
import csv
import sys
reader = csv.reader(s
|
ys.stdin)
for flight in Flight.generate_f
|
rom_csv(reader):
print flight
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.