| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| mtils/ems | ems/qt4/gui/notification/balloon_form_notifier.py | Python | mit | 2,764 | 0.002171 |
from PyQt4.QtCore import QObject, pyqtSignal
from ems.notification.abstract import FormNotifier
from ems.qt4.gui.widgets.balloontip import BalloonTip
from ems import qt4
from ems.qt4.util import variant_to_pyobject as py
class BalloonFormNotifier(FormNotifier):
def __init__(self):
self._widgetMap = {}
self._balloons = {}
self._defaultState = BalloonTip.ERROR
self._model = None
self._currentModelRow = 0
def map(self, key, widget):
self._widgetMap[key] = widget
self._balloons[key] = BalloonTip(widget)
self._balloons[key].setArrowAtLeft(True)
self._balloons[key].setArrowAtTop(False)
def mapAll(self, widgetDict):
for fieldName in widgetDict:
self.map(fieldName, widgetDict[fieldName])
def showMessage(self, key, message, state=None):
state = self._defaultState if state is None else state
if not key in self._balloons:
return
if not len(message):
self._balloons[key].setMessage(message)
self._balloons[key].hide()
return
self._balloons[key].setState(state)
self._balloons[key].setMessage(message)
self._balloons[key].show()
def clearMessages(self):
for key in self._balloons:
self._balloons[key].setMessage('')
self._balloons[key].hide()
def getModel(self):
return self._model
def setModel(self, model):
self._connectToModel(model)
self._model = model
self._updateMessagesFromModel()
model = property(getModel, setModel)
def getCurrentModelRow(self):
return self._currentModelRow
def setCurrentModelRow(self, row):
self._currentModelRow = row
self._updateMessagesFromModel()
currentModelRow = property(getCurrentModelRow, setCurrentModelRow)
def _connectToModel(self, model):
model.messageChanged.connect(self._onModelMessageChanged)
model.messagesCleared.connect(self._onModelMessageCleared)
def _onModelMessageChanged(self, row, column, message):
if row != self._currentModelRow:
return
keyName = py(self._model.index(row, column).data(qt4.ColumnNameRole))
self.showMessage(keyName, message)
def _onModelMessageCleared(self, row):
if row != self._currentModelRow:
return
self.clearMessages()
def _updateMessagesFromModel(self):
self.clearMessages()
row = self._currentModelRow
for column in range(self._model.columnCount()):
keyName = py(self._model.index(row, column).data(qt4.ColumnNameRole))
self.showMessage(keyName, self._model.columnMessage(row, column))
| kernevil/samba | python/samba/netcmd/domain_backup.py | Python | gpl-3.0 | 50,651 | 0.000138 |
# domain_backup
#
# Copyright Andrew Bartlett <abartlet@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import os
import sys
import tarfile
import logging
import shutil
import tempfile
import samba
import tdb
import samba.getopt as options
from samba.samdb import SamDB, get_default_backend_store
import ldb
from samba.samba3 import libsmb_samba_internal as libsmb
from samba.samba3 import param as s3param
from samba.ntacls import backup_online, backup_restore, backup_offline
from samba.auth import system_session
from samba.join import DCJoinContext, join_clone, DCCloneAndRenameContext
from samba.dcerpc.security import dom_sid
from samba.netcmd import Option, CommandError
from samba.dcerpc import misc, security, drsblobs
from samba import Ldb
from . fsmo import cmd_fsmo_seize
from samba.provision import make_smbconf, DEFAULTSITE
from samba.upgradehelpers import update_krbtgt_account_password
from samba.remove_dc import remove_dc
from samba.provision import secretsdb_self_join
from samba.dbchecker import dbcheck
import re
from samba.provision import guess_names, determine_host_ip, determine_host_ip6
from samba.provision.sambadns import (fill_dns_data_partitions,
get_dnsadmins_sid,
get_domainguid)
from samba.tdb_util import tdb_copy
from samba.mdb_util import mdb_copy
import errno
from subprocess import CalledProcessError
from samba import sites
from samba.dsdb import _dsdb_load_udv_v2
from samba.ndr import ndr_pack
from samba.credentials import SMB_SIGNING_REQUIRED
# work out a SID (based on a free RID) to use when the domain gets restored.
# This ensures that the restored DC's SID won't clash with any other RIDs
# already in use in the domain
def get_sid_for_restore(samdb, logger):
# Find the DN of the RID set of the server
res = samdb.search(base=ldb.Dn(samdb, samdb.get_serverName()),
scope=ldb.SCOPE_BASE, attrs=["serverReference"])
server_ref_dn = ldb.Dn(samdb, str(res[0]['serverReference'][0]))
res = samdb.search(base=server_ref_dn,
scope=ldb.SCOPE_BASE,
attrs=['rIDSetReferences'])
rid_set_dn = ldb.Dn(samdb, str(res[0]['rIDSetReferences'][0]))
# Get the alloc pools and next RID of the RID set
res = samdb.search(base=rid_set_dn,
scope=ldb.SCOPE_SUBTREE,
expression="(rIDNextRID=*)",
attrs=['rIDAllocationPool',
'rIDPreviousAllocationPool',
'rIDNextRID'])
# Decode the bounds of the RID allocation pools
try:
rid = int(res[0].get('rIDNextRID')[0])
except IndexError:
logger.info("The RID pool for this DC is not initalized "
"(e.g. it may be a fairly new DC).")
logger.info("To initialize it, create a temporary user on this DC "
"(you can delete it later).")
raise CommandError("Cannot create backup - "
"please initialize this DC's RID pool first.")
def split_val(num):
high = (0xFFFFFFFF00000000 & int(num)) >> 32
low = 0x00000000FFFFFFFF & int(num)
return low, high
pool_l, pool_h = split_val(res[0].get('rIDPreviousAllocationPool')[0])
npool_l, npool_h = split_val(res[0].get('rIDAllocationPool')[0])
# Calculate next RID based on pool bounds
if rid == npool_h:
raise CommandError('Out of RIDs, finished AllocPool')
if rid == pool_h:
if pool_h == npool_h:
raise CommandError('Out of RIDs, finished PrevAllocPool.')
rid = npool_l
else:
rid += 1
# Construct full SID
sid = dom_sid(samdb.get_domain_sid())
return str(sid) + '-' + str(rid)
def smb_sysvol_conn(server, lp, creds):
"""Returns an SMB connection to the sysvol share on the DC"""
# the SMB bindings rely on having a s3 loadparm
s3_lp = s3param.get_context()
s3_lp.load(lp.configfile)
# Force signing for the connection
saved_signing_state = creds.get_smb_signing()
creds.set_smb_signing(SMB_SIGNING_REQUIRED)
conn = libsmb.Conn(server, "sysvol", lp=s3_lp, creds=creds)
# Reset signing state
creds.set_smb_signing(saved_signing_state)
return conn
def get_timestamp():
return datetime.datetime.now().isoformat().replace(':', '-')
def backup_filepath(targetdir, name, time_str):
filename = 'samba-backup-%s-%s.tar.bz2' % (name, time_str)
return os.path.join(targetdir, filename)
def create_backup_tar(logger, tmpdir, backup_filepath):
# Adds everything in the tmpdir into a new tar file
logger.info("Creating backup file %s..." % backup_filepath)
tf = tarfile.open(backup_filepath, 'w:bz2')
tf.add(tmpdir, arcname='./')
tf.close()
def create_log_file(targetdir, lp, backup_type, server, include_secrets,
extra_info=None):
# create a summary file about the backup, which will get included in the
# tar file. This makes it easy for users to see what the backup involved,
# without having to untar the DB and interrogate it
f = open(os.path.join(targetdir, "backup.txt"), 'w')
try:
time_str = datetime.datetime.now().strftime('%Y-%b-%d %H:%M:%S')
f.write("Backup created %s\n" % time_str)
f.write("Using samba-tool version: %s\n" % lp.get('server string'))
f.write("Domain %s backup, using DC '%s'\n" % (backup_type, server))
f.write("Backup for domain
|
%s (NetBIOS), %s (DNS realm)\n" %
(lp.get('workgroup'), lp.get('realm').lower()))
f.write("Backup contains domain
|
secrets: %s\n" % str(include_secrets))
if extra_info:
f.write("%s\n" % extra_info)
finally:
f.close()
# Add a backup-specific marker to the DB with info that we'll use during
# the restore process
def add_backup_marker(samdb, marker, value):
m = ldb.Message()
m.dn = ldb.Dn(samdb, "@SAMBA_DSDB")
m[marker] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, marker)
samdb.modify(m)
def check_targetdir(logger, targetdir):
if targetdir is None:
raise CommandError('Target directory required')
if not os.path.exists(targetdir):
logger.info('Creating targetdir %s...' % targetdir)
os.makedirs(targetdir)
elif not os.path.isdir(targetdir):
raise CommandError("%s is not a directory" % targetdir)
# For '--no-secrets' backups, this sets the Administrator user's password to a
# randomly-generated value. This is similar to the provision behaviour
def set_admin_password(logger, samdb):
"""Sets a randomly generated password for the backup DB's admin user"""
# match the admin user by RID
domainsid = samdb.get_domain_sid()
match_admin = "(objectsid=%s-%s)" % (domainsid,
security.DOMAIN_RID_ADMINISTRATOR)
search_expr = "(&(objectClass=user)%s)" % (match_admin,)
# retrieve the admin username (just in case it's been renamed)
res = samdb.search(base=samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
expression=search_expr)
username = str(res[0]['samaccountname'])
adminpass = samba.generate_random_password(12, 32)
logger.info("Setting %s password in backup to: %s" % (username, adminpass))
logger.info("Run 'samba-tool user setpassword %s' after restoring DB" %
username)
samdb.setpassword(search_expr, adminpass, force_change_at_next_login=False,
| jfrygeo/solutions-geoprocessing-toolbox | utils/test/patterns_tests/IncidentDensityTestCase.py | Python | apache-2.0 | 4,288 | 0.011194 |
# coding: utf-8
# -----------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# ==================================================
# IncidentDensityTestCase.py
# --------------------------------------------------
# requirments: ArcGIS X.X, Python 2.7 or Python 3.4
# author: ArcGIS Solutions
# company: Esri
# ==================================================
# history:
# 12/16/2015 - JH - initial creation
# ==================================================
import unittest
import arcpy
import os
import UnitTestUtilities
import Configuration
import DataDownload
class IncidentDensityTestCase(unittest.TestCase):
''' Test all tools and methods related to the Incident Density tool
in the Incident Analysis toolbox'''
inputPointFeatures = None
inputBoundaryFeatures = None
def setUp(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.setUp")
UnitTestUtilities.checkArcPy()
Configuration.incidentDataPath = DataDownload.runDataDownload(Configuration.patternsPaths, Configuration.incidentGDBName, Configuration.incidentURL)
if (Configuration.incidentScratchGDB == None) or (not arcpy.Exists(Configuration.incidentScratchGDB)):
Configuration.incidentScratchGDB = UnitTestUtilities.createScratch(Configuration.incidentDataPath)
Configuration.incidentInputGDB = os.path.join(Configuration.incidentDataPath, Configuration.incidentGDBName)
UnitTestUtilities.checkFilePaths([Configuration.incidentDataPath, Configuration.incidentInputGDB, Configuration.patterns_ProToolboxPath, Configuration.patterns_DesktopToolboxPath])
self.inputPointFeatures = os.path.join(Configuration.incidentInputGDB, "Incidents")
self.inputBoundaryFeatures = os.path.join(Configuration.incidentInputGDB, "Districts")
def tearDown(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.tearDown")
UnitTestUtilities.deleteScratch(Configuration.incidentScratchGDB)
def test_incident_density_pro(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density_pro")
arcpy.AddMessage("Testing Incident Density (Pro).")
self.test_incident_density(Configuration.patterns_ProToolboxPath)
def test_incident_density_desktop(self):
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density_desktop")
arcpy.AddMessage("Testing Incident Density (Desktop).")
self.test_incident_density(Configuration.patterns_DesktopToolboxPath)
def test_incident_density(self, toolboxPath):
try:
if Configuration.DEBUG == True: print(" IncidentDensityTestCase.test_incident_density")
arcpy.CheckOutExtension("Spatial")
arcpy.ImportToolbox(toolboxPath, "iaTools")
runToolMsg = "Running tool (Incident Density)"
arcpy.AddMessage(runToolMsg)
Configuration.Logger.info(runToolMsg)
outputDensity = os.path.join(Configuration.incidentScratchGDB, "outputDensity")
arcpy.IncidentDensity_iaTools(self.inputPointFeatures, self.inputBoundaryFeatures, outputDensity)
arcpy.CheckInExtension("Spatial")
self.assertTrue(arcpy.Exists(outputDensity))
except arcpy.ExecuteError:
UnitTestUtilities.handleArcPyError()
except:
UnitTestUtilities.handleGeneralError()
| DinoCow/airflow | tests/api_connexion/schemas/test_connection_schema.py | Python | apache-2.0 | 7,097 | 0.000564 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import unittest
import marshmallow
from airflow.api_connexion.schemas.connection_schema import (
ConnectionCollection,
connection_collection_item_schema,
connection_collection_schema,
connection_schema,
)
from airflow.models import Connection
from airflow.utils.session import create_session, provide_session
from tests.test_utils.db import clear_db_connections
class TestConnectionCollectionItemSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model = Connection(
conn_id='mysql_default',
conn_type='mysql',
host='mysql',
login='login',
schema='testschema',
port=80,
)
session.add(connection_model)
session.commit()
connection_model = session.query(Connection).first()
deserialized_connection = connection_collection_item_schema.dump(connection_model)
self.assertEqual(
deserialized_connection,
{
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
},
)
def test_deserialize(self):
connection_dump_1 = {
'connection_id': "mysql_default_1",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
}
connection_dump_2 = {
'connection_id': "mysql_default_2",
'conn_type': "postgres",
}
result_1 = connection_collection_item_schema.load(connection_dump_1)
result_2 = connection_collection_item_schema.load(connection_dump_2)
self.assertEqual(
result_1,
{
'conn_id': "mysql_default_1",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
},
)
self.assertEqual(
result_2,
{
'conn_id': "mysql_default_2",
'conn_type': "postgres",
},
)
def test_deserialize_required_fields(self):
connection_dump_1 = {
'connection_id': "mysql_default_2",
}
with self.assertRaisesRegex(
marshmallow.exceptions.ValidationError,
re.escape("{'conn_type': ['Missing data for required field.']}"),
):
connection_collection_item_schema.load(connection_dump_1)
class TestConnectionCollectionSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model_1 = Connection(conn_id='mysql_default_1', conn_type='test-type')
connection_model_2 = Connection(conn_id='mysql_default_2', conn_type='test-type2')
connections = [connection_model_1, connection_model_2]
session.add_all(connections)
session.commit()
instance = ConnectionCollection(connections=connections, total_entries=2)
deserialized_connections = connection_collection_schema.dump(instance)
self.assertEqual(
deserialized_connections,
{
'connections': [
{
"connection_id": "mysql_default_1",
"conn_type": "test-type",
"host": None,
"login": None,
'schema': None,
'port': None,
},
{
"connection_id": "mysql_default_2",
"conn_type": "test-type2",
"host": None,
"login": None,
'schema': None,
'port': None,
},
],
'total_entries': 2,
},
)
class TestConnectionSchema(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Connection).delete()
def tearDown(self) -> None:
clear_db_connections()
@provide_session
def test_serialize(self, session):
connection_model = Connection(
conn_id='mysql_default',
conn_type='mysql',
host='mysql',
login='login',
schema='testschema',
port=80,
password='test-password',
extra="{'key':'string'}",
)
session.add(connection_model)
session.commit()
connection_model = session.query(Connection).first()
deserialized_connection = connection_schema.dump(connection_model)
self.assertEqual(
deserialized_connection,
{
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
},
)
def test_deserialize(self):
den = {
'connection_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
}
result = connection_schema.load(den)
self.assertEqual(
result,
{
'conn_id': "mysql_default",
'conn_type': 'mysql',
'host': 'mysql',
'login': 'login',
'schema': 'testschema',
'port': 80,
'extra': "{'key':'string'}",
},
)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/version.py | Python | mit | 348 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "2.0.0rc1"
| ezequielpereira/Time-Line | timelinelib/wxgui/components/filechooser.py | Python | gpl-3.0 | 2,868 | 0.001046 |
# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
import wx.lib.newevent
from gettext import gettext as _
class FileChooser(wx.Panel):
FilePathChangedEvent, EVT_FILE_PATH_CHANGED = wx.lib.newevent.NewEvent()
BORDER = 1
def __init__(self, parent,
dialog_message=_("Choose file"),
dialog_dir="",
dialog_wildcard="*",
**kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self._dialog_message = dialog_message
self._dialog_dir = dialog_dir
self._dialog_wildcard = dialog_wildcard
self._create_gui()
def GetFilePath(self):
return self._path_text_field.GetValue()
def _create_gui(self):
self._create_path_text_field()
self._create_browse_button()
self._layout_components()
def _create_path_text_field(self):
self._path_text_field = wx.TextCtrl(self)
self._path_text_field.Bind(wx.EVT_TEXT, self._on_path_text_changed)
def _on_path_text_changed(self, evt):
wx.PostEvent(self, self.FilePathChangedEvent())
def _create_browse_button(self):
self._browse_button = wx.Button(self, wx.ID_OPEN)
self._browse_button.Bind(wx.EVT_BUTTON, self._on_browse_button_click)
def _on_browse_button_click(self, evt):
dialog = wx.FileDialog(self,
message=self._dialog_message,
defaultDir=self._dialog_dir,
wildcard=self._dialog_wildcard,
style=wx.FD_OPEN)
if dialog.ShowModal() == wx.ID_OK:
self._path_text_field.SetValue(dialog.GetPath())
dialog.Destroy()
def _layout_components(self):
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self._path_text_field,
proportion=1,
flag=wx.ALL|wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(self._browse_button,
proportion=0,
flag=wx.ALL|wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
self.SetSizer(sizer)
| marrow/mongo | test/query/test_ops.py | Python | mit | 5,358 | 0.035274 |
# encoding: utf-8
from __future__ import unicode_literals
import operator
import pytest
from marrow.mongo import Filter
from marrow.schema.compat import odict, py3
@pytest.fixture
def empty_ops(request):
return Filter()
@pytest.fixture
def single_ops(request):
return Filter({'roll': 27})
def test_ops_iteration(single_ops):
assert list(iter(single_ops)) == ['roll']
class TestOpsMapping(object):
def test_getitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
empty_ops['roll']
assert single_ops['roll'] == 27
def test_setitem(self, empty_ops):
assert repr(empty_ops) == "Filter([])"
empty_ops['meaning'] = 42
if py3:
assert repr(empty_ops) == "Filter([('meaning', 42)])"
else:
assert repr(empty_ops) == "Filter([(u'meaning', 42)])"
def test_delitem(self, empty_ops, single_ops):
with pytest.raises(KeyError):
del empty_ops['roll']
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
del single_ops['roll']
assert repr(single_ops) == "Filter([])"
def test_length(self, empty_ops, single_ops):
assert len(empty_ops) == 0
assert len(single_ops) == 1
def test_keys(self, empty_ops, single_ops):
assert list(empty_ops.keys()) == []
assert list(single_ops.keys()) == ['roll']
def test_items(self, empty_ops, single_ops):
assert list(empty_ops.items()) == []
assert list(single_ops.items()) == [('roll', 27)]
def test_values(self, empty_ops, single_ops):
assert list(empty_ops.values()) == []
assert list(single_ops.values()) == [27]
def test_contains(self, single_ops):
assert 'foo' not in single_ops
assert 'roll' in single_ops
def test_equality_inequality(self, empty_ops, single_ops):
assert empty_ops == {}
assert empty_ops != {'roll': 27}
assert single_ops != {}
assert single_ops == {'roll': 27}
def test_get(self, single_ops):
assert single_ops.get('foo') is None
assert single_ops.get('foo', 42) == 42
assert single_ops.get('roll') == 27
def test_clear(self, single_ops):
assert len(single_ops.operations) == 1
single_ops.clear()
assert len(single_ops.operations) == 0
def test_pop(self, single_ops):
assert len(single_ops.operations) == 1
with pytest.raises(KeyError):
single_ops.pop('foo')
assert single_ops.pop('foo', 42) == 42
assert len(single_ops.operations) == 1
assert single_ops.pop('roll') == 27
assert len(single_ops.operations) == 0
def test_popitem(self, single_ops):
assert len(single_ops.operations) == 1
assert single_ops.popitem() == ('roll', 27)
assert len(single_ops.operations) == 0
with pytest.raises(KeyError):
single_ops.popitem()
def test_update(self, empty_ops, single_ops):
assert len(empty_ops.operations) == 0
empty_ops.update(name="Bob Dole")
assert len(empty_ops.operations) == 1
if py3:
assert repr(empty_ops) == "Filter([('name', 'Bob Dole')])"
else:
assert repr(empty_ops) == "Filter([('name', u'Bob Dole')])"
assert len(single_ops.operations) == 1
if py3:
assert repr(single_ops) == "Filter([('roll', 27)])"
else:
assert repr(single_ops) == "Filter([(u'roll', 27)])"
single_ops.update([('name', "Bob Dole")])
assert len(single_ops.operations) == 2
if py3:
assert repr(single_ops) in ("Filter([('roll', 27), ('name', 'Bob Dole')])", "Filter([('name', 'Bob Dole'), ('roll', 27)])")
else:
assert repr(single_ops) in ("Filter([(u'roll', 27), (u'name', u'Bob Dole')])", "Filter([(u'name', u'Bob Dole'), (u'roll', 27)])")
def test_setdefault(self, empty_ops):
assert len(empty_ops.operations) == 0
empty_ops.setdefault('fnord', 42)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
empty_ops.setdefault('fnord', 27)
assert len(empty_ops.operations) == 1
assert empty_ops.operations['fnord'] == 42
def test_ops_shallow_copy(self, single_ops):
assert single_ops.operations == single_ops.copy().operations
class TestOperationsCombination(object):
def test_operations_and_clean_merge(self):
comb = Filter({'roll': 27}) & Filter({'foo': 42})
assert comb.as_query == {'roll': 27, 'foo': 42}
def test_operations_and_operator_overlap(self):
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$gte': 27, '$lte': 42}}
def test_paradoxical_condition(self):
comb = Filter({'roll': 27}) & Filter({'roll': {'$lte': 42}})
assert comb.as_query == {'roll': {'$eq': 27, '$lte': 42}}
comb = Filter({'roll': {'$gte': 27}}) & Filter({'roll': 42})
assert list(comb.as_query['roll'].items()) in ([('$gte', 27), ('$eq', 42)], [('$eq', 42), ('$gte', 27)])
def test_operations_or_clean_merge(self):
comb = Filter({'roll': 27}) | Filter({'foo': 42})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}]}
comb = comb | Filter({'bar': 'baz'})
assert comb.as_query == {'$or': [{'roll': 27}, {'foo': 42}, {'bar': 'baz'}]}
def test_operations_hard_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'$and': [{'c': 3}]})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}, {'c': 3}]}
def test_operations_soft_and(self):
comb = Filter({'$and': [{'a': 1}, {'b': 2}]}) & Filter({'c': 3})
assert comb.as_query == {'$and': [{'a': 1}, {'b': 2}], 'c': 3}
| sliwinski-milosz/json_validator | tests/test_json_validator.py | Python | mit | 4,213 | 0 |
import unittest
import os
from json_validator.validator import validate_params, ValidationError
arg1 = "something"
arg2 = "something_else"
schema_dirpath = os.path.dirname(os.path.realpath(__file__))
schema_filepath = os.path.join(schema_dirpath, "schema.json")
correct_params = {"param1": "some string",
"param2": ["string_in_array", "string_in_array2"]}
wrong_params = {"param1": ["string_in_array", "string_in_array2"],
"param2": "string"}
class JsonValidatorTest(unittest.TestCase):
def test_default_params_var_name(self):
@validate_params(schema_filename=schema_filepath)
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_non_default_params_var_name(self):
@validate_params(schema_filename=schema_filepath,
params_variable="params_test")
def test_function(first_arg, second_arg, params_test):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_debug(self):
@validate_params(schema_filename=schema_filepath,
debug=True)
def test_function(first_arg, second_arg, params):
return "Returned by function"
with self.assertRaises(ValidationError):
test_function(arg1, arg2, wrong_params)
def test_message(self):
@validate_params(schema_filename=schema_filepath,
message="Message test!")
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Message test!'}
)
def test_decorator_without_arguments(self):
@validate_params
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(
test_function(arg1, arg2, correct_params),
"Returned by function"
)
self.assertEqual(
test_function(arg1, arg2, wrong_params),
{'status': 'Wrong params!'}
)
def test_none_params(self):
@validate_params
def test_function(first_arg, second_arg, params):
return "Returned by function"
self.assertEqual(test_function(arg1, arg2, params=None),
{'status': 'Wrong params!'})
def test_no_params_at_all(self):
@validate_params
def test_function(first_arg, second_arg):
return "Returned by function"
self.assertRaises(
Exception,
test_function, arg1, arg2
)
def test_get_params_from_args(self):
@validate_params
def test_function(params):
return "Returned by function"
self.assertEqual(
test_function(correct_params),
"Returned by function"
)
def test_get_params_from_kwargs(self):
@validate_params
def test_function(params):
return "Returned by function"
self.assertEqual(
test_function(params=correct_params),
"Returned by function"
)
def save_schema_to_json():
'''
Save some example schema to json file
'''
import json
schema = {
"required": [
"param1"
],
"type": "object",
"properties": {
"param1": {
"type": "string"
},
"param2": {
"type": "array"
}
}
}
with open("schema.json", "w") as jsonout:
json.dump(schema, jsonout, indent=4)
if __name__ == '__main__':
unittest.main()
| SKIRT/PTS | core/extract/scaling.py | Python | agpl-3.0 | 3,571 | 0.002241 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.extract.scaling Contains the ScalingExtractor class, used for extracting scaling information
# from a simulation's log files.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.table import Table
# -----------------------------------------------------------------
class ScalingExtractor(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# -- Attributes --
# The parallelization mode
self.mode = None
# The number of processes and threads
self.processes = None
self.threads = None
# The path to the scaling file
self.scaling_file_path = None
# The scaling table
self.table = None
# The timeline and memory usage tables
self.timeline = None
self.memory = None
# -----------------------------------------------------------------
def run(self, simulation, timeline, memory):
"""
This function ...
:return:
:param simulation:
:param timeline:
:param memory:
"""
# Set the parallelization mode
self.mode = simulation.analysis.scaling_run_name.split("__")[4]
# Set the number of processes and threads
self.processes = simulation.processes()
self.threads = simulation.threads()
# Set the path to the scaling file
self.scaling_file_path = simulation.analysis.scaling_data_file
# Cache local references to the timeline and memory usage tables
self.timeline = timeline
self.memory = memory
# Write the relevant of the current simulation
self.write()
# Read in the extracted scaling table
self.read()
# Return the scaling table
return self.table
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Open the output file
resultfile = open(self.scaling_file_path, 'a')
# Add a line to the output file containing the runtimes for the current simulation
resultfile.write(self.mode + ' ' + str(self.processes) + ' ' + str(self.threads) + ' ' + str(self.timeline.setup)
+ ' ' + str(self.timeline.stellar) + ' ' + str(self.timeline.spectra) + ' ' + str(self.timeline.dust)
+ ' ' + str(self.timeline.writing) + ' ' + str(self.timeline.waiting) + ' ' + str(self.timeline.communication)
+ ' ' + str(self.timeline.total) + ' ' + str(self.memory.peak) + '\n')
# Close the output file
resultfile.close()
# -----------------------------------------------------------------
def read(self):
"""
This function ...
:return:
"""
# Read in the scaling data file
self.table = Table.read(self.scaling_file_path, format="ascii.ecsv")
# -----------------------------------------------------------------
| bow/crimson | tests/test_star.py | Python | bsd-3-clause | 2,311 | 0 |
"""star subcommand tests"""
# (c) 2015-2021 Wibowo Arindrarto <contact@arindrarto.dev>
import json
import pytest
from click.testing import CliRunner
from crimson.cli import main
from .utils import get_test_path
@pytest.fixture(scope="module")
def star_fail():
runner = CliRunner()
in_file = get_test_path("star_nope.txt")
result = runner.invoke(main, ["star", in_file])
return result
@pytest.fixture(scope="module")
def star_v230_01():
runner = CliRunner()
in_file = get_test_path("star_v230_01.txt")
result = runner.invoke(main, ["star", in_file])
result.json = json.loads(result.output)
return result
@pytest.fixture(scope="module")
def star_v230_02():
runner = CliRunner()
in_file = get_test_path("star_v230_02.txt")
result = runner.invoke(main, ["star", in_file])
result.json = json.loads(result.output)
return result
def test_star_fail_exit_code(star_fail):
assert star_fail.exit_code != 0
def test_star_fail_output(star_fail):
err_msg = "Unexpected file structure. No contents parsed."
assert err_msg in star_fail.output
@pytest.mark.parametrize(
"attr, exp",
[
("avgDeletionLength", 1.36),
("avgInputLength", 98),
("avgInsertionLength", 1.21),
("avgMappedLength", 98.27),
("mappingSpeed", 403.16),
("nInput", 14782416),
("nMappedMultipleLoci", 1936775),
("nMappedTooManyLoci", 27644),
("nSplicesATAC", 2471),
("nSplicesAnnotated", 3780876),
("nSplicesGCAG", 22344),
("nSplicesGTAG", 3780050),
("nSplicesNonCanonical", 5148),
("nSplicesTotal", 3810013),
("nUniquelyMapped", 12347431),
("pctMappedMultipleLoci", 13.1),
("pctMappedTooManyLoci", 0.19),
("pctUniquelyMapped", 83.53),
("pctUnmappedForOther", 0.03),
("pctUnmappedForTooManyMismatches", 0.0),
("pctUnmappedForTooShort", 3.16),
("rateDeletionPerBase", 0.0),
("rateInsertionPerBase", 0.0),
("rateMismatchPerBase", 0.24),
("timeEnd", "Dec 11 19:01:56"),
("timeJobStart", "Dec 11 18:55:02"),
("timeMappingStart", "Dec 11 18:59:44"),
],
)
def test_star_v230_01(star_v230_01, attr, exp):
assert star_v230_01.json.get(attr) == exp, attr
| rackn/container-networking-ansible | test/common/library/ec2_vpc_facts.py | Python | apache-2.0 | 1,587 | 0.00063 |
#!/usr/bin/python
#
# Retrieve information on an existing VPC.
#
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
import boto.vpc
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource_tags=dict(type='dict', required=True)
))
module = AnsibleModule(argument_spec=argument_spec)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg="region must be specified")
try:
connection = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
vpcs = connection.get_all_vpcs()
vpcs_w_resources = filter(
lambda x: x.tags == module.params.get('resource_tags'), vpcs)
if len(vpcs_w_resources) != 1:
if len(vpcs_w_resources) == 0:
module.fail_json(msg="No vpc found")
else:
module.fail_json(msg="Multiple VPCs with specified resource_tags")
vpc = vpcs_w_resources[0]
subnets = connection.get_all_subnets(filters={'vpc_id': vpc.id})
def subnet_data(s):
d = s.__dict__
del d["connection"]
del d["region"]
return d
data = map(subnet_data, subnets)
facts = {
'ec2_vpc': {
'id': vpc.id,
'subnets': data
}
}
module.exit_json(changed=False, ansible_facts=facts)
main()
| ecolitan/projecteuler-answers | Multiples_of_3_and_5.py | Python | gpl-2.0 | 260 | 0.015385 |
#If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
print sum([x for x in xrange(1,1000) if (x % 3 == 0) or (x % 5 == 0)])
| ISS-Mimic/Mimic | Pi/kivytest/Test_Kivy.py | Python | mit | 225 | 0.026667 |
import os
os.environ['KIVY_GL_BACKEND'] = 'gl' #need this to fix a kivy segfault that occurs with python3 for some reason
from kivy.app import App
class TestApp(App):
pass
if __name__ == '__main__':
TestApp().run()
| felipenaselva/felipe.repository | script.module.cryptopy/lib/crypto/cipher/rijndael.py | Python | gpl-2.0 | 14,723 | 0.051484 |
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.rijndael
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,29 or 32 bytes'
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,29 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
for round in range(1,self.Nr): #for round = 1 step 1 to Nr1
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
assert ( len(bs) == 4*self.Nb ), 'Rijndarl blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0
| metomi/rose | demo/rose-config-edit/demo_meta/app/05-validate/meta/lib/python/macros/null.py | Python | gpl-3.0 | 765 | 0 |
#!/usr/bin/env python3
# Copyright (C) British Crown (Met Office) & Contributors.
# -----------------------------------------------------------------------------
import metomi.rose.macro
class NullChecker(metomi.rose.macro.MacroBase):
"""Class to report errors for missing or null settings."""
REPORTS_INFO = [
(None, None, None, "Warning for null section, null option"),
("made", "up", None, "Warning for non-data & non-metadata setting"),
]
def validate(self, config, meta_config):
"""Validate meaningless settings."""
self.reports = []
for section, option, value, message in self.REPORTS_INFO:
self.add_report(section, option, value, message, is_warning=True)
return self.reports
| gvnn/memd | setup.py | Python | mit | 208 | 0.004808 |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='memd',
version='0.0.1',
url='https://github.com/gvnn/memd',
packages=['memd'],
install_requires=['python-memcached']
)
| AdrianGaudebert/socorro | alembic/versions/495bf3fcdb63_fix_invalid_notice_in_update_signatures_hourly.py | Python | mpl-2.0 | 692 | 0.011561 |
"""fix invalid RAISE NOTICE in update_signatures_hourly.
Revision ID: 495bf3fcdb63
Revises: 3f007539efc
Create Date: 2014-07-07 20:33:34.634141
"""
# revision identifiers, used by Alembic.
revision = '495bf3fcdb63'
down_revision = '1baef149e5d1'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['update_signatures_hourly.sql'])
def downgrade():
load_stored_proc(op, ['update_signatures_hourly.sql'])
| Sofia2/python-api | src/ssap/utils/strings.py | Python | apache-2.0 | 334 | 0.015015 |
# -*- coding: utf8 -*-
'''
Python SSAP API
Version 1.5
© Indra Sistemas, S.A.
2014 SPAIN
All rights reserved
'''
import sys
def bytes2String(data):
'''
Converts a Python 3 bytes object to a string.
'''
if sys.version_info[0] < 3:
return data
else:
return data.decode("utf-8")
| wojons/transcribe | helpers/workerBase.py | Python | mit | 4,287 | 0.013529 |
import time, types, os, sys, signal
import multiprocessing
"""
This class is simple enough to allow workers which take incomming log lines and do things with them.
I really dont know what people will want to do with there logs and how they will want to output them
but this is where they will be able to control the output system.
"""
workers = {}
def workerTarget(*args, **kw):
global workers
method_name = args[0]
return getattr(workers[args[0]], 'worker_loop')(*args[1:])
class workerBase(object):
def __init__(self, name="master", pool_size=0, queue_size=1024):
self.queue_size, self.pool_size, self.name = queue_size, pool_size, name
self.queue_active = True
self.workers_stopped = False
signal.signal(signal.SIGINT, self.signal_handler) #shut down hoandler
signal.signal(signal.SIGTERM, self.signal_handler) #shut down hoandler
if self.pool_size == 0: #if no size is set lets use the total number of processses the system have
self.pool_size = multiprocessing.cpu_count()
global workers
workers[name] = self
def run(self):
if isinstance (self.worker_loop, types.MethodType):
args = list()
args.insert(0, self.name)
self.queue = multiprocessing.Queue(self.queue_size)
self.pool = multiprocessing.Pool(self.pool_size)
for x in range(0, self.pool_size): #avoid needing to run map but still get all the workers to start up
self.pool.apply_async(workerTarget, args)
return self
def queue_line(self, entry, metadata): #put the data in the queue
if self.queue_active == True:
try:
self.queue.put([entry, metadata], False) #should come up with a better method that the server will wait on false and try to queue there
except Queue.Full, e:
print (str(e))
except Exception, e:
sys.stderr.write("queue_line: "+str(e)+"\n")
else:
return False
def worker_stop(self):
self.queue_active = False
self.stop_loop = True
if self.workers_stopped == False:
while self.stop_loop == True:
if self.queue.empty == False:
time.sleep(1)
sys.stderr.write("Waiting for queue: "+queue+" to reach 0, currntly at "+str(self.queue.qsize()))
else:
try:
self.queue.close() # close the queue now since its empty
except:
pass
sys.stderr.write("Giving the workers a little more time to finish there last task\n")
self.stop_loop = False
self.workers_stopped = False
time.sleep(2)
try:
sys.stderr.write("Closing pool\n")
self.pool.close()
sys.stderr.write("after pool close\n")
finally:
sys.stderr.write("")
exit()
sys.stderr.write("")
exit(False)
def worker_loop(self): #to simplyfiy things this is the loop that feeds the data into the worker so users just need to handle data entry or what ever
while self.queue.empty == False or self.workers_stopped == False:
try:
#sys.stderr.write("Queue size: "+str(self.queue.qsize())+" @ "+str(time.time())+"\n")
todo = self.queue.get()
#print sys.stderr.write("Queue object: "+str(todo)+"\n")
self.worker(todo[0], todo[1])
#time.sleep(1)
except Queue.Empty, e:
print (str(e))
time.sleep(1)
except Exception, e:
sys.stderr.write("worker_loop: "+str(e)+"\n")
exit()
return True
def worker(self, entry, metadata):
raise NotImplementedError( "Write a method that gets run as a callback once for every log entry worker(self, entry, metadata)" )
def signal_handler(self, signal, frame):
self.worker_stop()
| joharei/QtChordii | utils/__init__.py | Python | gpl-3.0 | 21 | 0 |
__author__ = 'johan'
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python/test_topi_upsampling.py | Python | apache-2.0 | 2,484 | 0.002818 |
"""Test code for upsampling"""
import numpy as np
import tvm
import topi
import topi.testing
import math
def verify_upsampling(batch, in_channel, in_height, in_width, scale, layout='NCHW', method="NEAREST_NEIGHBOR"):
if layout == 'NCHW':
A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')
dtype = A.dtype
out_shape = (batch, in_channel, in_height*scale, in_width*scale)
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
elif layout == 'NHWC':
A = tvm.placeholder((batch, in_height, in_width, in_channel), name='A')
dtype = A.dtype
out_shape = (batch, in_height*scale, in_width*scale, in_channel)
a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
else:
raise NotImplementedError(
'Layout not supported {} '.format(layout))
B = topi.nn.upsampling(A, scale, layout=layout, method=method)
if method == "BILINEAR":
out_size = (in_height*scale, in_width*scale)
b_np = topi.testing.bilinear_resize_python(a_np, out_size, layout)
else:
b_np = topi.testing.upsampling_python(a_np, scale, layout)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(B)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5, atol=1e-5)
for device in ['llvm', 'cuda', 'vulkan', 'nvptx']:
check_device(device)
def test_upsampling():
# NEAREST_NEIGHBOR - NCHW
verify_upsampling(8, 16, 32, 32, 2)
verify_upsampling(12, 32, 64, 64, 3)
# NEAREST_NEIGHBOR - NHWC
verify_upsampling(8, 16, 32, 32, 2, layout="NHWC")
verify_upsampling(12, 32, 64, 64, 3, layout="NHWC")
# BILINEAR - NCHW
verify_upsampling(2, 2, 32, 32, 2, method="BILINEAR")
verify_upsampling(2, 2, 32, 32, 3, method="BILINEAR")
# BILINEAR - NHWC
verify_upsampling(2, 2, 32, 32, 2, layout="NHWC", method="BILINEAR")
verify_upsampling(2, 2, 32, 32, 3, layout="NHWC", method="BILINEAR")
if __name__ == "__ma
|
in__":
test_upsampling()
| sniemi/SamPy | cosmology/cc.py | Python | bsd-2-clause | 6,151 | 0.017396 |
import sys
from math import *
if __name__ == '__main__':
try:
if sys.argv[1] == '-h':
print '''Cosmology calculator ala Ned Wright (www.astro.ucla.edu/~wright)
input values = redshift, Ho, Omega_m, Omega_vac
ouput values = age at z, distance in Mpc, kpc/arcsec, apparent to abs mag conversion
Options: -h for this message
-v for verbose response '''
sys.exit()
if sys.argv[1] == '-v':
verbose=1
length=len(sys.argv)-1
else:
verbose=0
length=len(sys.argv)
# if no values, assume Benchmark Model, input is z
if length == 2:
if float(sys.argv[1+verbose]) > 100:
z=float(sys.argv[1+verbose])/299790. # velocity to redshift
else:
z=float(sys.argv[1+verbose]) # redshift
H0 = 75 # Hubble constant
WM = 0.3 # Omega(matter)
WV = 1.0 - WM - 0.4165/(H0*H0) # Omega(vacuum) or lambda
# if one value, assume Benchmark Model with given Ho
elif length == 3:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = 0.3 # Omega(matter)
WV = 1.0 - WM - 0.4165/(H0*H0) # Omega(vacuum) or lambda
# if Univ is Open, use Ho, Wm and set Wv to 0.
elif length == 4:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = float(sys.argv[3+verbose]) # Omega(matter)
WV = 0.0 # Omega(vacuum) or lambda
# if Univ is General, use Ho, Wm and given Wv
elif length == 5:
z=float(sys.argv[1+verbose]) # redshift
H0 = float(sys.argv[2+verbose]) # Hubble constant
WM = float(sys.argv[3+verbose]) # Omega(matter)
WV = float(sys.argv[4+verbose]) # Omega(vacuum) or lambda
# or else fail
else:
print 'need some values or too many values'
sys.exit()
# initialize constants
WR = 0. # Omega(radiation)
WK = 0. # Omega curvaturve = 1-Omega(total)
c = 299792.458 # velocity of light in km/sec
Tyr = 977.8 # coefficent for converting 1/H into Gyr
DTT = 0.5 # time from z to now in units of 1/H0
DTT_Gyr = 0.0 # value of DTT in Gyr
age = 0.5 # age of Universe in units of 1/H0
age_Gyr = 0.0 # value of age in Gyr
zage = 0.1 # age of Universe at redshift z in units of 1/H0
zage_Gyr = 0.0 # value of zage in Gyr
DCMR = 0.0 # comoving radial distance in units of c/H0
DCMR_Mpc = 0.0
DCMR_Gyr = 0.0
DA = 0.0 # angular size distance
DA_Mpc = 0.0
DA_Gyr = 0.0
kpc_DA = 0.0
DL = 0.0 # luminosity distance
DL_Mpc = 0.0
DL_Gyr = 0.0 # DL in units of billions of light years
V_Gpc = 0.0
a = 1.0 # 1/(1+z), the scale factor of the Universe
az = 0.5 # 1/(1+z(object))
h = H0/100.
WR = 4.165E-5/(h*h) # includes 3 massless neutrino species, T0 = 2.72528
WK = 1-WM-WR-WV
az = 1.0/(1+1.0*z)
age = 0.
n=1000 # number of points in integrals
for i in range(n):
a = az*(i+0.5)/n
adot = sqrt(WK+(WM/a)+(WR/(a*a))+(WV*a*a))
age = age + 1./adot
zage = az*age/n
zage_Gyr = (Tyr/H0)*zage
DTT = 0.0
DCMR = 0.0
# do integral over a=1/(1+z) from az to 1 in n steps, midpoint rule
for i in range(n):
a = az+(1-az)*(i+0.5)/n
adot = sqrt(WK+(WM/a)+(WR/(a*a))+(WV*a*a))
DTT = DTT + 1./adot
DCMR = DCMR + 1./(a*adot)
DTT = (1.-az)*DTT/n
DCMR = (1.-az)*DCMR/n
age = DTT+zage
age_Gyr = age*(Tyr/H0)
DTT_Gyr = (Tyr/H0)*DTT
DCMR_Gyr = (Tyr/H0)*DCMR
DCMR_Mpc = (c/H0)*DCMR
# tangential comoving distance
ratio = 1.00
x = sqrt(abs(WK))*DCMR
if x > 0.1:
if WK > 0:
ratio = 0.5*(exp(x)-exp(-x))/x
else:
ratio = sin(x)/x
else:
y = x*x
if WK < 0: y = -y
ratio = 1. + y/6. + y*y/120.
DCMT = ratio*DCMR
DA = az*DCMT
DA_Mpc = (c/H0)*DA
kpc_DA = DA_Mpc/206.264806
DA_Gyr = (Tyr/H0)*DA
DL = DA/(az*az)
DL_Mpc = (c/H0)*DL
DL_Gyr = (Tyr/H0)*DL
# comoving volume computation
ratio = 1.00
x = sqrt(abs(WK))*DCMR
if x > 0.1:
if WK > 0:
ratio = (0.125*(exp(2.*x)-exp(-2.*x))-x/2.)/(x*x*x/3.)
else:
ratio = (x/2. - sin(2.*x)/4.)/(x*x*x/3.)
else:
y = x*x
if WK < 0: y = -y
ratio = 1. + y/5. + (2./105.)*y*y
VCM = ratio*DCMR*DCMR*DCMR/3.
V_Gpc = 4.*pi*((0.001*c/H0)**3)*VCM
if verbose == 1:
print 'For H_o = ' + '%1.1f' % H0 + ', Omega_M = ' + '%1.2f' % WM + ', Omega_vac = ',
print '%1.2f' % WV + ', z = ' + '%1.3f' % z
print 'It is now ' + '%1.1f' % age_Gyr + ' Gyr since the Big Bang.'
print 'The age at redshift z was ' + '%1.1f' % zage_Gyr + ' Gyr.'
print 'The light travel time was ' + '%1.1f' % DTT_Gyr + ' Gyr.'
print 'The comoving radial distance, which goes into Hubbles law, is',
print '%1.1f' % DCMR_Mpc + ' Mpc or ' + '%1.1f' % DCMR_Gyr + ' Gly.'
print 'The comoving volume within redshift z is ' + '%1.1f' % V_Gpc + ' Gpc^3.'
print 'The angular size distance D_A is ' + '%1.1f' % DA_Mpc + ' Mpc or',
print '%1.1f' % DA_Gyr + ' Gly.'
print 'This gives a scale of ' + '%.2f' % kpc_DA + ' kpc/".'
print 'The luminosity distance D_L is ' + '%1.1f' % DL_Mpc + ' Mpc or ' + '%1.1f' % DL_Gyr + ' Gly.'
print 'The distance modulus, m-M, is '+'%1.2f' % (5*log10(DL_Mpc*1e6)-5)
else:
print '%1.2f' % zage_Gyr,
print '%1.2f' % DCMR_Mpc,
print '%1.2f' % kpc_DA,
print '%1.2f' % (5*log10(DL_Mpc*1e6)-5)
except IndexError:
print 'need some values or too many values'
except ValueError:
print 'nonsense value or option'
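# Invocation sketch (illustration only, not part of the original script): assuming
# this file is saved as cosmocalc.py and `verbose` was set from an optional leading
# flag earlier in the file, a flat-Lambda run for z = 0.5 could look like:
#
#   python cosmocalc.py 0.5 70 0.3 0.7
#
# i.e. redshift, H0 in km/s/Mpc, Omega_M and Omega_vac; the script then prints the
# age, light travel time, comoving distance, angular size and luminosity distances
# computed above.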
|
gabinetedigital/gd
|
gd/govpergunta/__init__.py
|
Python
|
agpl-3.0
| 11,253
| 0.00329
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2011 Governo do Estado do Rio Grande do Sul
#
# Author: Lincoln de Sousa <lincoln@gg.rs.gov.br>
# Author: Rodrigo Sebastiao da Rosa <rodrigo-rosa@procergs.rs.gov.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Web application definitions to the govp tool"""
from json import loads
from flask import Blueprint, render_template, redirect, current_app
from gd.utils.gdcache import cache, fromcache, tocache, removecache
from gd import auth
from gd.content.wp import wordpress #, gallery
from gd.utils import msg, format_csrf_error, dumps, twitts
from gd.govpergunta.forms import ContribForm
from gd.model import Contrib, session
# from gd.govpergunta.pairwise import Pairwise
THEMES = {'cuidado': u'Cuidado Integral',
'familia': u'Saúde da Família',
'emergencia': u'Urgência e Emergência',
'medicamentos': u'Acesso a Medicamentos',
'regional': u'Saúde na sua Região'}
govpergunta = Blueprint(
'govpergunta', __name__,
template_folder='templates',
static_folder='static')
# @govpergunta.route('/contribuir')
# def index():
# """Renders the index template"""
# form = ContribForm()
# return render_template('govpergunta.html', wp=wordpress, form=form)
# def _get_pairwise():
# """Helper function to get the pairwise instance saved in the
# session"""
# if ('pairwise' not in fsession) or \
# (fsession['version'] != PAIRWISE_VERSION):
# fsession['pairwise'] = Pairwise()
# fsession['version'] = PAIRWISE_VERSION
# return fsession['pairwise']
# @govpergunta.route('/')
# def index():
# pairwise = _get_pairwise()
# pair = pairwise.get_pair()
# fsession.modified = True
# return render_template(
# 'vote.html',
# pair=pair,
# theme=THEMES[pair['left'].theme]
# )
# @govpergunta.route('/invalidate')
# def invalidate():
# """With 50 votes, the user will be redirected to the index page and
# it's pairwise session will be destroied"""
# del fsession['pairwise']
# return redirect(url_for('index'))
# @govpergunta.route('/add_vote', methods=('POST',))
# def add_vote():
# if ('pairwise' not in fsession) or \
# (fsession['version'] != PAIRWISE_VERSION):
#         return redirect(url_for('.index'))
# pairwise = fsession['pairwise']
# try:
# pairwise.vote(
# request.values.get('direction'),
# request.values.get('token'))
# fsession.modified = True
#     except InvalidTokenError:
# pass
# return redirect(url_for('.index'))
@govpergunta.route('/')
def index():
return redirect('/govpergunta/resultados/')
# pagination, posts = wordpress.getPostsByTag(
# tag='governador-pergunta')
# images = gallery.search('GovernadorPergunta', limit=24)[::-1]
# videos = [wordpress.wpgd.getVideo(i) for i in (14, 16, 12)]
# return render_template(
# 'results.html', posts=posts, images=images, videos=videos)
@govpergunta.route('/resultados/')
@govpergunta.route('/resultados/<int:ano>/')
# @cache.memoize()
def resultados(ano=2012):
"""Renders a wordpress page special"""
cn = 'results-{0}'.format(ano)
slideshow = fromcache(cn) or tocache(cn,wordpress.getRecentPosts(
category_name='destaque-govpergunta-%s' % str(ano),
post_status='publish',
numberposts=4,
thumbsizes=['slideshow']))
categoria = 'resultados-gov-pergunta-%s' % str(ano)
retorno = fromcache("contribs-{0}".format(ano)) or \
tocache("contribs-{0}".format(ano) ,wordpress.wpgovp.getContribuicoes(principal='S',category=categoria))
menus = fromcache('menuprincipal') or tocache('menuprincipal', wordpress.exapi.getMenuItens(menu_slug='menu-principal') )
try:
twitter_hash_cabecalho = twitts()
except KeyError:
twitter_hash_cabecalho = ""
questions = None
for q in retorno:
if isinstance(q, list):
questions = q
return render_template(
'resultados.html',
menu=menus,
questions=questions,
sidebar=wordpress.getSidebar,
twitter_hash_cabecalho=twitter_hash_cabecalho,
ano=ano,
slideshow=slideshow,
wp=wordpress
)
@govpergunta.route('/resultados-detalhe/<int:postid>/')
# @cache.memoize()
def resultado_detalhe(postid):
"""Renders a contribution detail"""
principal = fromcache("res-detalhe-{0}".format(postid)) or \
tocache("res-detalhe-{0}".format(postid),wordpress.wpgovp.getContribuicoes(principal='S',postID=postid))
# print "PRINCIPAL +++++++++++++++++++++", principal[1][0]
retorno = fromcache("contribs-detalhe-{0}".format(postid)) or \
tocache("contribs-detalhe-{0}".format(postid),wordpress.wpgovp.getContribuicoes(principal='N',postID=postid))
# print "RETORNO +++++++++++++++++++++", retorno
comts = fromcache("com-res-detalhe-{0}".format(postid)) or \
tocache("com-res-detalhe-{0}".format(postid),wordpress.getComments(status='approve',post_id=postid))
qtd = retorno[0]
detalhes = retorno[1]
return render_template(
'resultados-detalhes.html',
agregadas=detalhes,
qtd_agregadas=qtd,
principal=principal[1][0],
comments=comts,
postid=postid
)
@govpergunta.route('/results/<path:path>')
# @cache.memoize()
def results_page(path):
page = fromcache("page-{0}".format(path)) or tocache("page-{0}".format(path),wordpress.getPageByPath(path))
return render_template('results_page.html', page=page)
@govpergunta.route('/contrib_json', methods=('POST',))
def contrib_json():
"""Receives a user contribution and saves to the database
This function will return a JSON format with the result of the
operation. That can be successful or an error, if it finds any
problem in data received or the lack of the authentication.
"""
if not auth.is_authenticated():
return msg.error(_(u'User not authenticated'))
raise Exception('Not funny')
form = ContribForm(csrf_enabled=False)
if form.validate_on_submit():
Contrib(
title=form.data['title'].encode('utf-8'),
content=form.data['content'].encode('utf-8'),
theme=form.data['theme'],
user=auth.authenticated_user())
session.commit()
# Returning the csrf
data = { 'data': _('Contribution received successful') }
data.update({ 'csrf': form.csrf.data })
return msg.ok(data)
else:
return format_csrf_error(form, form.errors, 'ValidationError')
# -- JSON API that publishes contributions
def _format_contrib(contrib):
"""Returns a dictionary representation of a contribution"""
return {
'id': contrib.id,
'title': contrib.title,
'content': contrib.content,
'creation_date': contrib.creation_date,
'theme': contrib.theme,
'moderation': contrib.moderation,
}
@govpergunta.route('/contribs/all.json')
# @cache.cached()
def contribs_all():
"""Lists all contributions in the JSON format"""
r = fromcache("contribs_all_") or tocache("contribs_all_",dumps([
_format_contrib(i)
for i in Contrib.query.filter_by(status=True)]))
return r
@govpergunta.route('/contribs/user.json')
# @cache.cached()
def contribs_user():
"""Lists all contributions in the JSON format"""
try:
|
grahambell/taco-python
|
doc/conf.py
|
Python
|
gpl-3.0
| 8,346
| 0.005991
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Taco Module for Python documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 17 16:17:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../lib'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Taco Module for Python'
copyright = '2014, Graham Bell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Co
|
pyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TacoModuleforPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TacoModuleforPython.tex', 'Taco Module for Python Documentation',
'Graham Bell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tacomoduleforpython', 'Taco Module for Python Documentation',
['Graham Bell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TacoModuleforPython', 'Taco Module for Python Documentation',
'Graham Bell', 'TacoModuleforPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL address
|
hpleva/pyemto
|
pyemto/examples/alloy_discovery/make_alloy_final.py
|
Python
|
mit
| 8,645
| 0.014112
|
import pyemto
import numpy as np
import os
latpath = "../../../../" # Path to bmdl, kstr and shape directories
# each system need to have same number of alloy elements
#systems = [['Fe','Al'],['Fe','Cr']]
#systems = [['Fe'],['Al']]
systems = [['Al']]
#concentrations = [[0.5,0.5]]
concentrations = [[1.0]]
magn = "NM" # Possible NM (Non-magnetic), FM (ferromagnetic) and
# DLM (Disordered local moments)
initial_sws = 3.0
# Check that initialsws is correct format
if type(initial_sws) is float:
initial_sws = [initial_sws for x in range(3)]
elif type(initial_sws) is list:
pass
else:
print("ERROR: Initialsws should be float or list of 3 floats")
exit()
if not len(initial_sws) == 3:
print("ERROR: intialsws shoubd be a float or list of 3 floats!")
exit()
# Sanity checks
for s in systems:
if not len(s) == len(systems[0]):
print("Each system need to have same number of alloy elements!")
exit()
for c in concentrations:
if not len(c) == len(systems[0]):
print("Each given concetrations must have same number number as elements in system!")
exit()
# Next check magnetic states of system and initialize splits
splits = []
if magn == "FM":
afm = "F"
for s in systems:
splt = []
for atom in s:
if atom == "Fe":
splt.append(2.0)
else:
splt.append(0.5)
splits.append(splt)
elif magn == "DLM":
afm = "F"
    # First duplicate each atom and concentration
newsystems = []
newconcs = []
for i in range(len(systems)):
news = []
newc = []
splt = []
for j in range(len(systems[i])):
news.append(systems[i][j])
news.append(systems[i][j])
if systems[i][j] == "Fe":
splt.append( 2.0)
splt.append(-2.0)
else:
splt.append( 0.5)
splt.append(-0.5)
splits.append(splt)
newsystems.append(news)
systems = newsystems
for c in concentrations:
newc = []
for conc in c:
newc.append(conc)
newc.append(conc)
newconcs.append(newc)
concentrations = newconcs
elif magn == "NM":
afm = "P"
for s in systems:
splt = []
for atom in s:
splt.append(0.0)
splits.append(splt)
else:
print("Wrong magnetic state is given: " + magn)
print("Should be one of NM, FM or DLM!")
exit()
results = []
#We are ready to make inputs
for si in range(len(systems)):
s = systems[si]
split = splits[si]
# Create main directory
sname = ""
if magn == "DLM":
nlist = [s[i] for i in range(0,len(s),2)]
else:
nlist = s
for atom in nlist:
sname = sname + atom
#
# Make directories
if not os.path.lexists(sname):
os.makedirs(sname)
for c in concentrations:
sc_res = []
# Make subdirectory for concentration
cname = ""
count = 0
if magn == "DLM":
clist = [c[i] for i in range(0,len(c),2)]
else:
clist = c
for conc in clist:
count += 1
cname = cname +str(int(conc*1000)).zfill(4)
if not count == len(clist):
cname = cname+"-"
apath = os.path.join(sname,cname)
if not os.path.lexists(apath):
os.makedirs(apath)
# Make subdirectory for magnetic state
apath = os.path.join(apath,magn)
if not os.path.lexists(apath):
os.makedirs(apath)
# Construct base jobname
jobname = ""
for i in range(len(nlist)):
if jobname == "":
pass
else:
jobname = jobname + "_"
jobname = jobname + nlist[i].lower() + "%4.2f" % (clist[i])
finalname = jobname + "_final"
# BCC first
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[0] # We need some clever way to get this
alloy.bulk(lat='bcc', jobname=jobname+"_bcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
sc_res.append([e0,B0,sws0])
alloy.bulk(lat='bcc',
jobname=finalname+"_bcc",
latpath=latpath,
sws=sws0,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21)
alloy.write_inputs()
# FCC second
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[1] # We need some clever way to get this
alloy.bulk(lat='fcc', jobname=jobname+"_fcc",atoms=s,concs=c,
latpath=latpath,sws=initialsws, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
sws0, B0, e0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
sc_res.append([e0,B0,sws0])
alloy.bulk(lat='fcc',
jobname=finalname+"_fcc",
latpath=latpath,
sws=sws0,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21)
alloy.write_inputs()
# HCP last
alloy = pyemto.System(folder=apath)
initialsws = initial_sws[2] # We need some clever way to get this
alloy.bulk(lat='hcp',jobname=jobname,latpath=latpath,
               sws=initialsws, atoms = s,concs = c, xc='PBE')
swsrange = np.linspace(initialsws-0.1,initialsws+0.1,7) # A list of 7 different volumes
#alloy.lattice_constants_batch_generate(sws=swsrange)
        sws0, c_over_a0, B0, e0, R0, cs0 = alloy.lattice_constants_analyze(sws=swsrange,prn=False)
alloy.sws = sws0
ca = round(c_over_a0,3)
sc_res.append([e0,B0,sws0,c_over_a0])
        # Check if bmdl, kstr and shape exist with the correct c over a
hcpname ="hcp_"+str(ca) # Structure name
strucpath = "../"
# Check if input files are in place
if os.path.exists(os.path.join(strucpath,hcpname+".bmdl")):
pass
else:
print("Making structures")
# make input files
alloy.lattice.set_values(jobname=hcpname,latpath="",
lat='hcp',kappaw=[0.0,-20.0],msgl=0,ca=ca,
dmax=2.2)
alloy.lattice.bmdl.write_input_file(folder=strucpath)
alloy.lattice.kstr.write_input_file(folder=strucpath)
alloy.lattice.shape.write_input_file(folder=strucpath)
alloy.lattice.batch.write_input_file(folder=strucpath)
# Make kfcd and kgrn input files
alloy.bulk(lat='hcp',
jobname=finalname+"_hcp",
latpath=latpath,
latname=hcpname,
sws=sws0,
ca= ca,
atoms = s,
concs = c,
splts = split,
afm = afm,
amix=0.02,
efmix=0.9,
expan='M',
sofc='Y',
xc='PBE',
nky=21,
nkz=17)
alloy.write_inputs()
results.append([[s,c],sc_res])
print("Results obtained:")
for r in results:
# Generate system name
sname = ""
for i in range(len(r[0][0])):
sna
|
robcarver17/pysystemtrade
|
systems/accounts/account_costs.py
|
Python
|
gpl-3.0
| 12,395
| 0.002098
|
import pandas as pd
from syscore.algos import calculate_weighted_average_with_nans
from syscore.genutils import str2Bool
from syscore.dateutils import ROOT_BDAYS_INYEAR
from syscore.pdutils import turnover
from sysquant.estimators.turnover import turnoverDataForTradingRule
from systems.system_cache import diagnostic, input
from systems.accounts.account_inputs import accountInputs
class accountCosts(accountInputs):
@diagnostic()
def get_SR_cost_for_instrument_forecast(
self, instrument_code: str, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination
:param instrument_code: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
KEY OUTPUT
"""
transaction_cost = self.get_SR_transaction_cost_for_instrument_forecast(
instrument_code = instrument_code,
rule_variation_name = rule_variation_name
)
holding_cost = self.get_SR_holding_cost_only(instrument_code)
return transaction_cost + holding_cost
@diagnostic()
def get_SR_transaction_cost_for_instrument_forecast(
        self, instrument_code: str, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination
:param instrument_code: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
KEY OUTPUT
"""
use_pooled_costs = str2Bool(
self.config.forecast_cost_estimates["use_pooled_costs"]
)
if use_pooled_costs:
SR_cost = self._get_SR_cost_for_rule_with_pooled_costs(
instrument_code, rule_variation_name
)
else:
SR_cost = self._get_SR_cost_of_rule_for_individual_instrument(
instrument_code, rule_variation_name
)
return SR_cost
@input
def _get_SR_cost_for_rule_with_pooled_costs(
self, instrument_code: str, rule_variation_name: str
) -> float:
instrument_code_list = self.has_same_rules_as_code(instrument_code)
SR_cost = self._get_SR_cost_instr_forecast_for_list(
instrument_code_list, rule_variation_name
)
return SR_cost
@diagnostic()
def _get_SR_cost_instr_forecast_for_list(
self, instrument_code_list: list, rule_variation_name: str
) -> float:
"""
Get the SR cost for a forecast/rule combination, averaged across multiple instruments
:param instrument_code_list: instrument to get values for
:type instrument_code: str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
"""
list_of_SR_cost = [
self._get_SR_cost_of_rule_for_individual_instrument(
instrument_code, rule_variation_name
)
for instrument_code in instrument_code_list
]
# weight by length
cost_weightings = self._get_forecast_length_weighting_for_list_of_instruments(
instrument_code_list, rule_variation_name
)
weighted_SR_costs = [
SR_cost * weight
for SR_cost, weight in zip(list_of_SR_cost, cost_weightings)
]
avg_SR_cost = sum(weighted_SR_costs)
return avg_SR_cost
@diagnostic()
def _get_forecast_length_weighting_for_list_of_instruments(
self, instrument_code_list: list, rule_variation_name: str
) -> list:
forecast_lengths = [
self._get_forecast_length_for_instrument_rule(
instrument_code, rule_variation_name
)
for instrument_code in instrument_code_list
]
total_length = float(sum(forecast_lengths))
weights = [
forecast_length / total_length for forecast_length in forecast_lengths
]
return weights
@diagnostic()
def _get_forecast_length_for_instrument_rule(
self, instrument_code: str, rule_variation_name: str
) -> int:
forecast = self.get_capped_forecast(instrument_code, rule_variation_name)
return len(forecast)
@diagnostic()
def _get_SR_cost_of_rule_for_individual_instrument(
self, instrument_code: str, rule_variation_name: str
) -> float:
# note the turnover may still be pooled..
turnover = self.forecast_turnover(instrument_code, rule_variation_name)
SR_cost = self.get_SR_cost_given_turnover(instrument_code, turnover)
return SR_cost
@diagnostic()
def get_SR_cost_given_turnover(
self, instrument_code: str, turnover: float
) -> float:
SR_cost_trading = self.get_SR_trading_cost_only_given_turnover(
instrument_code, turnover
)
SR_cost_holding = self.get_SR_holding_cost_only(instrument_code)
SR_cost = SR_cost_holding + SR_cost_trading
return SR_cost
def get_SR_trading_cost_only_given_turnover(
self, instrument_code: str, turnover: float
) -> float:
cost_per_trade = self.get_SR_cost_per_trade_for_instrument(instrument_code)
SR_cost_trading = turnover * cost_per_trade
return SR_cost_trading
def get_SR_holding_cost_only(self, instrument_code: str) -> float:
cost_per_trade = self.get_SR_cost_per_trade_for_instrument(instrument_code)
hold_turnovers = self.get_rolls_per_year(instrument_code) / 2.0
SR_cost_holding = hold_turnovers * cost_per_trade
return SR_cost_holding
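    # Worked illustration of the two cost formulas above (numbers are invented
    # for the example): with cost_per_trade = 0.01 SR units, an annual turnover
    # of 5 and 4 rolls per year, trading cost = 5 * 0.01 = 0.05 and holding
    # cost = (4 / 2) * 0.01 = 0.02, so get_SR_cost_given_turnover returns 0.07.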
@diagnostic()
def get_turnover_for_forecast_combination(
self, codes_to_use: list, rule_variation_name: str
) -> turnoverDataForTradingRule:
turnover_as_list = self._forecast_turnover_for_list_by_instrument(
codes_to_use, rule_variation_name=rule_variation_name
)
turnover_as_dict = dict(
[
(instrument_code, turnover)
for (instrument_code, turnover) in zip(codes_to_use, turnover_as_list)
]
)
turnover_data_for_trading_rule = turnoverDataForTradingRule(turnover_as_dict)
return turnover_data_for_trading_rule
@diagnostic()
def forecast_turnover(
self, instrument_code: str, rule_variation_name: str
) -> float:
use_pooled_turnover = str2Bool(
self.config.forecast_cost_estimates["use_pooled_turnover"]
)
if use_pooled_turnover:
turnover = self._forecast_turnover_pooled(
instrument_code, rule_variation_name
)
else:
turnover = self._forecast_turnover_for_individual_instrument(
instrument_code, rule_variation_name
)
return turnover
@diagnostic()
def _forecast_turnover_pooled(
self, instrument_code: str, rule_variation_name: str
) -> float:
instrument_code_list = self.has_same_rules_as_code(instrument_code)
turnover_for_SR = self._forecast_turnover_for_list(
instrument_code_list, rule_variation_name=rule_variation_name
)
return turnover_for_SR
@diagnostic()
def _forecast_turnover_for_list(
self, instrument_code_list: list, rule_variation_name: str
) -> float:
"""
Get the average turnover for a rule, over instrument_code_list
:param instrument_code_list: instruments to get values for
:type instrument_code_list: list of str
:param rule_variation_name: rule to get values for
:type rule_variation_name: str
:returns: float
"""
turnovers = self._forecast_turnover_for_list_by_instrument(
codes_to_use=instrument_code_list, rule_variation_name=rule_variation_name
)
# weight by length
weights = self._get_forecast_length_weighting_for_list_of_instrume
|
Nebucatnetzer/tamagotchi
|
pygame/lib/python3.4/site-packages/faker/providers/barcode/__init__.py
|
Python
|
gpl-2.0
| 761
| 0
|
# coding=utf-8
from __future__ import unicode_literals
from .. import BaseProvider
class Provider(BaseProvider):
def ean(self, length=13):
code = [self.random_digit() for i in range(length - 1)]
if length not in (8, 13):
raise AssertionError("length can only be 8 or 13")
if length == 8:
weights = [3, 1, 3, 1, 3, 1, 3]
elif length == 13:
weights = [1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
weighted_sum = sum([x * y for x, y in zip(code, weights)])
check_digit = (10 - weighted_sum % 10) % 10
        code.append(check_digit)
return ''.join(str(x) for x in code)
def ean8(self):
return self.ean(8)
def ean13(self):
return self.ean(13)
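# Usage sketch (hedged illustration; the provider is normally reached through a
# Faker instance rather than instantiated directly):
#
#   from faker import Faker
#   fake = Faker()
#   fake.ean13()   # e.g. '4006381333931'
#
# Check-digit illustration for the 12 leading digits 4 0 0 6 3 8 1 3 3 3 9 3 with
# the weights above: weighted sum = 4*1 + 0*3 + 0*1 + 6*3 + 3*1 + 8*3 + 1*1 + 3*3
# + 3*1 + 3*3 + 9*1 + 3*3 = 89, so the check digit is (10 - 89 % 10) % 10 = 1.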
|
lambdamusic/testproject
|
konproj/urls.py
|
Python
|
gpl-2.0
| 2,206
| 0.047597
|
from django.conf.urls.defaults import *
from django.conf import settings
prefix = settings.URL_PREFIX
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import RedirectView
urlpatterns = patterns('',) # init
# November 20, 2013
#
# We have three server types for now:
#
# 1) admin: shows only the admin, and live database (for stats)
#
# 2) local: shows everything, local database
#
# 3) live: shows only the site, and live database
#
#
# TODO: add a staging site + DB for testing features before live
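# Hypothetical settings sketch for the three modes described above (the flag names
# come from the settings lookups below; the values only illustrate a local dev box):
#
#   ADMIN_SERVER = False
#   LOCAL_SERVER = True
#   URL_PREFIX = ''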
if settings.ADMIN_SERVER:
# online admin: no other controllers are defined
from django.contrib import admin
admin.autodiscover()
urlpatterns += patterns('',
# standard admin urls
(r'^', include(admin.site.urls) ),
# April 8, 2014: hack to prevent 404 and 500 pages from throwing errors
url(r'^homepage$', RedirectView.as_view(url='/'), name='homepage'),
url(r'^contact$', RedirectView.as_view(url='/'), name='contact'),
)
else:
if settings.LOCAL_SERVER:
# load admin on LOCAL too, but on a sub-url path
from django.contrib import admin
admin.autodiscover()
# from myutils.adminextra import custom_admin_views
# from concepts.databrowse_load import *
urlpatterns += patterns('',
# Customized views for the application admin home
        # (r'^'+prefix+'admin/(concepts/)$', custom_admin_views.concepts),
# (r'^'+prefix+'admin/contributions/$', poms_custom_admin_views.contributions),
# standard admin urls
(r'^'+prefix+'admin/', include(admin.site.urls) ),
        # url(r'^'+prefix+'databrowse/(.*)', databrowse.site.root, name='databrowsehome'),
)
# standard urls for LOCAL & LIVE
urlpatterns += patterns('',
# Registration app
(r'^registration/', include('registration.backends.default.urls')),
# Koncepts app
url(r'^', include('koncepts.urls')),
)
if settings.LOCAL_SERVER: # ===> static files on local machine
urlpatterns += staticfiles_urlpatterns()
urlpatterns += patterns('',
(r'^media/uploads/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
|
Qix-/starfuse
|
starfuse/fs/mapped_file.py
|
Python
|
mit
| 7,231
| 0.001798
|
"""A pseudo-file mapped into memory
Provides better performance for frequent reads/writes,
and makes reading/writing easier via regions (windows)
of memory. Allows memory to be accessed via array reads/
writes as well.
"""
import mmap
import logging
log = logging.getLogger(__name__)
class ReadOnlyError(Exception):
"""The mapped file is flagged as read-only"""
def __init__(self, path):
super(ReadOnlyError, self).__init__('mapped file is flagged as read only: %s' % path)
class RegionOverflowError(Exception):
"""Data at an offset was requested but the offset was greater than the allocated size"""
def __init__(self, offset):
        super(RegionOverflowError, self).__init__('region overflow offset: %d (did you allocate?)' % offset)
class Region(object):
"""A virtual region of mapped memory
This class is a 'faked' mmap() result that allows for the finer allocation of memory mappings
beyond/below what the filesystem really allows. It is backed by true mmap()'d pages and
uses magic methods to achieve the appearance of being an isolated region of memory."""
    __slots__ = 'parent', 'base_offset', '__size', 'cursor'
def __init__(self, parent, base_offset, size):
self.parent = parent
self.base_offset = base_offset
self.__size = size
self.cursor = 0
def __len__(self):
return self.__size
def __str__(self):
return str(self.read(offset=0, length=len(self)))
def __enter__(self):
return self
def __exit__(self, tipo, value, traceback):
return self
def region(self, offset=-1, size=-1):
(offset, size) = self._sanitize_segment(offset, size)
return self.parent.region(self.base_offset + offset, size)
def _sanitize_segment(self, offset, length):
if offset >= len(self):
raise ValueError('offset falls outside region size')
elif offset < 0:
offset = self.cursor
if length == 0:
raise ValueError('length must be at least 1')
elif length < 0:
length = len(self) - offset
return (offset, length)
def read(self, length=-1, offset=-1, advance=True):
(offset, length) = self._sanitize_segment(offset, length)
offset += self.base_offset
result = self.parent.read(length, offset, advance=advance)
if advance:
self.cursor += len(result)
return result
def write(self, value, length=-1, offset=-1, advance=True):
if length < 0:
length = len(value)
        (offset, length) = self._sanitize_segment(offset, length)
offset += self.base_offset
result = self.parent.write(value, length, offset, advance=advance)
if advance:
self.cursor += result
return result
class MappedFile(Region):
"""Manages mmap()-ings of a file into vmem.
This class prevents virtual address space from growing too large by
re-using existing maps if the requested regions have already been mapped.
"""
def __init__(self, path, page_count, read_only=False):
# XXX TODO NOTE remove this line when write functionality is added.
read_only = True
# getting 'too many files open' error? increase the constant on the next line
# (must be an exponent of 2)
self._page_size = page_count * mmap.PAGESIZE
# make sure we're sane here - allocation granularity needs to divide into page size!
assert (self._page_size % mmap.ALLOCATIONGRANULARITY) == 0, 'page size is not a multiple of allocation granularity!'
self._file = open(path, 'r+b')
self._pages = dict()
self.read_only = read_only
self._path = path
self.cursor = 0
super(MappedFile, self).__init__(self, base_offset=0, size=len(self))
def __len__(self):
self._file.seek(0, 2)
size = self._file.tell()
return size
def __del__(self):
self.close()
def close(self):
"""Unmaps all mappings"""
for i in self._pages:
self._pages[i].close()
self._file.close()
def region(self, offset, size):
"""Requests a virtual region be 'allocated'"""
lower_page = offset - (offset % self._page_size)
upper_page = ((offset + size) // self._page_size) * self._page_size
lower_page_id = lower_page // self._page_size
upper_page_id = upper_page // self._page_size
# make sure we're mapped
for i in range(lower_page_id, upper_page_id + 1):
if i not in self._pages:
page_offset = i * self._page_size
page_size = min(self._page_size, len(self) - page_offset)
log.debug('mapping vfile page: id=%d offset=%d size=%d', i, page_offset, page_size)
self._pages[i] = mmap.mmap(self._file.fileno(), offset=page_offset, length=page_size)
# create a region
return Region(self, base_offset=offset, size=size)
def read(self, length=1, offset=-1, advance=True):
"""Reads data from the virtual region"""
(offset, length) = self._sanitize_segment(offset, length)
results = []
length = min(length, len(self))
abs_offset = offset
cur_page = abs_offset // self._page_size
abs_offset %= self._page_size
while length > 0:
readable = self._page_size - abs_offset
readable = min(readable, length)
results.append(self._pages[cur_page][abs_offset:abs_offset + readable])
length -= readable
abs_offset = 0
cur_page += 1
result = ''.join(results)
if advance:
self.cursor += len(result)
return result
def write(self, value, offset=-1, length=-1, advance=True):
if self.read_only:
raise ReadOnlyError(self._path)
# TODO
assert False, 'not implemented'
return 0
def __getitem__(self, offset):
if isinstance(offset, slice):
(start, fin, step) = offset.indices(len(self))
result = self.read(offset=start, length=fin - start)
if step not in [None, 1]:
result = result[::step]
return result
if not isinstance(offset, int):
raise TypeError('offset is not an integer: %s' % repr(offset))
if offset >= len(self):
raise RegionOverflowError(offset)
page = offset // self._page_size
rel_offset = offset % self._page_size
return self._pages[page][rel_offset]
def __setitem__(self, offset, value):
if self.read_only:
raise ReadOnlyError(self._path)
if isinstance(offset, slice):
raise ValueError('Slice assignment not supported in mapped files; assemble your data first and then write')
if not isinstance(offset, int):
raise TypeError('offset is not an integer: %s' % repr(offset))
if offset >= len(self):
raise RegionOverflowError(offset)
page = offset // self._page_size
rel_offset = offset % self._page_size
self._pages[page][rel_offset] = value
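# Usage sketch (an assumption for illustration, not part of the original module):
# map an existing file and read a small window through a Region.
#
#   mf = MappedFile('/tmp/example.bin', page_count=16)   # 16 * PAGESIZE per mapping
#   with mf.region(offset=0, size=64) as head:
#       data = head.read(16)   # first 16 bytes of the file
#   mf.close()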
|
miurahr/seahub
|
seahub/role_permissions/admin.py
|
Python
|
apache-2.0
| 102
| 0
|
# Copyright (c) 2012-2016 Seafile Ltd.
from django.contrib import admin
# Register your models here.
| ||
hasadna/anyway
|
anyway/parsers/schools_with_description_2020.py
|
Python
|
mit
| 6,488
| 0.003967
|
import logging
from datetime import datetime
import math
import numpy as np
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
from ..models import SchoolWithDescription2020
from ..utilities import init_flask, time_delta, chunks, ItmToWGS84
school_fields = {
"school_id": "סמל_מוסד",
"school_name": "שם_מוסד",
"municipality_name": "שם_רשות",
"yishuv_name": "שם_ישוב",
"institution_type": "סוג_מוסד",
"lowest_grade": "משכבה",
"highest_grade": "עד_שכבה",
"location_accuracy": "רמת_דיוק_מיקום",
"x": "X",
"y": "Y",
}
app = init_flask()
db = SQLAlchemy(app)
coordinates_converter = ItmToWGS84()
def get_numeric_value(value, func):
"""
:returns: value if parameter value exists OR None if the parameter value does not exist
"""
return func(value) if value and not np.isnan(value) else None
def get_str_value(value):
"""
:returns: value if parameter value exists OR None if the parameter value does not exist
"""
return value if value and value not in ["nan", "Nan", "NaN", "NAN"] else None
def get_schools_with_description(schools_description_filepath, schools_coordinates_filepath):
logging.info("\tReading schools description data from '%s'..." % schools_description_filepath)
df_schools = pd.read_excel(schools_description_filepath)
logging.info("\tReading schools coordinates data from '%s'..." % schools_coordinates_filepath)
df_coordinates = pd.read_excel(schools_coordinates_filepath)
schools = []
# get school_id
df_schools = df_schools.drop_duplicates(school_fields["school_id"])
# sort by school_id
df_schools = df_schools.sort_values(school_fields["school_id"], ascending=True)
all_schools_tuples = []
for _, school in df_schools.iterrows():
school_id = get_numeric_value(school[school_fields["school_id"]], int)
school_name = get_str_value(school[school_fields["school_name"]]).strip('"')
if school_id in list(df_coordinates[school_fields["school_id"]].values):
x_coord = df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id, school_fields["x"]
].values[0]
y_coord = df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id, school_fields["y"]
].values[0]
location_accuracy = get_str_value(
df_coordinates.loc[
df_coordinates[school_fields["school_id"]] == school_id,
school_fields["location_accuracy"],
].values[0]
)
else:
x_coord = None
y_coord = None
location_accuracy = None
if x_coord and not math.isnan(x_coord) and y_coord and not math.isnan(y_coord):
longitude, latitude = coordinates_converter.convert(x_coord, y_coord)
else:
longitude, latitude = (
None,
None,
) # otherwise yield will produce: UnboundLocalError: local variable referenced before assignment
# Don't insert duplicates of 'school_name','x', 'y'
school_tuple = (school_name, x_coord, y_coord)
        if school_tuple in all_schools_tuples:
            continue
        else:
all_schools_tuples.append(school_tuple)
school = {
"school_id": get_numeric_value(school[school_fields["school_id"]], int),
"school_name": school_name,
"municipality_name": get_str_value(school[school_fields["municipality_name"]]),
"yishuv_name": get_str_value(school[school_fields["yishuv_name"]]),
"institution_type": get_str_value(school[school_fields["institution_type"]]),
"lowest_grade": get_str_value(school[school_fields["lowest_grade"]]),
"highest_grade": get_str_value(school[school_fields["highest_grade"]]),
"location_accuracy": location_accuracy,
"longitude": longitude,
"latitude": latitude,
"x": x_coord,
"y": y_coord,
}
if school["institution_type"] in [
"בית ספר",
"תלמוד תורה",
"ישיבה קטנה",
'בי"ס תורני',
"ישיבה תיכונית",
'בי"ס חקלאי',
'בי"ס רפואי',
'בי"ס כנסייתי',
"אולפנה",
'בי"ס אקסטרני',
'בי"ס קיבוצי',
"תלמוד תורה ליד מעיין חינוך התורני",
'בי"ס מושבי',
]:
schools.append(school)
return schools
def truncate_schools_with_description():
curr_table = "schools_with_description"
sql_truncate = "TRUNCATE TABLE " + curr_table
db.session.execute(sql_truncate)
db.session.commit()
logging.info("Truncated table " + curr_table)
def import_to_datastore(schools_description_filepath, schools_coordinates_filepath, batch_size):
    new_items = 0
    try:
assert batch_size > 0
started = datetime.now()
schools = get_schools_with_description(
schools_description_filepath, schools_coordinates_filepath
)
truncate_schools_with_description()
new_items = 0
logging.info("inserting " + str(len(schools)) + " new schools")
for schools_chunk in chunks(schools, batch_size):
db.session.bulk_insert_mappings(SchoolWithDescription2020, schools_chunk)
db.session.commit()
new_items += len(schools)
logging.info(f"\t{new_items} items in {time_delta(started)}")
return new_items
except Exception as exception:
error = f"Schools import succeeded partially with {new_items} schools. Got exception : {exception}"
raise Exception(error)
def parse(schools_description_filepath, schools_coordinates_filepath, batch_size):
started = datetime.now()
total = import_to_datastore(
schools_description_filepath=schools_description_filepath,
schools_coordinates_filepath=schools_coordinates_filepath,
batch_size=batch_size,
)
db.session.execute(
"UPDATE schools_with_description SET geom = ST_SetSRID(ST_MakePoint(longitude,latitude),4326)\
WHERE geom IS NULL;"
)
logging.info("Total: {0} schools in {1}".format(total, time_delta(started)))
|
MarsZone/DreamLand
|
muddery/utils/event_handler.py
|
Python
|
bsd-3-clause
| 7,071
| 0.003253
|
"""
EventHandler handles all events. The handler sets on every object.
"""
import random
from muddery.utils import defines
from muddery.statements.statement_handler import STATEMENT_HANDLER
from muddery.utils import utils
from muddery.worlddata.data_sets import DATA_SETS
from django.conf import settings
from django.apps import apps
from evennia.utils import logger
PERMISSION_BYPASS_EVENTS = {perm.lower() for perm in settings.PERMISSION_BYPASS_EVENTS}
def get_event_additional_model():
"""
Set a dict of additional model names.
"""
additional_model = {}
# list event's additional data's model
for data_settings in DATA_SETS.event_additional_data:
for record in data_settings.objects.all():
key = record.serializable_value("key")
additional_model[key] = data_settings.model_name
return additional_model
class EventHandler(object):
"""
"""
_additional_model = get_event_additional_model()
def __init__(self, owner):
"""
Initialize the handler.
"""
self.owner = owner
self.events = {}
# Load events.
event_records = DATA_SETS.event_data.objects.filter(trigger_obj=owner.get_data_key())
for record in event_records:
event = {}
# Set data.
event_type = record.type
trigger_type = record.trigger_type
for field in record._meta.fields:
event[field.name] = record.serializable_value(field.name)
event["type"] = event_type
# Set additional data.
if record.key in self._additional_model:
model_name = self._additional_model[record.key]
model_additional = apps.get_model(settings.WORLD_DATA_APP, model_name)
try:
add_record = model_additional.objects.get(key = record.key)
# Set data.
for add_field in add_record._meta.fields:
event[add_field.name] = add_record.serializable_value(add_field.name)
except Exception, e:
pass
if not trigger_type in self.events:
self.events[trigger_type] = []
self.events[trigger_type].append(event)
def can_bypass(self, character):
"""
If the character can bypass the event.
"""
if not character:
return False
if character.player:
if character.player.is_superuser:
# superusers can bypass events
return True
for perm in character.player.permissions.all():
if perm in PERMISSION_BYPASS_EVENTS:
# has permission to bypass events
return True
#########################
#
# Event triggers
#
#########################
def get_function(self, event_type):
"""
Get the function of the event type.
"""
if event_type == defines.EVENT_ATTACK:
return self.do_attack
elif event_type == defines.EVENT_DIALOGUE:
return self.do_dialogue
def at_character_move_in(self, character):
"""
Called when a character moves in the event handler's owner, usually a room.
"""
if not character:
return
if self.can_bypass(character):
return
if defines.EVENT_TRIGGER_ARRIVE in self.events:
for event in self.events[defines.EVENT_TRIGGER_ARRIVE]:
# If has arrive event.
if STATEMENT_HANDLER.match_condition(event["condition"], character, self.owner):
# If matches the condition.
function = self.get_function(event["type"])
if function:
function(event, character)
def at_character_move_out(self, character):
"""
Called when a character moves out of a room.
"""
pass
def at_character_die(self):
"""
Called when a character is killed.
"""
owner = self.owner
if not owner:
return
if self.can_bypass(owner):
return
if defines.EVENT_TRIGGER_DIE in self.events:
for event in self.events[defines.EVENT_TRIGGER_DIE]:
#If has die event.
if STATEMENT_HANDLER.match_condition(event["condition"], owner, None):
# If matches the condition, run event on the owner.
function = self.get_function(event["type"])
if function:
function(event, self)
def at_character_kill(self, killers):
"""
Called when a character kills others.
This event is set on the character who is killed, and take effect on the killer!
"""
if defines.EVENT_TRIGGER_KILL in self.events:
for event in self.events[defines.EVENT_TRIGGER_KILL]:
# If has kill event.
for killer in killers:
if self.can_bypass(killer):
continue
if STATEMENT_HANDLER.match_condition(event["condition"], killer, self.owner):
function = self.get_function(event["type"])
if function:
function(event, killer)
def at_character_traverse(self, character):
"""
Called before a character traverses an exit.
If returns true, the character can pass the exit, else the character can not pass the exit.
"""
if not character:
return True
if self.can_bypass(character):
return True
triggered = False
if defines.EVENT_TRIGGER_TRAVERSE in self.events:
for event in self.events[defines.EVENT_TRIGGER_TRAVERSE]:
# If has traverse event.
if STATEMENT_HANDLER.match_condition(event["condition"], character, self.owner):
# If matches the condition.
triggered = True
function = self.get_function(event["type"])
                    if function:
function(event, character)
return not triggered
def do_attack(self, event, character):
""
|
"
Start a combat.
"""
rand = random.random()
# If matches the odds, put the character in combat.
# There can be several mods with different odds.
if rand <= event["odds"]:
# Attack mob.
character.attack_temp_target(event["mob"], event["level"], event["desc"])
def do_dialogue(self, event, character):
"""
Start a dialogue.
"""
# Get sentence.
npc = None
if event["npc"]:
npc = utils.search_obj_data_key(event["npc"])
if npc:
npc = npc[0]
character.show_dialogue(npc, event["dialogue"], 0)
|
splotz90/urh
|
tests/TestInstallation.py
|
Python
|
gpl-3.0
| 5,216
| 0.004793
|
import unittest
from subprocess import call, DEVNULL
import time
from tests.docker import docker_util
class VMHelper(object):
def __init__(self, vm_name: str, shell: str = "", ssh_username: str = None, ssh_port: str = None):
self.vm_name = vm_name
self.shell = shell # like cmd.exe /c
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.use_ssh = self.ssh_username is not None and self.ssh_port is not None
self.__vm_is_up = False
def start_vm(self):
call('VBoxManage startvm "{0}"'.format(self.vm_name), shell=True)
def stop_vm(self, save=True):
if save:
call('VBoxManage controlvm "{0}" savestate'.format(self.vm_name), shell=True)
return
if self.use_ssh:
self.send_command("sudo shutdown -h now")
else:
call('VBoxManage controlvm "{0}" acpipowerbutton'.format(self.vm_name), shell=True)
def wait_for_vm_up(self):
if not self.__vm_is_up:
print("Waiting for {} to come up.".format(self.vm_name))
command = "ping -c 1" if self.use_ssh else "ping -n 1"
command += " github.com"
while self.__send_command(command, hide_output=True, print_command=False) != 0:
time.sleep(1)
self.__vm_is_up = True
def send_command(self, command: str) -> int:
self.wait_for_vm_up()
return self.__send_command(command)
def __send_command(self, command: str, hide_output=False, print_command=True) -> int:
if self.use_ssh:
fullcmd = ["ssh", "-p", str(self.ssh_port), "{0}@127.0.0.1".format(self.ssh_username), '"{0}"'.format(command)]
else:
fullcmd = ["VBoxManage", "guestcontrol", '"{0}"'.format(self.vm_name), "run"] \
+ self.shell.split(" ") \
+ ['"{0}"'.format(command)]
kwargs = {"stdout": DEVNULL, "stderr": DEVNULL} if hide_output else {}
fullcmd = " ".join(fullcmd)
if print_command:
print("\033[1m" + fullcmd + "\033[0m")
return call(fullcmd, shell=True, **kwargs)
class TestInstallation(unittest.TestCase):
def test_linux(self):
distributions = [
#"archlinux",
"debian8",
#"ubuntu1404",
"ubuntu1604",
#"kali",
# "gentoo" # c
|
ant test gentoo till this bug is fixed: https://github.com/docker/docker/issues/1916#issuecomment-184356102
]
for distribution in distributions:
self.assertTrue(docker_util.run_image(distribution, rebuild=False), msg=distribution)
def test_windows(self):
"""
Run the unittests on Windows + Install via Pip
To Fix Windows Error in Guest OS:
        type gpedit.msc and go to:
Windows Settings
-> Security Settings
-> Local Policies
-> Security Options
-> Accounts: Limit local account use of blank passwords to console logon only
and set it to DISABLED.
configure pip on guest:
%APPDATA%\Roaming\pip
[global]
no-cache-dir = false
[uninstall]
yes = true
:return:
"""
target_dir = r"C:\urh"
vm_helper = VMHelper("Windows 10", shell="cmd.exe /c")
vm_helper.start_vm()
vm_helper.send_command("pip uninstall urh")
vm_helper.send_command("rd /s /q {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
rc = vm_helper.send_command(r"python C:\urh\src\urh\cythonext\build.py")
self.assertEqual(rc, 0)
rc = vm_helper.send_command(r"py.test C:\urh\tests".format(target_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("pip install urh")
time.sleep(0.5)
rc = vm_helper.send_command("urh autoclose")
self.assertEqual(rc, 0)
vm_helper.send_command("pip uninstall urh")
vm_helper.stop_vm()
def test_osx(self):
"""
Run Unittests + Pip Installation on OSX
:return:
"""
vm_helper = VMHelper("OSX", ssh_port="3022", ssh_username="boss")
vm_helper.start_vm()
python_bin_dir = "/Library/Frameworks/Python.framework/Versions/3.5/bin/"
target_dir = "/tmp/urh"
vm_helper.send_command("rm -rf {0}".format(target_dir))
vm_helper.send_command("git clone https://github.com/jopohl/urh " + target_dir)
# Build extensions
rc = vm_helper.send_command("{0}python3 {1}/src/urh/cythonext/build.py".format(python_bin_dir, target_dir))
self.assertEqual(rc, 0)
# Run Unit tests
rc = vm_helper.send_command("{1}py.test {0}/tests".format(target_dir, python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 --no-cache-dir install urh".format(python_bin_dir))
rc = vm_helper.send_command("{0}urh autoclose".format(python_bin_dir))
self.assertEqual(rc, 0)
vm_helper.send_command("{0}pip3 uninstall --yes urh".format(python_bin_dir))
vm_helper.stop_vm()
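    # Standalone usage sketch (assumes a VirtualBox VM named "OSX" with SSH
    # forwarded to localhost:3022, mirroring test_osx above):
    #
    #   vm = VMHelper("OSX", ssh_port="3022", ssh_username="boss")
    #   vm.start_vm()
    #   vm.send_command("uname -a")
    #   vm.stop_vm()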
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_developer_tools/pdb_next.py
|
Python
|
apache-2.0
| 202
| 0.00495
|
import pdb
def calc(i, n):
j = i * n
return j
def f(n):
for i in range(n):
        j = calc(i, n)
print(i, j)
return
if __name__ == '__main__':
pdb.set_trace()
f(5)
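# Typical commands at the (Pdb) prompt once set_trace() fires (illustrative):
#   n        # execute the next line
#   s        # step into calc(i, n)
#   p i, n   # print local variables inside f()
#   c        # continue until the program finishes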
|
GNOME/d-feet
|
src/dfeet/introspection_helper.py
|
Python
|
gpl-2.0
| 7,626
| 0.001574
|
# -*- coding: utf-8 -*-
from gi.repository import GLib, GObject, Gio
from dfeet import dbus_utils
def args_signature_markup(arg_signature):
return '<small><span foreground="#2E8B57">%s</span></small>' % (arg_signature)
def args_name_markup(arg_name):
return '<small>%s</small>' % (arg_name,)
class DBusNode(GObject.GObject):
"""object to represent a DBus Node (object path)"""
def __init__(self, name, object_path, node_info):
GObject.GObject.__init__(self)
self.__name = name
self.__object_path = object_path
self.__node_info = node_info # Gio.GDBusNodeInfo object
def __repr__(self):
return "Name: %s ; ObjPath: %s ; NodeInfo: %s" % (
self.name, self.object_path, self.node_info)
@property
def name(self):
return self.__name
@property
def object_path(self):
return self.__object_path
@property
def node_info(self):
return self.__node_info
class DBusInterface(DBusNode):
"""object to represent a DBus Interface"""
def __init__(self, dbus_node_obj, iface_info):
DBusNode.__init__(self, dbus_node_obj.name,
dbus_node_obj.object_path, dbus_node_obj.node_info)
self.__iface_info = iface_info # Gio.GDBusInterfaceInfo object
def __repr__(self):
return "iface '%s' on node '%s'" % (self.iface_info.name, self.node_info.path)
@property
def iface_info(self):
return self.__iface_info
class DBusProperty(DBusInterface):
"""object to represent a DBus Property"""
def __init__(self, dbus_iface_obj, property_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
self.__property_info = property_info # Gio.GDBusPropertyInfo object
self.__value = None # the value
def __repr__(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
return "%s %s (%s)" % (sig, self.property_info.name, self.property_info.flags)
@property
def property_info(self):
return self.__property_info
@property
def value(self):
return self.__value
@value.setter
def value(self, new_val):
self.__value = new_val
@property
def markup_str(self):
sig = dbus_utils.sig_to_string(self.property_info.signature)
readwrite = list()
if self.readable:
readwrite.append("read")
if self.writable:
readwrite.append("write")
s = "%s %s <small>(%s)</small>" % (
args_signature_markup(sig),
args_name_markup(self.property_info.name), " / ".join(readwrite))
if self.value is not None:
s += " = %s" % (GLib.markup_escape_text(str(self.value), -1),)
return s
@property
def readable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.READABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
@property
def writable(self):
if int(self.property_info.flags) == int(Gio.DBusPropertyInfoFlags.WRITABLE) or \
int(self.property_info.flags) == \
(int(Gio.DBusPropertyInfoFlags.WRITABLE | Gio.DBusPropertyInfoFlags.READABLE)):
return True
else:
return False
class DBusSignal(DBusInterface):
"""object to represent a DBus Signal"""
def __init__(self, dbus_iface_obj, signal_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__signal_info = signal_info # Gio.GDBusSignalInfo object
def __repr__(self):
return "%s" % (self.signal_info.name)
@property
def signal_info(self):
return self.__signal_info
@property
    def args(self):
args = list()
for arg in self.signal_info.args:
sig = dbus_utils.sig_to_string(arg.signature)
args.append({'signature': sig, 'name': arg.name})
return args
@property
def args_markup_str(self):
result = ''
result += '<span foreground="#FF00FF">(</span>'
        result += ', '.join('%s' % (args_signature_markup(arg['signature'])) for arg in self.args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def markup_str(self):
return "%s %s" % (self.signal_info.name, self.args_markup_str)
class DBusMethod(DBusInterface):
"""object to represent a DBus Method"""
def __init__(self, dbus_iface_obj, method_info):
DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
self.__method_info = method_info # Gio.GDBusMethodInfo object
def __repr__(self):
return "%s(%s) ↦ %s (%s)" % (
self.method_info.name, self.in_args_str,
self.out_args_str, DBusInterface.__repr__(self))
@property
def in_args_code(self):
in_args = ""
for a in self.__method_info.in_args:
in_args += a.signature
return in_args
@property
def method_info(self):
return self.__method_info
@property
def markup_str(self):
return "%s %s <b>↦</b> %s" % (
self.method_info.name, self.in_args_markup_str, self.out_args_markup_str)
@property
def in_args(self):
in_args = list()
for in_arg in self.method_info.in_args:
sig = dbus_utils.sig_to_string(in_arg.signature)
in_args.append({'signature': sig, 'name': in_arg.name})
return in_args
@property
def out_args(self):
out_args = list()
for out_arg in self.method_info.out_args:
sig = dbus_utils.sig_to_string(out_arg.signature)
out_args.append({'signature': sig, 'name': out_arg.name})
return out_args
@property
def in_args_str(self):
result = ""
for arg in self.in_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
@property
def out_args_str(self):
result = ""
for arg in self.out_args:
result += "%s %s, " % (arg['signature'], arg['name'])
return result[0:-2]
def __args_markup_str(self, args):
"""markup a given list of args"""
result = ''
result += '<span foreground="#FF00FF">(</span>'
result += ', '.join(
'%s %s' % (
args_signature_markup(arg['signature']),
args_name_markup(arg['name'])) for arg in args)
result += '<span foreground="#FF00FF">)</span>'
return result
@property
def in_args_markup_str(self):
return self.__args_markup_str(self.in_args)
@property
def out_args_markup_str(self):
return self.__args_markup_str(self.out_args)
class DBusAnnotation(DBusInterface):
"""object to represent a DBus Annotation"""
def __init__(self, dbus_iface_obj, annotation_info):
DBusInterface.__init__(self, dbus_iface_obj,
dbus_iface_obj.iface_info)
self.__annotation_info = annotation_info # Gio.GDBusAnnotationInfo object
def __repr__(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
@property
def annotation_info(self):
return self.__annotation_info
@property
def markup_str(self):
return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
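# --- Illustrative sketch (not part of the original module) ---
# One way the wrapper classes above might be populated from raw introspection
# XML, assuming `introspection_xml` came from an Introspect() call and that
# Gio is imported at module level as the classes require. The bus name and
# object path below are made-up placeholders.
#
#   node_info = Gio.DBusNodeInfo.new_for_xml(introspection_xml)
#   node = DBusNode('org.example.Service', '/org/example/Object', node_info)
#   for iface_info in node_info.interfaces:
#       iface = DBusInterface(node, iface_info)
#       methods = [DBusMethod(iface, m) for m in iface_info.methods]
#       signals = [DBusSignal(iface, s) for s in iface_info.signals]
#       properties = [DBusProperty(iface, p) for p in iface_info.properties]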
|
openstates/openstates
|
openstates/ut/people.py
|
Python
|
gpl-3.0
| 3,136
| 0.001276
|
from scrapelib import HTTPError
from openstates.utils import LXMLMixin
from pupa.scrape import Person, Scraper
class UTPersonScraper(Scraper, LXMLMixin):
def scrape(self):
PARTIES = {"R": "Republican", "D": "Democratic"}
representative_url = "http://house.utah.gov/rep/{}"
senator_url = "http://senate.utah.gov/senators/district{}.html"
json_link = "http://le.utah.gov/data/legislators.json"
person_json = self.get(json_link).json()
for info in person_json["legislators"]:
chamber = "lower" if info["house"] == "H" else "upper"
person = Person(
name=info["formatName"],
district=info["district"],
party=PARTIES[info["party"]],
image=info["image"],
primary_org=chamber,
)
person.add_source(json_link)
if chamber == "lower":
link = representative_url.format(info["id"])
else:
link = senator_url.format(info["district"])
try:
self.head(link)
except HTTPError:
self.logger.warning("Bad URL for {}".format(info["formatName"]))
else:
                person.add_link(link)
address = info.get("address")
email = info.get("email")
fax = info.get("fax")
# Work phone seems to be the person's non-legislative
# office phone, and thus a last option
# For example, we called one and got the firm
# where he's a lawyer. We're picking
            # them in order of how likely we think they are
# to actually get us to the person we care about.
phone = info.get("cell") or info.get("homePhone") or info.get("workPhone")
if address:
person.add_contact_detail(
type="address", value=address, note="District Office"
)
if phone:
person.add_contact_detail(
type="voice", value=phone, note="District Office"
)
if email:
person.add_contact_detail(
type="email", value=email, note="District Office"
)
if fax:
person.add_contact_detail(type="fax", value=fax, note="District Office")
BASE_FINANCE_URL = "http://www.disclosures.utah.gov/Search/PublicSearch"
conflicts_of_interest = info.get("CofI") or []
finance_reports = info.get("FinanceReport") or []
extra_links = []
for conflict in conflicts_of_interest:
extra_links.append(conflict["url"])
for finance in finance_reports:
# Some links are just to the base disclosure website
# Presumably, these members don't yet have their forms up
if finance != BASE_FINANCE_URL:
extra_links.append(finance["url"])
if extra_links:
person.extras["links"] = extra_links
yield person
|
jealousrobot/PlexArt
|
lib/cherrypy/_helper.py
|
Python
|
gpl-3.0
| 10,332
| 0.000194
|
"""
Helper functions for CP apps
"""
import six
from cherrypy._cpcompat import urljoin as _urljoin, urlencode as _urlencode
from cherrypy._cpcompat import basestring
import cherrypy
def expose(func=None, alias=None):
"""
Expose the function or class, optionally providing an alias or set of aliases.
"""
def expose_(func):
func.exposed = True
if alias is not None:
if isinstance(alias, basestring):
parents[alias.replace(".", "_")] = func
else:
for a in alias:
parents[a.replace(".", "_")] = func
return func
import sys
import types
decoratable_types = types.FunctionType, types.MethodType, type,
if six.PY2:
# Old-style classes are type types.ClassType.
decoratable_types += types.ClassType,
if isinstance(func, decoratable_types):
if alias is None:
# @expose
func.exposed = True
return func
else:
# func = expose(func, alias)
parents = sys._getframe(1).f_locals
return expose_(func)
elif func is None:
if alias is None:
# @expose()
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose(alias="alias") or
# @expose(alias=["alias1", "alias2"])
parents = sys._getframe(1).f_locals
return expose_
else:
# @expose("alias") or
# @expose(["alias1", "alias2"])
parents = sys._getframe(1).f_locals
alias = func
return expose_
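# Illustrative usage of the decorator forms handled above (not part of the
# original module); the handler class and method names are placeholders:
#
#   class Root(object):
#       @expose
#       def index(self):
#           return 'index'
#
#       @expose('page')             # also reachable under the alias 'page'
#       def some_page(self):
#           return 'page'
#
#       @expose(alias=['a', 'b'])   # multiple aliases
#       def multi(self):
#           return 'aliased'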
def popargs(*args, **kwargs):
"""A decorator for _cp_dispatch
(cherrypy.dispatch.Dispatcher.dispatch_method_name).
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
Note that any existing (and exposed) member function of the class that
popargs is applied to will override that value of the argument. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create. Defined functions
#take precedence over arguments.
Or as a member of a class:
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
# Since keyword arg comes after *args, we have to process it ourselves
# for lower versions of python.
handler = None
handler_call = False
for k, v in kwargs.items():
if k == 'handler':
handler = v
else:
raise TypeError(
"cherrypy.popargs() got an unexpected keyword argument '{0}'"
.format(k)
)
import inspect
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
# cherrypy.popargs is a class decorator
cls = cls_or_self
setattr(cls, cherrypy.dispatch.Dispatcher.dispatch_method_name, decorated)
return cls
# We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
cherrypy.request.params.update(parms)
return handler
cherrypy.request.params.update(parms)
# If we are the ultimate handler, then to prevent our _cp_dispatch
# from being called again, we will resolve remaining elements through
# getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
def url(path="", qs="", script_name=None, base=None, relative=None):
"""Create an absolute URL for the given path.
If 'path' starts with a slash ('/'), this will return
(base + script_name + path + qs).
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
If script_name is None, cherrypy.request will be used
to find a script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
Finally, note that this function can be used to obtain an absolute URL
for the current request path (minus the querystring) by passing no args.
If you call url(qs=cherrypy.request.query_string), you should get the
original browser URL (assuming no internal redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
absolute URL (including the scheme, host, vhost, and script_name).
If True, the output will instead be a URL that is relative to the
current request path, perhaps including '..' atoms. If relative is
the string 'server', the output will instead be a URL that is
relative to the server root; i.e., it will start with a slash.
"""
if isinstance(qs, (tuple, list, dict)):
qs = _urlencode(qs)
if qs:
qs = '?' + qs
if cherrypy.request.app:
if not path.startswith("/"):
            # Append/remove trailing slash from path_info as needed
            # (this is to support mistyped URL's without redirecting;
# if you want to redirect, use tools.trailing_slash).
pi = cherrypy.request.path_info
if cherrypy.request.is_index is True:
if not pi.endswith('/'):
pi = pi + '/'
elif cher
|
edisonlz/fruit
|
web_project/base/site-packages/django/conf/__init__.py
|
Python
|
apache-2.0
| 7,796
| 0.002437
|
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import logging
import os
import sys
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils.module_loading import import_by_path
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
self._configure_logging()
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def _configure_logging(self):
"""
Setup logging from LOGGING_CONFIG and LOGGING settings.
"""
if not sys.warnoptions:
try:
# Route warnings through python logging
logging.captureWarnings(True)
# Allow DeprecationWarnings through the warnings filters
warnings.simplefilter("default", DeprecationWarning)
except AttributeError:
# No captureWarnings on Python 2.6, DeprecationWarnings are on anyway
pass
if self.LOGGING_CONFIG:
from django.utils.log import DEFAULT_LOGGING
# First find the logging configuration function ...
logging_config_func = import_by_path(self.LOGGING_CONFIG)
logging_config_func(DEFAULT_LOGGING)
# ... then invoke it with the logging settings
if self.LOGGING:
logging_config_func(self.LOGGING)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
self._configure_logging()
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
"to a tuple, not a string.")
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError as e:
raise ImportError(
"Could not import settings '%s' (Is it on sys.path? Is there an import error in the settings file?): %s"
% (self.SETTINGS_MODULE, e)
)
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and \
isinstance(setting_value, six.string_types):
warnings.warn("The %s setting must be a tuple. Please fix your "
"settings, as auto-correction is now deprecated." % setting,
DeprecationWarning, stacklevel=2)
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
return super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
return super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
settings = LazySettings()
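# Illustrative standalone usage of the lazy settings object defined above
# (not part of the original module); the values passed to configure() are
# placeholders:
#
#   from django.conf import settings
#   if not settings.configured:
#       settings.configure(DEBUG=True, ALLOWED_HOSTS=['localhost'])
#   print(settings.DEBUG)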
|
sodafree/backend
|
build/ipython/IPython/quarantine/ipy_exportdb.py
|
Python
|
bsd-3-clause
| 2,037
| 0.025037
|
from IPython.core import ipapi
from IPython.core import macro
ip = ipapi.get()
import os,pprint
def export(filename = None):
lines = ['import IPython.core.ipapi', 'ip = IPython.core.ipapi.get()','']
vars = ip.db.keys('autorestore/*')
vars.sort()
varstomove = []
get = ip.db.get
macros = []
variables = []
for var in vars:
k = os.path.basename(var)
v = get(var)
if k.startswith('_'):
continue
if isinstance(v, macro.Macro):
macros.append((k,v))
if type(v) in [int, str, float]:
variables.append((k,v))
if macros:
lines.extend(['# === Macros ===' ,''])
for k,v in macros:
lines.append("ip.defmacro('%s'," % k)
for line in v.value.splitlines():
lines.append(' ' + repr(line+'\n'))
lines.extend([')', ''])
if variables:
lines.extend(['','# === Variables ===',''])
for k,v in variables:
varstomove.append(k)
lines.append('%s = %s' % (k,repr(v)))
lines.append('ip.push("%s")' % (' '.join(varstomove)))
bkms = ip.db.get('bookmarks',{})
if bkms:
lines.extend(['','# === Bookmarks ===',''])
        lines.append("ip.db['bookmarks'] = %s " % pprint.pformat(bkms, indent = 2) )
aliases = ip.db.get('stored_aliases', {} )
if aliases:
lines.extend(['','# === Alias definitions ===',''])
for k,v in aliases.items():
try:
lines.append("ip.define_alias('%s', %s)" % (k, repr(v[1])))
except (AttributeError, TypeError):
pass
env = ip.db.get('stored_env')
if env:
lines.extend(['','# === Stored env vars ===',''])
lines.append("ip.db['stored_env'] = %s " % pprint.pformat(env, indent = 2) )
out = '\n'.join(lines)
if filename:
open(filename,'w').write(out)
else:
print out
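# Illustrative calls (not part of the original file): with no argument the
# generated restore script is printed, otherwise it is written to the path.
#
#   export()
#   export('saved_session.py')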
|
charukiewicz/beer-manager
|
venv/lib/python3.4/site-packages/passlib/tests/__init__.py
|
Python
|
mit
| 20
| 0
|
"""passlib tests"""
|
pgmillon/ansible
|
lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py
|
Python
|
gpl-3.0
| 17,107
| 0.003975
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
- Aaron Longchamps (@alongchamps)
notes:
- Tested on vSphere 6.0, vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Additional IPs and networks can also be specified
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
    rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
    esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
- name: Manage IP and network based firewall permissions for ESXi
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: gdbserver
enabled: True
allowed_hosts:
- all_ip: False
ip_address:
192.168.20.10
- name: CIMHttpServer
enabled: True
allowed_hosts:
- all_ip: False
ip_network:
192.168.100.0/24
- name: remoteSerialPort
enabled: True
allowed_hosts:
- all_ip: False
ip_address:
192.168.100.11
ip_network:
192.168.200.0/24
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": False,
"desired_state": False,
"previous_state": True,
"allowed_hosts": {
"current_allowed_all": True,
"previous_allowed_all": True,
"desired_allowed_all": True,
"current_allowed_ip": [],
"previous_allowed_ip": [],
"desired_allowed_ip": [],
"current_allowed_networks": [],
"previous_allowed_networks": [],
"desired_allowed_networks": [],
}
},
"remoteSerialPort": {
"current_state": True,
"desired_state": True,
"previous_state": True,
"allowed_hosts": {
"current_allowed_all": False,
"previous_allowed_all": True,
"desired_allowed_all": False,
"current_allowed_ip": ["192.168.100.11"],
"previous_allowed_ip": [],
"desired_allowed_ip": ["192.168.100.11"],
"current_allowed_networks": ["192.168.200.0/24"],
"previous_allowed_networks": [],
"desired_allowed_networks": ["192.168.200.0/24"],
}
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
from ansible.module_utils.compat import ipaddress
class VmwareFirewallManager(PyVmomi):
def __init__(self, module):
super(VmwareFirewallManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.options = self.params.get('options', dict())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.firewall_facts = dict()
self.rule_options = self.module.params.get("rules")
self.gather_rule_set()
def gather_rule_set(self):
for host in self.hosts:
self.firewall_facts[host.name] = {}
firewall_system = host.configManager.firewallSystem
if firewall_system:
for rule_set_obj in firewall_system.firewallInfo.ruleset:
temp_rule_dict = dict()
temp_rule_dict['enabled'] = rule_set_obj.enabled
allowed_host = rule_set_obj.allowedHosts
rule_allow_host = dict()
rule_allow_host['ip_address'] = allowed_host.ipAddress
rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork]
rule_allow_host['all_ip'] = allowed_host.allIp
temp_rule_dict['allowed_hosts'] = rule_allow_host
self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
def ensure(self):
"""
Function to ensure rule set configuration
"""
fw_change_list = []
enable_disable_changed = False
allowed_ip_changed = False
results = dict(changed=False, rule_set_state=dict())
for host in self.hosts:
firewall_system = host.configManager.firewallSystem
if firewall_system is None:
continue
results['rule_set_state'][host.name] = dict()
for rule_option in self.rule_options:
rule_name = rule_option.get('name', None)
if rule_name is None:
self.module.fail_json(msg="Please specify rule.name for rule set"
" as it is required parameter.")
|
nickstenning/tagalog
|
tagalog/command/logship.py
|
Python
|
mit
| 1,992
| 0.002008
|
from __future__ import print_function, unicode_literals
import argparse
import json
import sys
import textwrap
from tagalog import io, stamp, tag, fields
from tagalog import shipper
parser = argparse.ArgumentParser(description=textwrap.dedent("""
Ship log data from STDIN to somewhere else, timestamping and preprocessing
each log entry into a JSON document along the way."""))
parser.add_argument('-t', '--tags', nargs='+',
help='Tag each request with the specified string tags')
parser.add_argument('-f', '--fields', nargs='+',
help='Add key=value fields specified to each request')
parser.add_argument('-s', '--shipper', default='redis',
help='Select the shipper to be used to ship logs')
parser.add_argument('--no-stamp', action='store_true')
parser.add_argument('--bulk', action='store_true',
help='Send log data in elasticsearch bulk format')
parser.add_argument('--bulk-index', default='logs',
help='Name of the elasticsearch index (default: logs)')
parser.add_argument('--bulk-type', default='message',
help='Name of the elasticsearch type (default: message)')
# TODO: make these the responsibility of the redis shipper
parser.add_argument('-k', '--key', default='logs')
parser.add_argument('-u', '--urls', nargs='+', default=['redis://localhost:6379'])
def main():
args = parser.parse_args()
shpr = shipper.get_shipper(args.shipper)(args)
msgs = io.messages(sys.stdin)
if not args.no_stamp:
msgs = stamp(msgs)
if args.tags:
msgs = tag(msgs, args.tags)
if args.fields:
msgs = fields(msgs, args.fields)
    for msg in msgs:
        payload = json.dumps(msg)
if args.bulk:
command = json.dumps({'index': {'_index': args.bulk_index, '_type': args.bulk_type}})
payload = '{0}\n{1}\n'.format(command, payload)
shpr.ship(payload)
if __name__ == '__main__':
main()
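# Illustrative sketch of what ship() receives when --bulk is given (not part
# of the original file); the document body below is a placeholder. Each
# message becomes an action line followed by the JSON-serialized message:
#
#   {"index": {"_index": "logs", "_type": "message"}}
#   {"example": "serialized log message"}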
|
ita1024/semantik
|
src/filters/others.py
|
Python
|
gpl-3.0
| 3,251
| 0.03199
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2007-2018 GPLV3
import os, sys, tarfile, io
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
def debug(s):
sys.stderr.write(s)
def protect(t):
lst = t.split('&')
	t = "&amp;".join(lst)
	lst = t.split('<')
	t = "&lt;".join(lst)
	lst = t.split('>')
	t = "&gt;".join(lst)
	lst = t.split('"')
	t = "&quot;".join(lst)
return t
head = """<?xml version="1.0" encoding="utf8"?>
<semantik version="1">
<color_schemes>
<color_scheme name="Color 0" inner="#fffe8d" border="#000000" text="#000000"/>
<color_scheme name="Color 1" inner="#91ffab" border="#000000" text="#000000"/>
<color_scheme name="Color 2" inner="#9bfffe" border="#000000" text="#000000"/>
<color_scheme name="Color 3" inner="#b8bbff" border="#000000" text="#000000"/>
<color_scheme name="Color 4" inner="#e0aaff" border="#000000" text="#000000"/>
<color_scheme name="Color 5" inner="#ffa6a6" border="#000000" text="#000000"/>
<color_scheme name="Color 6" inner="#ffd8a6" border="#000000" text="#000000"/>
<color_scheme name="Color 7" inner="#ffffff" border="#000000" text="#000000"/>
</color_schemes>
"""
textitem = """<item id="%s" summary="%s" text="%s" len="13" comment="%s" pic_location="" pic_caption="" pic_comment="" tbl_rows="0" tbl_cols="0" c1="%s" c2="%s" color="1" custom_name="" custom_border="#000000" custom_inner="#000000" custom_text="#000000">
<tblsettings rows="0" cols="0"/>
</item>
"""
lst_vars = ['id', 'summary', 'text', 'len', 'comment', 'pic_location', 'pic_caption', 'pic_comment', 'tbl_rows', 'tbl_cols', 'c1', 'c2', 'color', 'custom_name', 'custom_border', 'custom_inner', 'custom_text']
rep = {
'text':'text',
'summary':'summary',
'xpos':'c2',
'ypos':'c1',
'id':'id',
}
class FFHandler(ContentHandler):
def __init__(self):
self.buf = []
self.out = []
self.trucs = []
self.cur = 0
self.count = 1
self.ids = [] # stack
self.links = [] # tuples
def startElement(self, name, attrs):
self.buf = []
if name == 'map':
self.out.append(head)
if name == 'node':
self.count += 1
#self.cur += 1
#debug(str(self.cur))
id = self.count
if len(self.ids) > 0:
                par = self.ids[-1]
self.links.append( (par, id) )
self.ids.append(id)
text = attrs.get('TEXT', '')
text = protect(text)
self.out.append('<item id="%d" summary="%s"/>\n' % (id, text))
def endElement(self, name):
txt = "".join(self.buf)
if name == 'node':
#self.cur -= 1
#debug(str(self.cur))
self.ids=self.ids[:-1]
elif name == 'map':
for (k, v) in self.links:
self.out.append('<link p="%d" v="%d"/>\n' % (k, v))
self.out.append('</semantik>')
def characters(self, cars):
self.buf.append(cars)
def parse_string(s):
parser = make_parser()
curHandler = FFHandler()
parser.setContentHandler(curHandler)
parser.parse(io.StringIO(str(s)))
return "".join(curHandler.out)
def parse_file(infile):
with open(infile, 'r', encoding='utf-8') as f:
txt = f.read()
truc = txt.replace('<?xml version="1.0" encoding="utf8"?>', '<?xml version="1.0" encoding="UTF-8"?>')
truc = parse_string(truc)
#file = open("/tmp/con.xml", "w")
#file.write(str(truc))
#file.close()
#debug(truc.encode('utf-8'))
return truc
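# Illustrative usage (not part of the original filter): convert a
# Freemind-style .mm mind map into Semantik XML; the file name is a
# placeholder.
#
#   xml_out = parse_file('ideas.mm')
#   sys.stdout.write(xml_out)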
|
mistercrunch/airflow
|
airflow/sensors/external_task.py
|
Python
|
apache-2.0
| 14,311
| 0.003005
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
from typing import TYPE_CHECKING, Any, Callable, Collection, FrozenSet, Iterable, Optional, Union
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import BaseOperatorLink, DagBag, DagModel, DagRun, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.helpers import build_airflow_url_with_query
from airflow.utils.session import provide_session
from airflow.utils.state import State
class ExternalTaskSensorLink(BaseOperatorLink):
"""
Operator link for ExternalTaskSensor. It allows users to access
DAG waited with ExternalTaskSensor.
"""
name = 'External DAG'
def get_link(self, operator, dttm):
query = {"dag_id": operator.external_dag_id, "execution_date": dttm.isoformat()}
return build_airflow_url_with_query(query)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a different DAG or a task in a different DAG to complete for a
specific logical date.
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: str
:param external_task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:type external_task_id: str or None
:param external_task_ids: The list of task_ids that you want to wait for.
If ``None`` (default value) the sensor waits for the DAG. Either
external_task_id or external_task_ids can be passed to
ExternalTaskSensor, but not both.
:type external_task_ids: Iterable of task_ids or None, default is None
:param allowed_states: Iterable of allowed states, default is ``['success']``
:type allowed_states: Iterable
:param failed_states: Iterable of failed or dis-allowed states, default is ``None``
:type failed_states: Iterable
:param execution_delta: time difference with the previous execution to
look at, the default is the same logical date as the current task or DAG.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: Optional[datetime.timedelta]
:param execution_date_fn: function that receives the current execution's logical date as the first
positional argument and optionally any number of keyword arguments available in the
context dictionary, and returns the desired logical dates to query.
Either execution_delta or execution_date_fn can be passed to ExternalTaskSensor,
but not both.
:type execution_date_fn: Optional[Callable]
:param check_existence: Set to `True` to check if the external task exists (when
external_task_id is not None) or check if the DAG to wait for exists (when
external_task_id is None), and immediately cease waiting if the external task
or DAG does not exist (default value: False).
:type check_existence: bool
"""
template_fields = ['external_dag_id', 'external_task_id']
ui_color = '#19647e'
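    # Illustrative usage sketch (not from the original module); the dag/task
    # ids and the delta below are placeholders:
    #
    #   wait_for_upstream = ExternalTaskSensor(
    #       task_id='wait_for_upstream',
    #       external_dag_id='upstream_dag',
    #       external_task_id='final_task',
    #       execution_delta=datetime.timedelta(hours=1),
    #   )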
@property
def operator_extra_links(self):
"""Return operator extra links"""
return [ExternalTaskSensorLink()]
def __init__(
self,
*,
external_dag_id: str,
external_task_id: Optional[str] = None,
external_task_ids: Optional[Collection[str]] = None,
        allowed_states: Optional[Iterable[str]] = None,
failed_states: Optional[Iterable[str]] = None,
execution_delta: Optional[datetime.timedelta] = None,
execution_date_fn: Optional[Callable] = None,
check_existence: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.allowed_states = list(allowed_states) if allowed_states else [State.SUCCESS]
self.failed_states = list(failed_states) if failed_states else []
        total_states = set(self.allowed_states + self.failed_states)
if set(self.failed_states).intersection(set(self.allowed_states)):
raise AirflowException(
f"Duplicate values provided as allowed "
f"`{self.allowed_states}` and failed states `{self.failed_states}`"
)
if external_task_id is not None and external_task_ids is not None:
raise ValueError(
'Only one of `external_task_id` or `external_task_ids` may '
'be provided to ExternalTaskSensor; not both.'
)
if external_task_id is not None:
external_task_ids = [external_task_id]
if external_task_ids:
if not total_states <= set(State.task_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` or `external_task_ids` is not `None`: {State.task_states}'
)
if len(external_task_ids) > len(set(external_task_ids)):
raise ValueError('Duplicate task_ids passed in external_task_ids parameter')
elif not total_states <= set(State.dag_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` is `None`: {State.dag_states}'
)
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
'Only one of `execution_delta` or `execution_date_fn` may '
'be provided to ExternalTaskSensor; not both.'
)
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
self.external_task_ids = external_task_ids
self.check_existence = check_existence
self._has_checked_existence = False
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['logical_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self._handle_execution_date_fn(context=context)
else:
dttm = context['logical_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(dt.isoformat() for dt in dttm_filter)
self.log.info(
'Poking for tasks %s in dag %s on %s ... ',
self.external_task_ids,
self.external_dag_id,
serialized_dttm_filter,
)
# In poke mode this will check dag existence only once
if self.check_existence and not self._has_checked_existence:
self._check_for_existence(session=session)
count_allowed = self.get_count(dttm_filter, session, self.allowed_states)
count_failed = -1
if self.failed_states:
count_failed = self.get_count(dttm_filter, session, self.failed_states)
if count_failed == len(dttm_filter):
if self.external_task_ids:
raise AirflowException(
f'Some of the external tasks {self.external_task_ids} '
f'in DAG {self.external_dag_id} failed.'
|
alimanfoo/petl
|
docs/conf.py
|
Python
|
mit
| 7,473
| 0.006423
|
# -*- coding: utf-8 -*-
#
# petl documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 19 11:16:43 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import petl
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.imgmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx_issues']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
issues_github_path = 'petl-developers/petl'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'petl'
copyright = u'2014, Alistair Miles'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = petl.__version__
# The full version, including alpha/beta/rc tags.
release = petl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'examples', 'notes', 'bin', 'dist']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'petldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'petl.tex', u'petl Documentation',
u'Alistair Miles', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'petl', u'petl Documentation',
[u'Alistair Miles'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
# disable temporarily
# intersphinx_mapping = {'http://docs.python.org/': None}
|
fnl/pymonad
|
setup.py
|
Python
|
bsd-3-clause
| 824
| 0.033981
|
from distutils.core import setup
setup(
name='PyMonad',
version='1.3',
author='Jason DeLaat',
author_email='jason.develops@gmail.com',
packages=['pymonad', 'pymonad.test'],
url='https://bitbucket.org/jason_delaat/pymonad',
license=open('LICENSE.txt').read(),
    description='Collection of classes for programming with functors, applicative functors and monads.',
long_description=open('README.txt').read() + open("CHANGES.txt").read(),
    classifiers=[ "Intended Audience :: Developers"
                , "License :: OSI Approved :: BSD License"
, "Operating System :: OS Independent"
, "Programming Language :: Python :: 2.7"
, "Programming Language :: Python :: 3"
, "Topic :: Software Development"
, "Topic :: Software Development :: Libraries"
, "Topic :: Utilities"
],
)
|
CMSS-BCRDB/RDSV1.0
|
trove/guestagent/strategies/replication/__init__.py
|
Python
|
apache-2.0
| 955
| 0
|
# Copyright 2014 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.guestagent.strategy import Strategy
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def get_replication_strategy(replication_driver, ns=__name__):
LOG.debug("Getting replication strategy: %s.", replication_driver)
return Strategy.get_strategy(replication_driver, ns)
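# Illustrative call (not part of the original module); the strategy class
# name and namespace below are placeholders for whatever replication driver
# the guest agent is configured with:
#
#   strategy_cls = get_replication_strategy(
#       'MyReplicationStrategy',
#       ns='trove.guestagent.strategies.replication.example')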
|
bridadan/mbed-ls
|
setup.py
|
Python
|
apache-2.0
| 1,786
| 0.004479
|
"""
This module defines the attributes of the
PyPI package for the mbed SDK test suite ecosystem tools
"""
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import os
from distutils.core import setup
from setuptools import find_packages
DESCRIPTION = "mbed-ls is a Python module that detects and lists mbed-enabled devices connected to the host computer"
OWNER_NAMES = 'Przemyslaw Wirkus, Johan Seferidis, James Crosby'
OWNER_EMAILS = 'Przemyslaw.Wirkus@arm.com, Johan.Seferidis@arm.com, James.Crosby@arm.com'
# Utility function to cat in a file (used for the README)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='mbed-ls',
version='0.1.19',
description=DESCRIPTION,
long_description=read('README.md'),
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url='https://github.com/ARMmbed/mbed-ls',
packages=find_packages(),
license="Apache-2.0",
test_suite = 'test',
entry_points={
"console_scripts": [
"mbedls=mbed_lstools:mbedls_main",
],
},
install_requires=["PrettyTable>=0.7.2"])
|
OLAPLINE/TM1py
|
TM1py/Services/PowerBiService.py
|
Python
|
mit
| 6,310
| 0.004596
|
from collections.abc import Iterable
from TM1py.Services import CellService
from TM1py.Services import ElementService
from TM1py.Utils import require_pandas
try:
import pandas as pd
_has_pandas = True
except ImportError:
_has_pandas = False
class PowerBiService:
def __init__(self, tm1_rest):
"""
:param tm1_rest: instance of RestService
"""
self._tm1_rest = tm1_rest
self.cells = CellService(tm1_rest)
self.elements = ElementService(tm1_rest)
@require_pandas
def execute_mdx(self, mdx, **kwargs) -> 'pd.DataFrame':
return self.cells.execute_mdx_dataframe_shaped(mdx, **kwargs)
@require_pandas
def execute_view(self, cube_name, view_name, private, **kwargs) -> 'pd.DataFrame':
return self.cells.execute_view_dataframe_shaped(cube_name, view_name, private, **kwargs)
@require_pandas
def get_member_properties(self, dimension_name: str, hierarchy_name: str, member_selection: Iterable = None,
skip_consolidations: bool = True, attributes: Iterable = None,
skip_parents: bool = False, level_names=None,
parent_attribute: str = None) -> 'pd.DataFrame':
"""
:param dimension_name: Name of the dimension
:param hierarchy_name: Name of the hierarchy in the dimension
:param member_selection: Selection of members. Iterable or valid MDX string
:param skip_consolidations: Boolean flag to skip consolidations
:param attributes: Selection of attributes. Iterable. If None retrieve all.
:param level_names: List of labels for parent columns. If None use level names from TM1.
:param skip_parents: Boolean Flag to skip parent columns.
:param parent_attribute: Attribute to be displayed in parent columns. If None, parent name is used.
:return: pandas DataFrame
"""
if not member_selection:
member_selection = f"{{ [{dimension_name}].[{hierarchy_name}].Members }}"
if skip_consolidations:
member_selection = f"{{ Tm1FilterByLevel({member_selection}, 0) }}"
if not isinstance(member_selection, str):
if isinstance(member_selection, Iterable):
member_selection = "{" + ",".join(f"[{dimension_name}].[{member}]" for member in member_selection) + "}"
else:
raise ValueError("Argument 'element_selection' must be None or str")
if not self.elements.attribute_cube_exists(dimension_name):
raise RuntimeError(self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + " cube must exist")
members = [tupl[0] for tupl in self.elements.execute_set_mdx(
mdx=member_selection,
element_properties=None,
member_properties=("Name", "UniqueName"),
parent_properties=None)]
element_types = self.elements.get_element_types(
dimension_name=dimension_name,
hierarchy_name=hierarchy_name,
skip_consolidations=skip_consolidations)
df = pd.DataFrame(
data=[(member["Name"], element_types[member["Name"]])
for member
in members
if member["Name"] in element_types],
dtype=str,
columns=[dimension_name, 'Type'])
calculated_members_definition = list()
calculated_members_selection = list()
if not skip_parents:
levels = self.elements.get_levels_count(dimension_name, hierarchy_name)
# potential custom parent names
if not level_names:
level_names = self.elements.get_level_names(dimension_name, hierarchy_name, descending=True)
for parent in range(1, levels, 1):
name_or_attribute = f"Properties('{parent_attribute}')" if parent_attribute else "Name"
member = f"""
MEMBER [{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}].[{level_names[parent]}]
AS [{dimension_name}].CurrentMember.{'Parent.' * parent}{name_or_attribute}
"""
calculated_members_definition.append(member)
calculated_members_selection.append(
f"[{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}].[{level_names[parent]}]")
if attributes is None:
column_selection = "{Tm1SubsetAll([" + self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + "])}"
else:
column_selection = "{" + ",".join(
"[" + self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name + "].[" + attribute + "]"
for attribute
in attributes) + "}"
if calculated_members_selection:
            column_selection = column_selection + " + {" + ",".join(calculated_members_selection) + "}"
member_selection = ",".join(
member["UniqueName"]
for member
in members)
mdx_with_block = ""
        if calculated_members_definition:
mdx_with_block = "WITH " + " ".join(calculated_members_definition)
mdx = f"""
{mdx_with_block}
SELECT
{{ {member_selection} }} ON ROWS,
{{ {column_selection} }} ON COLUMNS
FROM [{self.elements.ELEMENT_ATTRIBUTES_PREFIX + dimension_name}]
"""
df_data = self.execute_mdx(mdx)
# override hierarchy name
df_data.rename(columns={hierarchy_name:dimension_name},inplace=True)
# shift levels to right hand side
if not skip_parents:
# skip max level (= leaves)
level_names = level_names[1:]
# iterative approach
for _ in level_names:
rows_to_shift = df_data[df_data[level_names[-1]] == ''].index
if rows_to_shift.empty:
break
df_data.iloc[rows_to_shift, -len(level_names):] = df_data.iloc[rows_to_shift, -len(level_names):].shift(
1, axis=1)
df_data.iloc[:, -len(level_names):] = df_data.iloc[:, -len(level_names):].fillna('')
return pd.merge(df, df_data, on=dimension_name).drop_duplicates()
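# Illustrative usage sketch (not part of the original service); `tm1_rest`
# is assumed to be an already authenticated RestService instance, and the
# dimension, hierarchy and MDX below are placeholders:
#
#   service = PowerBiService(tm1_rest)
#   df_cells = service.execute_mdx("SELECT {...} ON ROWS, {...} ON COLUMNS FROM [Sales]")
#   df_members = service.get_member_properties(
#       dimension_name='Product',
#       hierarchy_name='Product',
#       skip_consolidations=True,
#   )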
|
maxolasersquad/ninja-ide
|
ninja_ide/core/__init__.py
|
Python
|
gpl-3.0
| 712
| 0.001404
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from .core import *
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/salt/modules/openbsdservice.py
|
Python
|
apache-2.0
| 8,008
| 0.000999
|
# -*- coding: utf-8 -*-
'''
The service module for OpenBSD
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin
# Import Salt libs
import salt.utils
log = logging.getLogger(__name__)
# XXX enable/disable support would be nice
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'reload_': 'reload'
}
def __virtual__():
'''
Only work on OpenBSD
'''
if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
krel = list(list(map(int, __grains__['kernelrelease'].split('.'))))
# The -f flag, used to force a script to run even if disabled,
# was added after the 5.0 release.
# the rcctl(8) command is the preferred way to manage services.
if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
if not os.path.exists('/usr/sbin/rcctl'):
return __virtualname__
return (False, 'The openbsdservice execution module cannot be loaded: '
'only available on OpenBSD systems.')
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/etc/rc.d/{0} -f start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '/etc/rc.d/{0} -f stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '/etc/rc.d/{0} -f restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service, returns a bool whether the service is
running.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = '/etc/rc.d/{0} -f check'.format(name)
return not __salt__['cmd.retcode'](cmd, ignore_retcode=True)
def reload_(name):
'''
.. versionadded:: 2014.7.0
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '/etc/rc.d/{0} -f reload'.format(name)
return not __salt__['cmd.retcode'](cmd)
import re
service_flags_regex = re.compile(r'^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$')
pkg_scripts_regex = re.compile(r'^\s*pkg_scripts=\'(.*)\'$')
start_daemon_call_regex = re.compile(r'(\s*start_daemon(?!\(\)))')
start_daemon_parameter_regex = re.compile(r'(?:\s+(\w[\w\d]*))')
def _get_rc():
'''
Returns a dict where the key is the daemon's name and
the value a boolean indicating its status (True: enabled or False: disabled).
    Check the daemons started by the system in /etc/rc and
configured in /etc/rc.conf and /etc/rc.conf.local.
    Also add to the dict all the locally enabled daemons via $pkg_scripts.
'''
daemons_flags = {}
try:
# now read the system startup script /etc/rc
# to know what are the system enabled daemons
with salt.utils.fopen('/etc/rc', 'r') as handle:
lines = handle.readlines()
except IOError:
log.error('Unable to read /etc/rc')
else:
for line in lines:
match = start_daemon_call_regex.match(line)
if match:
# the matched line is a call to start_daemon()
# we remove the function name
line = line[len(match.group(1)):]
# we retrieve each daemon name from the parameters of start_daemon()
for daemon in start_daemon_parameter_regex.findall(line):
# mark it as enabled
daemons_flags[daemon] = True
# this will execute rc.conf and rc.conf.local
# used in /etc/rc at boot to start the daemons
variables = __salt__['cmd.run']('(. /etc/rc.conf && set)',
clean_env=True,
output_loglevel='quiet',
python_shell=True).split('\n')
for var in variables:
match = service_flags_regex.match(var)
if match:
# the matched var look like daemon_name_flags=, we test its assigned value
# NO: disabled, everything else: enabled
# do not create a new key if the service hasn't been found in /etc/rc, see $pkg_scripts
if match.group(2) == 'NO':
daemons_flags[match.group(1)] = False
else:
match = pkg_scripts_regex.match(var)
if match:
# the matched var is pkg_scripts
                # we can retrieve the name of each locally enabled daemon that wasn't hand started via /etc/rc
for daemon in match.group(1).split():
# create a new key and mark it as enabled
daemons_flags[daemon] = True
return daemons_flags
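# Illustrative sketch, not part of the original module: with hypothetical
# configuration lines such as
#     httpd_flags=NO
#     pkg_scripts='messagebus cupsd'
# _get_rc() would return a dict along the lines of
#     {'httpd': False, 'messagebus': True, 'cupsd': True}
# which get_enabled()/get_disabled() below intersect with get_all().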
def available(name):
'''
.. versionadded:: 2014.7.0
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
path = '/etc/rc.d/{0}'.format(name)
return os.path.isfile(path) and os.access(path, os.X_OK)
def missing(name):
'''
.. versionadded:: 2014.7.0
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return not available(name)
def get_all():
'''
.. versionadded:: 2014.7.0
Return all available boot services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
services = []
if not os.path.isdir('/etc/rc.d'):
return services
for service in os.listdir('/etc/rc.d'):
# this will remove rc.subr and all non executable files
if available(service):
services.append(service)
return sorted(services)
def get_enabled():
'''
.. versionadded:: 2014.7.0
Return a list of service that are enabled on boot
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def enabled(name, **kwargs):
'''
.. versionadded:: 2014.7.0
Return True if the named service is enabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def get_disabled():
'''
.. versionadded:: 2014.7.0
Return a set of services that are installed but disabled
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
services = []
for daemon, is_enabled in six.iteritems(_get_rc()):
if not is_enabled:
services.append(daemon)
return sorted(set(get_all()) & set(services))
def disabled(name):
'''
.. versionadded:: 2014.7.0
Return True if the named service is disabled, false otherwise
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return name in get_disabled()
|
leohmoraes/weblate
|
weblate/trans/views/basic.py
|
Python
|
gpl-3.0
| 14,336
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Sum, Count, Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
import django.views.defaults
from weblate.trans.models import (
Project, SubProject, Translation, Check,
Dictionary, Change, Unit, WhiteboardMessage
)
from weblate.requirements import get_versions, get_optional_versions
from weblate.lang.models import Language
from weblate.trans.forms import (
get_upload_form, SearchForm,
AutoForm, ReviewForm, NewLanguageForm,
UserManageForm,
)
from weblate.accounts.models import Profile, notify_new_language
from weblate.trans.views.helper import (
get_project, get_subproject, get_translation,
try_set_language,
)
import weblate
import datetime
from urllib import urlencode
def home(request):
"""
Home page of Weblate showing list of projects, stats
and user links if logged in.
"""
if 'show_set_password' in request.session:
        messages.warning(
request,
_(
'You have activated your account, now you should set '
'the password to be able to login next time.'
)
)
return redirect('password')
wb_messages = WhiteboardMessage.objects.all()
projects = Project.objects.all_acl(request.user)
if projects.count() == 1:
projects = SubProject.objects.filter(
project=projects[0]
).select_related()
# Warn about not filled in username (usually caused by migration of
# users from older system
if not request.user.is_anonymous() and request.user.first_name == '':
messages.warning(
request,
_('Please set your full name in your profile.')
)
# Some stats
top_translations = Profile.objects.order_by('-translated')[:10]
top_suggestions = Profile.objects.order_by('-suggested')[:10]
last_changes = Change.objects.last_changes(request.user)[:10]
return render(
request,
'index.html',
{
'projects': projects,
'top_translations': top_translations.select_related('user'),
'top_suggestions': top_suggestions.select_related('user'),
'last_changes': last_changes,
'last_changes_rss': reverse('rss'),
'last_changes_url': '',
'search_form': SearchForm(),
'whiteboard_messages': wb_messages,
}
)
def search(request):
"""
Performs site-wide search on units.
"""
search_form = SearchForm(request.GET)
context = {
'search_form': search_form,
}
if search_form.is_valid():
units = Unit.objects.search(
None,
search_form.cleaned_data,
).select_related(
'translation',
)
# Filter results by ACL
acl_projects, filtered = Project.objects.get_acl_status(request.user)
if filtered:
units = units.filter(
translation__subproject__project__in=acl_projects
)
limit = request.GET.get('limit', 50)
page = request.GET.get('page', 1)
paginator = Paginator(units, limit)
try:
units = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
units = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of
# results.
units = paginator.page(paginator.num_pages)
context['page_obj'] = units
context['title'] = _('Search for %s') % (
search_form.cleaned_data['q']
)
context['query_string'] = search_form.urlencode()
context['search_query'] = search_form.cleaned_data['q']
else:
messages.error(request, _('Invalid search query!'))
return render(
request,
'search.html',
context
)
def show_engage(request, project, lang=None):
# Get project object, skipping ACL
obj = get_project(request, project, skip_acl=True)
# Handle language parameter
language = None
if lang is not None:
language = try_set_language(lang)
context = {
'object': obj,
'project': obj,
'languages': obj.get_language_count(),
'total': obj.get_total(),
'percent': obj.get_translated_percent(language),
'url': obj.get_absolute_url(),
'language': language,
}
# Render text
if language is None:
status_text = _(
'<a href="%(url)s">Translation project for %(project)s</a> '
'currently contains %(total)s strings for translation and is '
'<a href="%(url)s">being translated into %(languages)s languages'
'</a>. Overall, these translations are %(percent)s%% complete.'
)
else:
# Translators: line of text in engagement widget, please use your
# language name instead of English
status_text = _(
'<a href="%(url)s">Translation project for %(project)s</a> into '
'English currently contains %(total)s strings for translation and '
'is %(percent)s%% complete.'
)
if 'English' in status_text:
status_text = status_text.replace('English', language.name)
context['status_text'] = mark_safe(status_text % context)
return render(
request,
'engage.html',
context
)
def show_project(request, project):
obj = get_project(request, project)
dict_langs = Dictionary.objects.filter(
project=obj
).values_list(
'language', flat=True
).distinct()
dicts = []
for language in Language.objects.filter(id__in=dict_langs):
dicts.append(
{
'language': language,
'count': Dictionary.objects.filter(
language=language,
project=obj
).count(),
}
)
last_changes = Change.objects.prefetch().filter(
Q(translation__subproject__project=obj) |
Q(dictionary__project=obj)
)[:10]
return render(
request,
'project.html',
{
'object': obj,
'project': obj,
'dicts': dicts,
'last_changes': last_changes,
'last_changes_rss': reverse(
'rss-project',
kwargs={'project': obj.slug}
),
'last_changes_url': urlencode(
{'project': obj.slug}
),
'add_user_form': UserManageForm(),
}
)
def show_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
last_changes = Change.objects.prefetch().filter(
translation__subproject=obj
)[:10]
new_lang_form = NewLanguageForm()
return render(
request,
'subproject.html',
{
'object': obj,
'project': obj.project,
'translations': obj.translation_set.enabled(),
'show_languag
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/contrib/flatpages/migrations/0001_initial.py
|
Python
|
mit
| 1,710
| 0.004678
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FlatPage',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.CharField(max_length=100, verbose_name='URL', db_index=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('content', models.TextField(verbose_name='content', blank=True)),
('enable_comments', models.BooleanField(default=False, verbose_name='enable comments')),
('template_name', models.CharField(
help_text=(
"Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use "
"'flatpages/default.html'."
), max_length=70, verbose_name='template name', blank=True
)),
('registration_required', models.BooleanField(
default=False, help_text='If this is checked, only logged-in users will be able to view the page.',
verbose_name='registration required'
)),
('sites', models.ManyToManyField(to='sites.Site', verbose_name='sites')),
],
options={
'ordering': ('url',),
'db_table': 'django_flatpage',
'verbose_name': 'flat page',
'verbose_name_plural': 'flat pages',
},
bases=(models.Model,),
),
]
|
Irdroid/httpie
|
httpie/utils.py
|
Python
|
bsd-3-clause
| 1,164
| 0.000859
|
from __future__ import division
def humanize_bytes(n, precision=2):
# Author: Doug Latornell
# Licence: MIT
# URL: http://code.activestate.com/recipes/577081/
"""Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 B'
>>> humanize_bytes(1024, precision=1)
'1.0 kB'
>>> humanize_bytes(1024 * 123, precision=1)
'123.0 kB'
    >>> humanize_bytes(1024 * 12342, precision=1)
'12.1 MB'
>>> humanize_bytes(1024 * 12342, precision=2)
'12.05 MB'
>>> humanize_bytes(1024 * 1234, precision=2)
'1.21 MB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=2)
'1.31 GB'
>>> humanize_bytes(1024 * 1234 * 1111, precision=1)
'1.3 GB'
"""
abbrevs = [
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'B')
]
if n == 1:
return '1 B'
for factor, suffix in abbrevs:
if n >= factor:
break
# noinspection PyUnboundLocalVariable
return '%.*f %s' % (precision, n / factor, suffix)
|
DarthMaulware/EquationGroupLeaks
|
Leak #4 - Don't Forget Your Base/EQGRP-Auction-File/Linux/etc/autoutils.py
|
Python
|
unlicense
| 22,459
| 0.005699
|
#!/bin/env python
import os
import re
import sys
import time
import pickle
import random
import socket
import os.path
import traceback
import subprocess
from optparse import OptionParser
VERSION='1.1.0.7'
COLOR = {
'success' : '\33[2;32m', # Green
'fail' : '\033[2;31m', # Red
'bad' : '\033[31;07m', # Red Highlight
'warn' : '\033[3;43m', # Yellow Highlight
'normal' : '\033[0;39m', # Black
'note' : '\033[0;34m' # NOPEN Blue
}
class autoutils:
def __init__(self):
# Set the Colors
self.COLOR_SUCCESS = COLOR['success']
self.COLOR_FAILURE = COLOR['fail']
self.COLOR_BADFAILURE = COLOR['bad']
self.COLOR_WARNING = COLOR['warn']
self.COLOR_NORMAL = COLOR['normal']
self.COLOR_NOTE = COLOR['note']
# Set directories
self.opdir = '/current'
self.opup = '%s/up' % self.opdir
self.opbin = '%s/bin' % self.opdir
self.opetc = '%s/etc' % self.opdir
self.opdown = '%s/down' % self.opdir
self.optmp = '%s/tmp' % self.opdir
# Set Python module path
sys.path = [self.opetc,self.optmp] + sys.path
# must have this
if not os.environ.has_key('NOPEN_AUTOPORT'):
sys.stderr.write('Could not find NOPEN_AUTOPORT variable. ' +
'Must call from NOPEN -gs.\n')
sys.exit(1)
# Nopen ENV Variables
self.nopen_autoport = int(os.environ['NOPEN_AUTOPORT'])
self.nopen_serverinfo = os.environ['NOPEN_SERVERINFO']
self.nopen_clientver = os.environ['NOPEN_CLIENTVER']
self.nopen_mylog = os.environ['NOPEN_MYLOG']
self.nopen_rhostname = os.environ['NOPEN_RHOSTNAME']
self.nopen_nhome = os.environ['NHOME']
self.nopen_mypid = os.environ['NOPEN_MYPID']
self.optargetcommands = os.path.join(
self.opdown, '%s_targetcommands' % self.nopen_rhostname)
# This is the nopen autoport socket
self.connected = False
self.nopen_socket = None
self.nopen = None
self.pid = os.getpid()
self.hidden_dir = ''
self.status = {}
self.statusFile = os.path.join(self.optmp,
'%s.%s_pystatus' % (self.nopen_rhostname, self.nopen_mypid))
self.stateFile = os.path.join(self.optmp, '%s_pystate' % self.nopen_rhostname)
self.state = {
'linux': False,
'solaris': False,
'hpux': False,
'hpux_it': False
}
self.tunnel = None
self.perl_return = False
self.perl_sock_file = ''
return
#
# Saves self.state dictionary into a file
#
def saveState(self):
f = open(self.stateFile, 'wb')
pickle.dump(self.state, f)
f.close()
#
# Loads a previously saved state
#
def loadState(self):
if os.path.exists(self.stateFile):
f = open(self.stateFile, 'rb')
self.state = pickle.load(f)
f.close()
#
# Yea...
#
def help(self, word):
print ' ___ '
print ' |_____ | '
print ' || | | '
print ' || | | '
print ' ||O O| | Looks like you\'re trying to %s' % str(word).upper()
print ' || | | Want some help?'
print ' || U | | '
print ' || | || '
print ' || | || '
print ' ||| | || '
print ' ||| | || '
print ' ||| | || '
print ' ||| | || '
print ' |||__| || '
print ' ||_____|| '
print ' |_______| '
return
#
# Takes out any autoutils stuff and then calls the parser's
# parse_args() method.
# args should be an array without the program name (sys.argv[1:])
#
def parseArgs(self, parser, args, values=None):
if len(args) > 0:
if args[0].startswith('perl:'):
self.perl_return = True
self.perl_sock_file = sys.argv[1].split(':', 1)[1]
args = args[1:]
return parser.parse_args(args, values)
#
# Makes the connection to the NOPEN autoport.
# This takes care of the forking too.
#
def connect(self):
os.close(sys.stdout.fileno())
sys.stdout = sys.stderr
if not self.connected:
self.nopen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.nopen_socket.connect(('127.0.0.1', self.nopen_autoport))
self.nopen = self.nopen_socket.makefile()
self.connected = True
pid = os.fork()
if pid != 0:
self.nopen.close()
                self.nopen_socket.close()
sys.exit(0)
self.pid = os.getpid()
self.nopen.write('#NOGS\n')
self.nopen.flush()
# going to run -status every time because something could change
# between runs and don't want to get caught with something bad.
self.parsestatus()
#if not os.path.exists(self.statusFile):
# self.parsestatus()
#else:
# f = open(self.statusFile, 'rb')
        # self.status = pickle.load(f)
# f.close()
self.loadState()
self.saveState()
return self.nopen
#
# Does any final stuff with the output, like sending it to a calling
# perl script, then returns back a string of the argument, or unchanged
# if mkstr is False.
#
def finish(self, ret=None, mkstr=True):
if self.connected:
self.cleanup()
if not ret:
ret = ''
if mkstr:
if ret.__class__() == []:
ret_str = '\n'.join(ret) + '\n'
else:
ret_str = str(ret)
ret = ret_str
if self.perl_return:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM);
sock.connect((self.perl_sock_file))
sock.send(ret_str)
sock.close()
except:
print 'Could not connect to %s' % self.perl_sock_file
return ret
#
# Returns a list of any hidden directories found.
#
def getHidden(self, refresh=False):
tmpfile_chrs = '[A-Za-z0-9_-]' * 6
parent_dirs_old = [
'/var/tmp',
'/lib',
'/dev',
'/etc',
'/',
]
dirs_old = [
'.%s' % ('[A-Fa-f0-9]' * 16),
'.tmp%s' % tmpfile_chrs,
]
dir_regexs = [
'/var/spool/.cron%s.bak' % tmpfile_chrs,
'/var/log/.fsck%s.bak' % tmpfile_chrs,
'/lib/.security%s.lock' % tmpfile_chrs,
'/dev/.usb%s.lock' % tmpfile_chrs,
'/etc/.dev%s.save' % tmpfile_chrs,
'/var/tmp/.%s-unix' % tmpfile_chrs,
'/.opt%s.save' % tmpfile_chrs,
]
for pd in parent_dirs_old:
for d in dirs_old:
dir_regexs.append(os.path.join(pd, d))
parent_dirs = []
for dr in dir_regexs:
d = os.path.dirname(dr)
if not d in parent_dirs:
parent_dirs.append(d)
lsfile = os.path.join(self.opdown,
'stoichunt.%s' % self.nopen_rhostname)
if not os.path.exists(lsfile):
refresh = True
if refresh:
self.preserveFiles(lsfile)
output, nopenlines, outputlines = self.doit(
'-ls %s > T:%s' % (' '.join(parent_dirs), lsfile))
else:
outputlines = file_readlines(lsfile)
files = [x.strip('\n').split(None, 9)[-1] for x in outputlines]
dirs = []
for f in files:
for r in dir_regexs:
if re.match(r, f):
dirs.append((f, r))
if not refresh:
# do a listing of the specific dir's regex to confirm it's there,
# only if it wasn't just done
tolist = ' '.join(
|
Azure/azure-sdk-for-python
|
sdk/metricsadvisor/azure-ai-metricsadvisor/azure/ai/metricsadvisor/_generated/aio/_metrics_advisor.py
|
Python
|
mit
| 3,345
| 0.003886
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import MetricsAdvisorConfiguration
from .operations import MetricsAdvisorOperationsMixin
from .. import models
class MetricsAdvisor(MetricsAdvisorOperationsMixin):
"""Microsoft Azure Metrics Advisor REST API (OpenAPI v2).
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for example: https://:code:`<resource-name>`.cognitiveservices.azure.com).
:type endpoint: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}/metricsadvisor/v1.0'
self._config = MetricsAdvisorConfiguration(credential, endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
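    # Illustrative sketch (an assumption, not taken from this file): callers are
    # expected to build the HttpRequest themselves and pass it through, e.g.
    #     request = HttpRequest("GET", "/hypothetical/path")
    #     response = await client._send_request(request, stream=False)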
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "MetricsAdvisor":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
wileeam/airflow
|
tests/providers/jdbc/operators/test_jdbc.py
|
Python
|
apache-2.0
| 1,539
| 0.0013
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
from airflow.providers.jdbc.operators.jdbc import JdbcOperator
class TestJdbcOperator(unittest.TestCase):
def setUp(self):
self.kwargs = dict(
sql='sql',
task_id='test_jdbc_operator',
dag=None
)
    @patch('airflow.providers.jdbc.operators.jdbc.JdbcHook')
def test_execute(self, mock_jdbc_hook):
jdbc_operator = JdbcOperator(**self.kwargs)
jdbc_operator.execute(context={})
mock_jdbc_hook.assert_called_once_with(jdbc_conn_id=jdbc_operator.jdbc_conn_id)
mock_jdbc_hook.return_value.run.assert_called_once_with(
jdbc_operator.sql, jdbc_operator.autocommit, parameters=jdbc_operator.parameters)
|
JoePelz/SAM
|
spec/python/test_integrity.py
|
Python
|
gpl-3.0
| 4,602
| 0.002825
|
from spec.python import db_connection
from sam import constants
from sam import common
from sam import integrity
import traceback
mysql_params = constants.dbconfig.copy()
sqlite_params = constants.dbconfig.copy()
mysql_params['dbn'] = 'mysql'
mysql_params['db'] = 'samapper_test'
sqlite_params['dbn'] = 'sqlite'
sqlite_params['db'] = '/tmp/sam_test.db'
db_mysql, _ = common.get_db(mysql_params)
db_sqlite, _2 = common.get_db(sqlite_params)
def test_mysql_access():
print(mysql_params)
assert integrity.check_and_fix_db_access_MySQL(mysql_params) == 0
def test_sqlite_access():
assert integrity.check_and_fix_db_access_SQLite(sqlite_params) == 0
def test_mysql_shared_tables():
try:
errors = integrity.check_shared_tables(db_mysql)
integrity.fix_shared_tables(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_shared_tables():
try:
errors = integrity.check_shared_tables(db_sqlite)
integrity.fix_shared_tables(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_UDF():
integrity.fix_UDF_MySQL(db_mysql)
    rows = db_mysql.query('SELECT decodeIP(1234567890)')
assert rows.first().values()[0] == '73.150.2.210'
rows = db_mysql.query('SELECT encodeIP(12,34,56,78)')
assert rows.first().values()[0] == 203569230L
def test_sqlite_UDF():
integrity.fix_UDF_SQLite(db_sqlite)
rows = db_sqlite.query('SELECT decodeIP(1234567890)')
assert rows.first().values()[0] == '73.150.2.210'
rows = db_sqlite.query('SELECT encodeIP(12,34,56,78)')
assert rows.first().values()[0] == 203569230L
def test_mysql_def_subscription():
try:
errors = integrity.check_default_subscription(db_mysql)
integrity.fix_default_subscription(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_def_subscription():
try:
errors = integrity.check_default_subscription(db_sqlite)
integrity.fix_default_subscription(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_subscriptions():
try:
errors = integrity.check_default_subscription(db_mysql)
integrity.fix_default_subscription(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_subscriptions():
try:
errors = integrity.check_subscriptions(db_sqlite)
integrity.fix_subscriptions(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_settings():
try:
errors = integrity.check_settings(db_mysql)
integrity.fix_settings(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_settings():
try:
errors = integrity.check_settings(db_sqlite)
integrity.fix_settings(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_datasources():
try:
errors = integrity.check_data_sources(db_mysql)
integrity.fix_data_sources(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_datasources():
try:
errors = integrity.check_data_sources(db_sqlite)
integrity.fix_data_sources(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_mysql_session():
try:
errors = integrity.check_sessions_table(db_mysql)
integrity.fix_sessions_table(db_mysql, errors)
except:
traceback.print_exc()
assert False
def test_sqlite_session():
try:
errors = integrity.check_sessions_table(db_sqlite)
integrity.fix_sessions_table(db_sqlite, errors)
except:
traceback.print_exc()
assert False
def test_check_and_fix_integrity_mysql():
mysqlconfig = {
'dbn': "mysql",
'db': 'sam_integrity_test',
'host': "localhost",
'user': "root",
'pw': constants.dbconfig['pw'],
'port': 3306
}
db, dbq = common.get_db(mysqlconfig)
try:
integrity.check_and_fix_integrity(db, mysqlconfig)
finally:
db.query("drop database sam_integrity_test")
def test_check_and_fix_integrity_sqlite():
sqliteconfig = {
'dbn': "sqlite",
'db': '',
'host': "localhost",
'user': "root",
'pw': constants.dbconfig['pw'],
'port': 3306
}
db, dbq = common.get_db(sqliteconfig)
integrity.check_and_fix_integrity(db, sqliteconfig)
|
iwob/pysv
|
pysv/smt_common.py
|
Python
|
mit
| 3,263
| 0.005823
|
from pysv import ast_utils
from pysv import ssa_converter
from pysv import utils
from pysv import loops
from pysv import interm
from pysv.smt2 import ProgramSmt2
def get_code_in_smt2(code, code_pre, code_post, program_vars, env, holes_decls = None):
"""Converts source codes of specification elements into program in SMT-LIB 2.0 language.
:param code: (str) Source code (in arbitrary language) of the program.
:param code_pre: (str) Source code (in arbitrary language) of the expression representing all *pre-conditions*.
    :param code_post: (str) Source code (in arbitrary language) of the expression representing all *post-conditions*.
:param program_vars: (ProgramVars) Information about variables and their types.
:param env: (Options) Options of the currently realized task.
:param holes_decls: (list[HoleDecl]) Declarations of all holes in the case of synthesis scenario.
:return: (ProgramSmt2) Program in the SMT-LIB 2.0 language.
"""
if env.lang == utils.Options.PYTHON:
return convert_py_to_smt2(code, code_pre, code_post, program_vars, env, holes_decls)
elif env.lang == utils.Options.SMT2:
code = processHoles(code, holes_decls)
main = ProgramSmt2([code]) # this works because SMT lib is a functional language
return main, ProgramSmt2([code_pre]), ProgramSmt2([code_post])
else:
raise Exception(str(env.lang) + ": unsupported language!")
def processHoles(code, holes_decls):
"""Finds all hole symbols in the SMT-LIB code of the program and replaces them with
appropriate references to their synthesis functions. Does nothing in case of
verification.
:param code: (str) Source code (in arbitrary language) of the program.
:param holes_decls: (list[HoleDecl]) Declarations of all holes.
:return: (str) Source code with SMT replacement of holes by appropriate functions.
"""
if holes_decls is None or len(holes_decls) == 0:
return code
else:
code = code.replace(")", " )")
for h in holes_decls:
if h.id in code:
code = code.replace(h.id+" ", h.get_function_call()+" ")
code = code.replace(" )", ")")
return code
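# Illustrative sketch, not part of the original module: assuming a hole whose
# id is 'H0' and whose get_function_call() returns '(H0 x y)', processHoles
# would rewrite
#     (assert (= res H0))
# into
#     (assert (= res (H0 x y)))
# (the exact return value of get_function_call() is an assumption here).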
def convert_py_to_smt2(code, code_pre, code_post, program_vars, env, holes_decls):
# Python source code --> internal abstract program representation.
ib, pre, post = ast_utils.process_source_code(code, code_pre, code_post, holes_decls)
utils.logger.debug('\n\n******** PROGRAM REPR ********:\n' + str(ib))
# Handling of loops
ib = interm.ProgramInterm(loops.unroll_loops(ib.src, n=env.loop_unrolling_level))
# Abstract program representation --> abstract program representation in SSA form.
if env.ssa_enabled:
ib, post = ssa_converter.convert(ib, post, program_vars)
program_vars.add_marked_variables(ib.src.collect_variables()) # Updating variable list
# Producing SMT-LIB code for program's elements.
ib_smt2 = ib.to_smt2(env)
pre_smt2 = pre.to_smt2(env)
post_smt2 = post.to_smt2(env)
return ib_smt2, pre_smt2, post_smt2
def write_script_to_file(script, env):
if env.save_script_to_file:
with open('script.smt2', 'w') as file_:
file_.write(script)
|
Tenchi2xh/cursebox
|
cursebox/__main__.py
|
Python
|
mit
| 1,969
| 0.000542
|
# -*- encoding: utf-8 -*-
from .cursebox import Cursebox
from .colors import colors
from .constants import EVENT_SKIP
from .utils import hex_to_rgb
logo = [u" █ ",
u"█▀█ █ █ █▀█ █▀▀ █▀█ █▀▄ █▀█ █▄█",
u"█ █ █ █ ▀▀█ █▄█ █ █ █ █ ▄█▄",
u"█▄█ █▄█ █ ▄▄█ █▄▄ █▄█ █▄█ █ █"]
grey = colors.from_rgb((127, 127, 127))
rainbow = ["ffffff", "ffaaaa", "ff5555", "ff0000",
"ff6d00", "ffda00", "b6ff00", "48ff00",
"00ff24", "00ff91", "00ffff", "0091ff",
"0024ff", "4800ff", "b600ff", "ff00da",
"ff006d", "ff0000", "ff5555", "ffaaaa"]
prompt = "cursebox v1.0 - Press any key to exit"
def demo():
l_width, l_height = len(logo[0]), len(logo)
x_s = 0.4
palette = [colors.from_rgb(hex_to_rgb(hex)) for hex in rainbow]
padding = [colors.white] * (int(x_s * l_width) + 3)
palette = padding + palette + padding
    with Cursebox(blocking_events=False) as cb:
        width, height = cb.width, cb.height
def draw_logo(t):
for y0, line in enumerate(logo):
y1 = (height - l_height) / 2 + y0
for x0, char in enumerate(line):
x1 = x0 + (width - l_width) / 2
offset = int(t + y0 + x_s * x0) % len(palette)
cb.put(x=x1, y=y1, text=char,
fg=palette[offset],
bg=colors.transparent)
t = 0
l = 100
cb.put(x=(width - len(prompt)) / 2,
y=(height + l_height) / 2 + 1,
text=prompt, fg=grey, bg=colors.transparent)
while cb.poll_event() == EVENT_SKIP:
draw_logo(t if t < len(palette) else 0)
t += 1
if t > l + len(palette):
t = 0
if __name__ == "__main__":
demo()
|
alexis-roche/nireg
|
nireg/histogram_registration.py
|
Python
|
bsd-3-clause
| 23,075
| 0.000563
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Intensity-based image registration
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import scipy.ndimage as nd
from nibabel import Nifti1Image
from .optimizer import configure_optimizer
from .affine import inverse_affine, subgrid_affine, affine_transforms
from .chain_transform import ChainTransform
from .similarity_measures import similarity_measures as builtin_simi
from ._register import _joint_histogram
MAX_INT = np.iinfo(np.intp).max
# Module globals
VERBOSE = os.environ.get('NIREG_DEBUG_PRINT', False) # enables online print statements
CLAMP_DTYPE = 'short' # do not edit
NPOINTS = 64 ** 3
# Dictionary of interpolation methods (partial volume, trilinear,
# random)
interp_methods = {'pv': 0, 'tri': 1, 'rand': -1}
def unpack(val, numtype):
try:
tmp = numtype(val)
out = (tmp, tmp)
except:
out = (numtype(val[0]), numtype(val[1]))
return out
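# For illustration (derived from the code above): unpack(256, int) returns
# (256, 256), while unpack((128, 256), int) returns (128, 256); the same
# pattern handles the scalar-or-pair `bins` and `sigma` arguments below.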
class HistogramRegistration(object):
"""
    A class to represent a generic intensity-based image registration
algorithm.
"""
def __init__(self, from_img, to_img,
from_mask=None,
to_mask=None,
bins=256,
spacing=None,
similarity='crl1',
interp='pv',
sigma=0,
renormalize=False,
dist=None):
"""Creates a new histogram registration object.
Parameters
----------
from_img : nibabel image
`From` image
to_img : nibabel image
`To` image
from_mask : array-like
Mask to apply to the `from` image
to_mask : array-like
Mask to apply to the `to` image
bins : integer or sequence
Number of histogram bins to represent the `from` and `to`
image, respectively. If float, the same binning is applied
to both images.
spacing : None or sequence
A sequence of three integers representing the subsampling
factors applied to the `from` image grid for faster
similarity computation. If None, the spacing is set
automatically so as to trade off between registration
accuracy and computation time.
similarity : str or callable
Cost-function for assessing image similarity. If a string,
one of 'cc': correlation coefficient, 'cr': correlation
ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual
information, 'nmi': normalized mutual information, 'slr':
supervised log-likelihood ratio. If a callable, it should
take a two-dimensional array representing the image joint
histogram as an input and return a float.
dist: None or array-like
Joint intensity probability distribution model for use with the
'slr' measure. Should be of shape (from_bins, to_bins).
interp : str
Interpolation method. One of 'pv': Partial volume, 'tri':
Trilinear, 'rand': Random interpolation. See ``joint_histogram.c``
sigma : float or sequence
Standard deviation(s) in millimeters of isotropic Gaussian
kernels used to smooth the `from` and `to` images,
respectively. If float, the same kernel size is applied to
both images. If 0, no smoothing is applied.
"""
# Binning sizes
from_bins, to_bins = unpack(bins, int)
# Smoothing kernel sizes
self._from_sigma, self._to_sigma = unpack(sigma, float)
# Clamping of the `from` image. The number of bins may be
        # overridden if unnecessarily large.
        data, from_bins_adjusted = clamp(from_img,
from_bins,
mask=from_mask,
sigma=self._from_sigma)
if not similarity == 'slr':
from_bins = from_bins_adjusted
self._from_img = Nifti1Image(data, from_img.get_affine())
# Set field of view in the `from` image with potential
# subsampling for faster similarity evaluation. This also sets
# the _from_data and _vox_coords attributes
if spacing == None:
npoints = NPOINTS
else:
npoints = None
if from_mask == None:
corner, size = (0, 0, 0), None
else:
corner, size = smallest_bounding_box(from_mask)
self.set_fov(spacing=spacing, corner=corner, size=size,
npoints=npoints)
# Clamping of the `to` image including padding with -1
data, to_bins_adjusted = clamp(to_img,
to_bins,
mask=to_mask,
sigma=self._to_sigma)
if not similarity == 'slr':
to_bins = to_bins_adjusted
self._to_data = -np.ones(np.array(to_img.shape) + 2, dtype=CLAMP_DTYPE)
self._to_data[1:-1, 1:-1, 1:-1] = data
self._to_inv_affine = inverse_affine(to_img.get_affine())
# Joint histogram: must be double contiguous as it will be
# passed to C routines which assume so
self._joint_hist = np.zeros([from_bins, to_bins], dtype='double')
# Set default registration parameters
self._set_interp(interp)
self._set_similarity(similarity, renormalize, dist=dist)
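    # Minimal usage sketch (an assumption, not shown in this excerpt): with two
    # nibabel images `from_img` and `to_img`,
    #     reg = HistogramRegistration(from_img, to_img, similarity='nmi')
    #     reg.set_fov(npoints=64 ** 3)
    # restricts the joint-histogram field of view before any optimization step.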
def _get_interp(self):
return list(interp_methods.keys())[\
list(interp_methods.values()).index(self._interp)]
def _set_interp(self, interp):
self._interp = interp_methods[interp]
interp = property(_get_interp, _set_interp)
def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,
npoints=None):
"""
Defines a subset of the `from` image to restrict joint
histogram computation.
Parameters
----------
spacing : sequence (3,) of positive integers
Subsampling of image in voxels, where None (default) results
in the subsampling to be automatically adjusted to roughly
match a cubic grid with `npoints` voxels
corner : sequence (3,) of positive integers
Bounding box origin in voxel coordinates
size : sequence (3,) of positive integers
Desired bounding box size
npoints : positive integer
Desired number of voxels in the bounding box. If a `spacing`
argument is provided, then `npoints` is ignored.
"""
if spacing is None and npoints is None:
spacing = [1, 1, 1]
if size is None:
size = self._from_img.shape
slicer = lambda c, s, sp:\
tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)])
# Adjust spacing to match desired field of view size
if spacing is not None:
fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
else:
fov_data = self._from_img.get_data()[slicer(corner, size, [1, 1, 1])]
spacing = ideal_spacing(fov_data, npoints=npoints)
fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
self._from_data = fov_data
self._from_npoints = (fov_data >= 0).sum()
self._from_affine = subgrid_affine(self._from_img.get_affine(),
slicer(corner, size, spacing))
# We cache the voxel coordinates of the clamped image
self._from_spacing = spacing
self._vox_coords =\
np.indices(self._from_data.shape).transpose((1, 2, 3, 0))
def _set_similarity(self, similarity, renormalize=False, dist=None):
if similarity in builtin_simi:
if similarity == 'slr':
if dist is None:
raise ValueError('slr measure requires a joint intensity distribution model, '
'see `dist` argument of HistogramRegistratio
|
tchellomello/home-assistant
|
tests/components/brother/test_sensor.py
|
Python
|
apache-2.0
| 10,659
| 0.000281
|
"""Test sensor of Brother integration."""
from datetime import datetime, timedelta
import json
from homeassistant.components.brother.const import UNIT_PAGES
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import UTC, utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.brother import init_integration
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_COUNTER = "counter"
async def test_sensors(hass):
"""Test states of the sensors."""
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=UTC)
with patch(
"homeassistant.components.brother.sensor.utcnow", return_value=test_time
):
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer"
assert state.state == "waiting"
entry = registry.async_get("sensor.hl_l2340dw_status")
assert entry
assert entry.unique_id == "0123456789_status"
state = hass.states.get("sensor.hl_l2340dw_black_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "75"
entry = registry.async_get("sensor.hl_l2340dw_black_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_black_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_cyan_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "10"
entry = registry.async_get("sensor.hl_l2340dw_cyan_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_cyan_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_magenta_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "8"
entry = registry.async_get("sensor.hl_l2340dw_magenta_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_magenta_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_yellow_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "2"
entry = registry.async_get("sensor.hl_l2340dw_yellow_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_yellow_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 11014
assert state.attributes.get(ATTR_COUNTER) == 986
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_drum_remaining_life"
    state = hass.states.get("sensor.hl_l2340dw_black_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
    assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_black_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_black_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_cyan_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_magenta_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_yellow_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_fuser_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:water-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_fuser_remaining_life")
assert entry
assert entry.unique_id == "0123456789_fuser_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:current-ac"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert entry
assert entry.unique_id == "0123456789_belt_unit_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "98"
entry = registry.async_get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert entry
assert entry.unique_id == "0123456789_pf_kit_1_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_page_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "986"
entry = registry.async_get("sensor.hl_l2340dw_page_counter")
assert entry
assert entry.unique_id == "0123456789_page_counter"
state = hass.states.get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "538"
entry = registry.async_get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert entry
assert entry.unique_id == "0123456789_duplex_unit_pages_counter"
state = hass.states.get("sensor.hl_l2340dw_b_w_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "709"
entry = registry.async_get("sensor.hl_l2340dw_b_w_counter")
assert entry
assert entry.unique_id == "0123456789_b/w_counter"
state = h
|
ingadhoc/purchase
|
purchase_stock_ux/models/purchase_order_line.py
|
Python
|
agpl-3.0
| 7,593
| 0
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api, _
from odoo.exceptions import UserError
from odoo.tools import float_compare
from lxml import etree
import logging
_logger = logging.getLogger(__name__)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
delivery_status = fields.Selection([
('no', 'Not purchased'),
('to receive', 'To Receive'),
('received', 'Received'),
],
string='Delivery Status',
compute='_compute_delivery_status',
store=True,
readonly=True,
copy=False,
default='no'
)
vouchers = fields.Char(
compute='_compute_vouchers'
)
qty_on_voucher = fields.Float(
compute="_compute_qty_on_voucher",
string="On Voucher",
digits='Product Unit of Measure',
)
qty_returned = fields.Float(
string='Returned',
copy=False,
default=0.0,
readonly=True,
compute='_compute_qty_returned'
)
@api.depends_context('voucher')
def _compute_qty_on_voucher(self):
        # when computing per voucher we do not take the invoicing policy into
        # account, i.e. we compute as if the policy were "on received quantities"
voucher = self._context.get('voucher', False)
if not voucher:
self.update({'qty_on_voucher': 0.0})
return
lines = self.filtered(
lambda x: x.order_id.state in ['purchase', 'done'])
moves = self.env['stock.move'].search([
('id', 'in', lines.mapped('move_ids').ids),
('state', '=', 'done'),
('picking_id.vouchers', 'ilike', voucher[0]),
])
for line in lines:
line.qty_on_voucher = sum(moves.filtered(
lambda x: x.id in line.move_ids.ids).mapped('product_uom_qty'))
def button_cancel_remaining(self):
        # cancelling kits is not well resolved, since odoo only computes the
        # delivered quantity once the whole kit has been delivered.
        # Because of that, for now, we disable cancellation for kits.
bom_enable = 'bom_ids' in self.env['product.template']._fields
for rec in self:
old_product_qty = rec.product_qty
            # TODO maybe change this in v10
            # in this case we do block it, because if they end up wanting to
            # generate a credit note they can do it by searching the invoice
            # lines and then they can end up cancelling it
if rec.qty_invoiced > rec.qty_received:
raise UserError(_(
                    'You can not cancel remaining qty to receive because '
'there are more product invoiced than the received. '
'You should correct invoice or ask for a refund'))
if bom_enable:
bom = self.env['mrp.bom']._bom_find(
product=rec.product_id)
if bom and bom.type == 'phantom':
raise UserError(_(
"Cancel remaining can't be called for Kit Products "
"(products with a bom of type kit)."))
rec.product_qty = rec.qty_received
to_cancel_moves = rec.move_ids.filtered(
lambda x: x.state not in ['done', 'cancel'])
to_cancel_moves._cancel_quantity()
rec.order_id.message_post(
body=_(
'Cancel remaining call for line "%s" (id %s), line '
'qty updated from %s to %s') % (
rec.name, rec.id, old_product_qty, rec.product_qty))
def _compute_vouchers(self):
for rec in self:
rec.vouchers = ', '.join(rec.mapped(
'move_ids.picking_id.voucher_ids.display_name'))
@api.depends(
'order_id.state', 'qty_received', 'qty_returned', 'product_qty',
'order_id.force_delivered_status')
def _compute_delivery_status(self):
precision = self.env['decimal.precision'].precision_get(
'Product Unit of Measure')
for line in self:
if line.state not in ('purchase', 'done'):
line.delivery_status = 'no'
continue
if line.order_id.force_delivered_status:
line.delivery_status = line.order_id.force_delivered_status
continue
if float_compare(
(line.qty_received + line.qty_returned), line.product_qty,
precision_digits=precision) == -1:
line.delivery_status = 'to receive'
elif float_compare(
(line.qty_received + line.qty_returned), line.product_qty,
precision_digits=precision) >= 0:
line.delivery_status = 'received'
else:
line.delivery_status = 'no'
@api.onchange('product_qty')
def _onchange_product_qty(self):
if (
self.state == 'purchase' and
self.product_id.type in ['product', 'consu'] and
self.product_qty < self._origin.product_qty):
warning_mess = {
'title': _('Ordered quantity decreased!'),
'message': (
'¡Está reduciendo la cantidad pedida! Recomendamos usar'
' el botón para cancelar remanente y'
' luego setear la cantidad deseada.'),
}
self.product_qty = self._origin.product_qty
return {'warning': warning_mess}
return {}
@api.depends('order_id.state', 'move_ids.state')
def _compute_qty_returned(self):
for line in self:
qty = 0.0
for move in line.move_ids.filtered(
lambda m: m.state == 'done' and
m.location_id.usage != 'supplier' and m.to_refund):
qty += move.product_uom._compute_quantity(
move.product_uom_qty,
line.product_uom)
line.qty_returned = qty
# Overwrite the origin method to introduce the qty_on_voucher
def action_add_all_to_invoice(self):
for rec in self:
rec.invoice_qty = rec.qty_on_voucher or (
rec.qty_to_invoice + rec.invoice_qty)
@api.model
def fields_view_get(self, view_id=None, view_type='form',
toolbar=False, submenu=False):
"""
If we came from invoice, we send in context 'force_line_edit'
and we change tree view to make editable and also field qty
"""
res = super().fields_view_get(
view_id=view_id, view_type=view_type,
toolbar=toolbar, submenu=submenu)
if self._context.get('force_line_edit') and view_type == 'tree':
doc = etree.XML(res['arch'])
placeholder = doc.xpath("//field[1]")[0]
placeholder.addprevious(
etree.Element('field', {
'name': 'qty_on_voucher',
'readonly': '1',
# on enterprise view is not refres
# 'invisible': "not context.get('voucher', False)",
}))
res['fields'].update(self.fields_get(
['qty_on_voucher']))
res['arch'] = etree.tostring(doc)
return res
|
bt3gl/NetAna-Complex-Network-Analysis
|
src/calculate_features_advanced/road.py
|
Python
|
mit
| 577
| 0.015598
|
#!/usr/bin/env python
__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent
|
Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "4.1"
__maintainer__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"
from helpers import running, constants
# change here for type of net:
NETWORK_FILES = constants.NETWORK_FILES_UN_ROAD
TYPE_NET_DIR = "road/"
def main():
running.sampling(NETWORK_FILES, TYPE_NET_DIR, [])
print("All graphs for " + TYPE_NET_DIR + " were processed.
|
The end! \n")
if __name__ == '__main__':
main()
|
wubr2000/googleads-python-lib
|
examples/dfp/v201505/audience_segment_service/get_first_party_audience_segments.py
|
Python
|
apache-2.0
| 2,062
| 0.009214
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all first party audience segments.
To create first party audience segments, run create_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize client object.
client = dfp.DfpClient.LoadFromStorage()
# Initialize appropriate service.
audience_segment_service = client.GetService(
'AudienceSegmentService', version='v201505')
# Create statement object to only select first party audience segments.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'FIRST_PARTY'
}
}]
query = 'WHERE Type = :type'
statement = dfp.FilterStatement(query, values)
# Get audience segments by statement.
while True:
response = audience_segment_service.getAudienceSegmentsByStatement(
statement.ToStatement())
if 'results' in response:
segments = response['results']
for segment in segments:
print ('Audience segment with id \'%s\' and name \'%s\' of size '
'%s was found. ' %
(segment['id'], segment['name'], segment['size']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/django_openid_auth/management/commands/openid_cleanup.py
|
Python
|
agpl-3.0
| 1,732
| 0
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from django_openid_auth.store import DjangoOpenIDStore
class Command(NoArgsCommand):
help = 'Clean up stale OpenID associations and nonces'
def handle_noargs(self, **options):
store = DjangoOpenIDStore()
store.cleanup()
|
tkchafin/scripts
|
findBreaksVCF.py
|
Python
|
gpl-3.0
| 3,606
| 0.044925
|
#!/usr/bin/python
import re
import sys
import os
import getopt
import vcf
def main():
params = parseArgs()
vfh = vcf.Reader(open(params.vcf, 'r'))
#grab contig sizes
contigs = dict()
for c,s in vfh.contigs.items():
contigs[s.id] = s.length
regions = list()
this_chrom = None
start = int()
stop = int()
count = 0
for rec in vfh:
if not this_chrom:
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
#If we entered new chromosome, submit old break
elif this_chrom != rec.CHROM:
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
this_chrom = rec.CHROM
start = 1
stop = 1
count = 0
#if this SNP is parsimony-informative
if rec.is_snp and not rec.is_monomorphic:
#Check if parsimony-informative
if is_PIS(rec):
count+=1
#if this is the final PIS, submit region to list
if count == params.force:
stop = rec.POS
t = tuple([this_chrom, start, stop])
regions.append(t)
start = stop + 1
count = 0
t = tuple([this_chrom, start, contigs[this_chrom]])
regions.append(t)
print("Writing regions to out.regions...")
write_regions("out.regions", regions)
#Function to write list of regions tuples, in GATK format
def write_regions(f, r):
with open(f, 'w') as fh:
try:
for reg in r:
ol = str(reg[0]) + ":" + str(reg[1]) + "-" + str(reg[2]) + "\n"
fh.write(ol)
except IOError as e:
print("Could not read file %s: %s"%(f,e))
sys.exit(1)
except Exception as e:
print("Unexpected error reading file %s: %s"%(f,e))
sys.exit(1)
finally:
fh.close()
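# Illustrative sketch (not part of the original script): write_regions() emits one
# "CHROM:START-STOP" line per tuple, i.e. the interval format the script's comment
# above calls "GATK format". The file name and coordinates below are made up.
def _example_write_regions():
    write_regions("example.regions", [("chr1", 1, 50000), ("chr1", 50001, 120000)])
    # example.regions now contains:
    #   chr1:1-50000
    #   chr1:50001-120000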
#Function to check pyVCF record for if parsimony informative or not
def is_PIS(r):
ref=0
alt=0
for call in r.samples:
if call.gt_type is not None:
if call.gt_type == 0:
ref += 1
elif call.gt_type == 1:
alt += 1
elif call.gt_type == 2:
alt += 1
ref += 1
if ref >= 2 and alt >= 2:
return(True)
if ref <= 2 and alt <= 2:
return(False)
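# Illustrative sketch (not part of the original script): is_PIS() expects an object
# with a .samples list of calls exposing pyVCF's .gt_type (None=missing, 0=hom-ref,
# 1=het, 2=hom-alt). The stand-in classes below are hypothetical and only show the
# shape of the input; a site counts as parsimony-informative when at least two
# samples support the REF allele and at least two support the ALT allele.
def _example_is_PIS():
    class _FakeCall(object):
        def __init__(self, gt_type):
            self.gt_type = gt_type
    class _FakeRecord(object):
        def __init__(self, gt_types):
            self.samples = [_FakeCall(g) for g in gt_types]
    return is_PIS(_FakeRecord([2, 2, 1, 1]))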
#Object to parse command-line arguments
class parseArgs():
def __init__(self):
#Define options
try:
options, remainder = getopt.getopt(sys.argv[1:], 'v:f:h', \
["vcf=" "help", "force="])
except getopt.GetoptError as err:
print(err)
self.display_help("\nExiting because getopt returned non-zero exit status.")
#Default values for params
#Input params
self.vcf=None
self.force=100000
#First pass to see if help menu was called
for o, a in options:
if o in ("-h", "-help", "--help"):
self.display_help("Exiting because help menu was called.")
#Second pass to set all args.
for opt, arg_raw in options:
arg = arg_raw.replace(" ","")
arg = arg.strip()
opt = opt.replace("-","")
#print(opt,arg)
if opt in ('v', 'vcf'):
self.vcf = arg
elif opt in ('f','force'):
self.force=int(arg)
elif opt in ('h', 'help'):
pass
else:
assert False, "Unhandled option %r"%opt
#Check mandatory options are set
if not self.vcf:
self.display_help("Must provide VCF file <-v,--vcf>")
def display_help(self, message=None):
if message is not None:
print()
print (message)
print ("\nfindBreaksVCF.py\n")
print ("Contact:Tyler K. Chafin, University of Arkansas,tkchafin@uark.edu")
print ("\nUsage: ", sys.argv[0], "-v <input.vcf> -f <100000>\n")
print ("Description: Breaks chromosomes into chunks of X parsimony-informative sites, for running MDL")
print("""
Arguments:
-v,--vcf : VCF file for parsing
-f,--force : Number of PIS to force a break
-h,--help : Displays help menu
""")
print()
sys.exit()
#Call main function
if __name__ == '__main__':
main()
|
nismod/energy_demand
|
energy_demand/validation/lad_validation.py
|
Python
|
mit
| 33,133
| 0.003501
|
"""Compare gas/elec demand on Local Authority Districts with modelled demand
"""
import os
import operator
import logging
import copy
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from energy_demand.basic import lookup_tables
from energy_demand import enduse_func
from energy_demand.profiles import load_profile
from energy_demand.validation import elec_national_data
from energy_demand.read_write import data_loader
from energy_demand.basic import date_prop
from energy_demand.plotting import fig_load_profile_dh_multiple
from energy_demand.plotting import basic_plot_functions
from energy_demand.plotting import plotting_styles
def map_LAD_2011_2015(lad_data):
"""Map LAD 2015 values to LAD 2011.
Arguments
-----------
lad_data : dict
LAD 2015 data
Returns
--------
mapped_lads : dict
LAD 2011 census data lads
"""
mapped_lads = copy.deepcopy(lad_data)
mapped_lads.keys()
try:
# E41000324 (City of London, Westminster) splits
# to E09000001 (City of London) and E09000033 (Westminster)
mapped_lads['E41000324'] = lad_data['E09000001'] + lad_data['E09000033']
del mapped_lads['E09000001']
del mapped_lads['E09000033']
    except KeyError:
pass
try:
# E41000052 (Cornwall, Isles of Scilly) splits
# to E06000052 (Cornwall) and E06000053 (Isles of Scilly) (edited)
mapped_lads['E41000052'] = lad_data['E06000052'] + lad_data['E06000053']
del mapped_lads['E06000052']
del mapped_lads['E06000053']
    except KeyError:
pass
try:
# missing S12000013 (Na h-Eileanan Siar)
# and S12000027 (Shetland Islands)
del mapped_lads['S12000013']
del mapped_lads['S12000027']
    except KeyError:
pass
return mapped_lads
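# Illustrative sketch (not part of the original module): the remapping simply sums
# the values of LADs that were split between the 2011 and 2015 geographies and drops
# the two Scottish LADs without validation data. The keys are the ONS codes used
# above; the numbers are made up.
def _example_map_LAD_2011_2015():
    example = {'E09000001': 10.0, 'E09000033': 5.0,
               'E06000052': 3.0, 'E06000053': 1.0,
               'S12000013': 2.0, 'S12000027': 4.0}
    mapped = map_LAD_2011_2015(example)
    assert mapped['E41000324'] == 15.0   # City of London + Westminster
    assert mapped['E41000052'] == 4.0    # Cornwall + Isles of Scilly
    assert 'S12000013' not in mapped     # dropped
    return mapped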
def temporal_validation(
result_paths,
ed_fueltype_yh,
elec_factored_yh,
plot_criteria
):
"""National hourly electricity data is validated with
the summed modelled hourly demand for all regions.
Because the total annual modelled and real demands
do not match (because of different data sources
and because Northern Ireland is not included in the
validation data) a correction factor is used.
Arguments
---------
result_paths : dict
Paths
ed_fueltype_yh : array
    Fuel type specific yh energy demand
elec_factored_yh : array
    Factored national hourly electricity demand used for comparison
plot_criteria : bool
    Criteria to show plots or not
"""
# ----------------
# Plot a full year
# ----------------
days_to_plot = list(range(0, 365))
elec_national_data.compare_results(
'validation_temporal_electricity_8760h.pdf',
result_paths['data_results_validation'],
elec_factored_yh,
ed_fueltype_yh,
'all_submodels',
days_to_plot,
plot_crit=plot_criteria)
# Plot four weeks (one of each season)
winter_week = list(range(
date_prop.date_to_yearday(2015, 1, 12), date_prop.date_to_yearday(2015, 1, 19))) #Jan
spring_week = list(range(
date_prop.date_to_yearday(2015, 5, 11), date_prop.date_to_yearday(2015, 5, 18))) #May
summer_week = list(range(
date_prop.date_to_yearday(2015, 7, 13), date_prop.date_to_yearday(2015, 7, 20))) #Jul
autumn_week = list(range(
date_prop.date_to_yearday(2015, 10, 12), date_prop.date_to_yearday(2015, 10, 19))) #Oct
days_to_plot = winter_week + spring_week + summer_week + autumn_week
elec_national_data.compare_results(
'validation_temporal_electricity_weeks_selection.pdf',
result_paths['data_results_validation'],
elec_factored_yh,
ed_fueltype_yh,
'all_submodels',
days_to_plot,
plot_crit=plot_criteria)
return
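# Hypothetical sketch (not part of the original module): the "correction factor"
# mentioned in the docstring above simply rescales the measured national demand so
# that its annual total matches the modelled annual total before the hourly
# profiles are compared. Function and argument names here are illustrative only.
def _example_annual_correction(modelled_y, real_y):
    modelled_y = np.asarray(modelled_y, dtype=float)
    real_y = np.asarray(real_y, dtype=float)
    factor = np.sum(modelled_y) / np.sum(real_y)
    return real_y * factor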
def spatial_validation_lad_level(
disaggregated_fuel,
data_results_validation,
paths,
regions,
reg_coord,
plot_crit
):
"""Spatial validation
"""
fuel_elec_regs_yh = {}
fuel_gas_regs_yh = {}
fuel_gas_residential_regs_yh = {}
fuel_gas_non_residential_regs_yh = {}
fuel_elec_residential_regs_yh = {}
fuel_elec_non_residential_regs_yh = {}
lookups = lookup_tables.basic_lookups()
# -------------------------------------------
# Spatial validation
# -------------------------------------------
subnational_elec = data_loader.read_lad_demands(paths['val_subnational_elec'])
subnational_elec_residential = data_loader.read_lad_demands(paths['val_subnational_elec_residential'])
subnational_elec_non_residential = data_loader.read_lad_demands(paths['val_subnational_elec_non_residential'])
subnational_gas = data_loader.read_lad_demands(paths['val_subnational_gas'])
subnational_gas_residential = data_loader.read_lad_demands(paths['val_subnational_gas_residential'])
subnational_gas_non_residential = data_loader.read_lad_demands(paths['val_subnational_gas_non_residential'])
logging.info("compare total II {} {}".format(sum(subnational_gas.values()), sum(subnational_gas_residential.values())))
# Create fueltype-specific dict
for region in regions:
fuel_elec_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs'][region][lookups['fueltypes']['electricity']]
fuel_elec_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_residenital'][region][lookups['fueltypes']['electricity']]
fuel_elec_non_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_non_residential'][region][lookups['fueltypes']['electricity']]
fuel_gas_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs'][region][lookups['fueltypes']['gas']]
fuel_gas_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_residenital'][region][lookups['fueltypes']['gas']]
fuel_gas_non_residential_regs_yh[region] = disaggregated_fuel['tot_disaggregated_regs_non_residential'][region][lookups['fueltypes']['gas']]
# ----------------------------------------
# Remap demands between 2011 and 2015 LADs
# ----------------------------------------
subnational_elec = map_LAD_2011_2015(subnational_elec)
subnational_elec_residential = map_LAD_2011_2015(subnational_elec_residential)
subnational_elec_non_residential = map_LAD_2011_2015(subnational_elec_non_residential)
subnational_gas = map_LAD_2011_2015(subnational_gas)
subnational_gas_residential = map_LAD_2011_2015(subnational_gas_residential)
subnational_gas_non_residential = map_LAD_2011_2015(subnational_gas_non_residential)
fuel_elec_regs_yh = map_LAD_2011_2015(fuel_elec_regs_yh)
fuel_elec_residential_regs_yh = map_LAD_2011_2015(fuel_elec_residential_regs_yh)
fuel_elec_non_residential_regs_yh = map_LAD_2011_2015(fuel_elec_non_residential_regs_yh)
fuel_gas_regs_yh = map_LAD_2011_2015(fuel_gas_regs_yh)
fuel_gas_residential_regs_yh = map_LAD_2011_2015(fuel_gas_residential_regs_yh)
fuel_gas_non_residential_regs_yh = map_LAD_2011_2015(fuel_gas_non_residential_regs_yh)
logging.info("compare total {} {}".format(
sum(fuel_gas_residential_regs_yh.values()), sum(fuel_gas_regs_yh.values())))
# ----------------------------------------------
# Correct real values so that the totals match
# ----------------------------------------------
data_inputlist = [
(fuel_elec_residential_regs_yh, subnational_elec_residential), # domestic
(fuel_elec_non_residential_regs_yh, subnational_elec_non_residential)] # nondomestics
spatial_validation_multiple(
reg_coord=reg_coord,
input_data=data_inputlist,
regions=regions,
fueltype_str='elec',
fig_name=os.path.join(data_results_validation, 'validation_multiple_elec.pdf'),
label_points=False,
plotshow=plot_crit)
data_inputlist = [
(fuel_gas_residential_regs_yh, subnational_gas_residential), # domestic
(fuel_gas_non_residential_regs_yh, subnational_gas_non_residential)] # nondomestics
spatial_validation_multiple(
reg_coord=reg_coord,
input_data=data_inputlist,
|
fedora-infra/bodhi
|
bodhi-server/bodhi/server/buildsys.py
|
Python
|
gpl-2.0
| 29,649
| 0.001889
|
# Copyright 2007-2019 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Define tools for interacting with the build system and a fake build system for development."""
from functools import wraps
from threading import Lock
import logging
import os
import time
import typing
import backoff
import koji
if typing.TYPE_CHECKING: # pragma: no cover
from bodhi.server.config import BodhiConfig # noqa: 401
log = logging.getLogger('bodhi')
_buildsystem = None
_buildsystem_login_lock = Lock()
# URL of the koji hub
_koji_hub = None
def multicall_enabled(func: typing.Callable[..., typing.Any]) -> typing.Callable[..., typing.Any]:
"""
Decorate the given callable to enable multicall handling.
This is used by DevBuildsys methods.
Args:
func: The function to wrap.
Returns:
A wrapped version of func.
"""
@wraps(func)
def wrapper(self, *args, **kwargs) -> typing.Any:
"""
If multicall is enabled, store the results from func on self.
If multicall is not enabled, just call func and return its results as per usual.
"""
if not self.multicall:
return func(self, *args, **kwargs)
# disable multicall during execution, so that inner func calls to other
# methods don't append their results as well
self._multicall = False
result = func(self, *args, **kwargs)
self.multicall_result.append([result])
self._multicall = True
return wrapper
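# Hypothetical usage sketch (not part of the original module): when multicall is
# enabled on DevBuildsys, decorated methods buffer their return values instead of
# returning them, and multiCall() flushes the buffer, mimicking koji's client-side
# batching. The tag and build names below are made up.
def _example_multicall_usage():
    buildsys = DevBuildsys()
    buildsys.multicall = True
    buildsys.tagBuild('f17-updates-testing', 'TurboGears-1.0.2.2-2.fc17')
    buildsys.untagBuild('f17-updates-candidate', 'TurboGears-1.0.2.2-2.fc17')
    results = buildsys.multiCall()  # one [result] entry per buffered call
    assert len(results) == 2
    return results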
class DevBuildsys:
"""A dummy buildsystem instance used during development and testing."""
_side_tag_data = [{'id': 1234, 'name': 'f17-build-side-1234'},
{'id': 7777, 'name': 'f17-build-side-7777'}]
__untag__ = [] # type: typing.List[typing.Tuple[str, str]]
__moved__ = [] # type: typing.List[typing.Tuple[str, str, str]]
__added__ = [] # type: typing.List[typing.Tuple[str, str]]
__tagged__ = {} # type: typing.Mapping[str, typing.List[str]]
__rpms__ = [] # type: typing.List[typing.Dict[str, object]]
__tags__ = [] # type: typing.List[typing.Tuple[str, typing.Mapping[str, typing.Any]]]
__side_tags__ = _side_tag_data # type: typing.List[typing.Dict[str, object]]
__removed_side_tags__ = [] # type: typing.List[typing.Dict[str, object]]
_build_data = {'build_id': 16058,
'completion_time': '2007-08-24 23:26:10.890319',
'completion_ts': 1187997970,
'creation_event_id': 151517,
'creation_time': '2007-08-24 19:38:29.422344',
'extra': None,
'epoch': None,
'owner_id': 388,
'owner_name': 'lmacken',
'package_id': 8,
'state': 1,
'tag_id': 19,
'task_id': 127621}
def __init__(self):
"""Initialize the DevBuildsys."""
self._multicall = False
self.multicall_result = []
@property
def _side_tag_ids_names(self):
return {id_or_name
for taginfo in self._side_tag_data
for id_or_name in (taginfo['id'], taginfo['name'])}
@property
def multicall(self) -> bool:
"""
Return the value of self._multicall.
Returns:
object: The value of self._multicall.
"""
return self._multicall
@multicall.setter
def multicall(self, value: bool):
"""
Set the _multicall attribute to the given value.
Args:
value: The value to set the _multicall attribute to.
"""
self._multicall = value
self.multicall_result = []
@classmethod
def clear(cls):
"""Clear the state of the class variables."""
cls.__untag__ = []
cls.__moved__ = []
cls.__added__ = []
cls.__tagged__ = {}
cls.__rpms__ = []
cls.__tags__ = []
cls.__side_tags__ = list(cls._side_tag_data)
def multiCall(self):
"""Emulate Koji's multiCall."""
result = self.multicall_result
self.multicall = False
return result
def moveBuild(self, from_tag: str, to_tag: str, build: str, *args, **kw):
"""Emulate Koji's moveBuild."""
if to_tag is None:
raise RuntimeError('Attempt to tag {} with None.'.format(build))
log.debug("moveBuild(%s, %s, %s)" % (from_tag, to_tag, build))
DevBuildsys.__moved__.append((from_tag, to_tag, build))
@multicall_enabled
def tagBuild(self, tag: str, build: str, *args, **kw):
"""Emulate Koji's tagBuild."""
if tag is None:
raise RuntimeError('Attempt to tag {} with None.'.format(build))
log.debug("tagBuild(%s, %s)" % (tag, build))
DevBuildsys.__added__.append((tag, build))
@multicall_enabled
def untagBuild(self, tag: str, build: str, *args, **kw):
"""Emulate Koji's untagBuild."""
if tag is None:
raise RuntimeError('Attempt to untag {} with None.'.format(build))
log.debug("untagBuild(%s, %s)" % (tag, build))
DevBuildsys.__untag__.append((tag, build))
def ssl_login(self, *args, **kw):
"""Emulate Koji's ssl_login."""
log.debug("ssl_login(%s, %s)" % (args, kw))
def taskFinished(self, task: int) -> bool:
"""Emulate Koji's taskFinished."""
return True
def getTaskInfo(self, task: int) -> typing.Mapping[str, int]:
"""Emulate Koji's getTaskInfo."""
return {'state': koji.TASK_STATES['CLOSED']}
def getTaskRequest(self, task_id: int) -> typing.List[typing.Union[str, typing.Mapping]]:
"""Emulate Koji's getTaskRequest."""
return [
'git://pkgs.fedoraproject.org/rpms/bodhi?#2e994ca8b3296e62e8b0aadee1c5c0649559625a',
'f17-candidate', {}]
def listPackages(self) -> typing.List[typing.Mapping[str, typing.Union[int, str]]]:
"""Emulate Koji's listPackages."""
return [
{'package_id': 2625, 'package_name': 'nethack'},
]
@multicall_enabled
def getBuild(self, build='TurboGears-1.0.2.2-2.fc17', other=False, testing=False):
"""Emulate Koji's getBuild."""
# needed to test against non-existent builds
if 'youdontknowme' in build:
return None
if 'gnome-backgrounds-3.0-1.fc17' in build:
return {'name': 'gnome-backgrounds',
'nvr': 'gnome-backgrounds-3.0-1.fc17',
'package_name': 'gnome-backgrounds',
'release': '1.fc17',
'tag_name': 'f17-build-side-7777',
'version': '3.0'}
theid = 16058
if other and not testing:
theid = 16059
elif other and testing:
theid = 16060
data = self._build_data.copy()
data['id'] = theid
if 'noowner' in build:
del data['owner_name']
name, version, release = build.rsplit("-", 2)
release_tokens = release.split(".")
for token in release_tokens:
# Starting to hardcode some dev buildsys bits for docker.
# See https://github.com/fedora-infra/bodhi/pull/1543
if token.endswith("container") or token.endswith("flatpak"):
fedora_release = "f" + (token
.replace("fc", "")
|
hpproliant/ironic
|
ironic/tests/unit/drivers/modules/amt/test_management.py
|
Python
|
apache-2.0
| 10,845
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for AMT ManagementInterface
"""
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.amt import common as amt_common
from ironic.drivers.modules.amt import management as amt_mgmt
from ironic.drivers.modules.amt import resource_uris
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
as mock_specs
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_amt_info()
CONF = cfg.CONF
@mock.patch.object(amt_common, 'pywsman', spec_set=mock_specs.PYWSMAN_SPEC)
class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(AMTManagementInteralMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=INFO_DICT)
def test__set_boot_device_order(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._set_boot_device_order(self.node, device)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)
def test__set_boot_device_order_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._set_boot_device_order, self.node, device)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._set_boot_device_order, self.node, device)
def test__enable_boot_config(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._enable_boot_config(self.node)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)
def test__enable_boot_config_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._enable_boot_config, self.node)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._enable_boot_config, self.node)
class AMTManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(AMTManagementTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.info = INFO_DICT
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=self.info)
def test_get_properties(self):
expected = amt_common.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate_fail(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_drvinfo.side_effect = iter(
[exception.InvalidParameterValue('x')])
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_get_supported_boot_devices(self):
expected = [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(
sorted(expected),
sorted(task.driver.management.
get_supported_boot_devices(task)))
def test_set_boot_device_one_time(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe')
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertFalse(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_persistent(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe',
persistent=True)
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
|
wcmckee/hamiltoncomputerclub.org.nz
|
static/cache/.mako.tmp/comments_helper_googleplus.tmpl.py
|
Python
|
mit
| 2,205
| 0.004989
|
# -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 8
_modified_time = 1399593695.26071
_enable_loop = True
_template_filename = u'/usr/lib/python2.7/site-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl'
_template_uri = u'comments_helper_googleplus.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_form', 'comment_link', 'comment_link_script']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 9
__M_writer(u'\n\n')
# SOURCE LINE 14
__M_writer(u'\n\n')
# SOURCE LINE 17
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 2
__M_writer(u'\n<script src="https://apis.google.com/js/plusone.js"></script>\n<div class="g-comments"\n data-href="')
# SOURCE LINE 5
__M_writer(unicode(url))
__M_writer(u'"\n data-first_party_property="BLOGGER"\n data-view_type="FILTERED_POSTMOD">\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 11
__M_writer(u'\n<div class="g-commentcount" data-href="')
# SOURCE LINE 12
__M_writer(unicode(link))
__M_writer(u'"></div>\n<script src="https://apis.google.com/js/plusone.js"></script>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
# SOURCE LINE 16
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
|
verma-varsha/zulip
|
zerver/lib/i18n.py
|
Python
|
apache-2.0
| 2,570
| 0.002724
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import operator
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.lru_cache import lru_cache
from six.moves import urllib, zip_longest, zip, range
from typing import Any, List, Dict, Optional, Text
import os
import ujson
def with_language(string, language):
# type: (Text, Text) -> Text
"""
This is an expensive function. If you are using it in a loop, it will
make your code slow.
"""
old_language = translation.get_language()
translation.activate(language)
result = _(string)
translation.activate(old_language)
return result
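# Hypothetical usage sketch (not part of the original module, and it assumes Django
# settings and the listed locales are already configured): with_language()
# re-activates a translation catalog on every call, which is why the docstring
# above warns against calling it inside a loop.
def _example_with_language():
    return {code: with_language("Welcome!", code) for code in ('de', 'es', 'ja')}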
@lru_cache()
def get_language_list():
# type: () -> List[Dict[str, Any]]
path = os.path.join(settings.STATIC_ROOT, 'locale', 'language_name_map.json')
with open(path, 'r') as reader:
languages = ujson.load(reader)
return languages['name_map']
def get_language_list_for_templates(default_language):
# type: (Text) -> List[Dict[str, Dict[str, str]]]
language_list = [l for l in get_language_list()
if 'percent_translated' not in l or
l['percent_translated'] >= 5.]
formatted_list = []
lang_len = len(language_list)
firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)
firsts = list(range(0, firsts_end))
seconds = list(range(firsts_end, lang_len))
assert len(firsts) + len(seconds) == lang_len
for row in zip_longest(firsts, seconds):
item = {}
for position, ind in zip(['first', 'second'], row):
if ind is None:
continue
lang = language_list[ind]
percent = name = lang['name']
if 'percent_translated' in lang:
percent = u"{} ({}%)".format(name, lang['percent_translated'])
item[position] = {
'name': name,
'code': lang['code'],
'percent': percent,
'selected': True if default_language == lang['code'] else False
}
formatted_list.append(item)
return formatted_list
def get_language_name(code):
# type: (str) -> Optional[Text]
for lang in get_language_list():
if lang['code'] == code:
return lang['name']
return None
def get_available_language_codes():
# type: () -> List[Text]
language_list = get_language_list()
codes = [language['code'] for language in language_list]
return codes
|
MaxTyutyunnikov/lino
|
obsolete/sales.old/fixtures/demo.py
|
Python
|
gpl-3.0
| 7,084
| 0.025692
|
# -*- coding: UTF-8 -*-
## Copyright 2008-2011 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import time
from datetime import date
from dateutil import parser as dateparser
from lino import dd
from lino.modlib.sales import utils
from lino.utils.instantiator import Instantiator, i2d
def objects():
Company = dd.resolve_model("contacts.Company")
Customer = dd.resolve_model('sales.Customer')
products = dd.resolve_app('products')
sales = dd.resolve_app('sales')
salesrule = Instantiator(sales.SalesRule).build
#customer = Instantiator(Customer).build
imode = Instantiator(sales.InvoicingMode,
"id channel name advance_days journal").build
for c in Company.objects.filter(country_id='BE'):
yield c.contact_ptr.insert_child(Customer)
paymentterm = Instantiator(sales.PaymentTerm,"id name").build
yield paymentterm("pp","Prepayment",days=7)
yield paymentterm("cash","Cash")
yield paymentterm("7","7 days net",days=7)
pt15 = paymentterm("15","15 days net",days=15)
yield pt15
yield paymentterm("30","30 days net",days=30)
shippingmode = Instantiator(sales.ShippingMode,"id name").build
yield shippingmode("ta","take away")
yield shippingmode("rm","regular mail")
#~ for company in Company.objects.all():
#~ yield Customer(company=company)
#~ for person in Person.objects.all():
#~ yield Customer(person=person)
#ORD = journals.get_journal_by_docclass(Order)
#INV = journals.get_journal_by_docclass(Invoice)
ORD = sales.Order.create_journal("ORD",name="Orders",printed_name="Order # %d")
#ORD = journals.get_journal("ORD")
#INV = journals.get_journal("INV")
yield ORD
INV = sales.Invoice.create_journal("INV",\
account=ledger.Account.objects.get(match="4000"),
name="Invoices",printed_name="Invoice # %d")
#~ INV = sales.Invoice.create_journal("INV",account="4000",name="Invoices")
yield INV
imode_e = imode('e','E','By e-mail',2,INV,template='order_invoice.odt',build_method='appyodt')
yield imode_e
imode_p = imode('p','P','By snail mail',10,INV,template='order_invoice.odt',build_method='appyodt')
yield imode_p
yield salesrule(imode='e',shipping_mode="ta",payment_term="7")
#~ Company = resolve_model('contacts.Company')
#Person = resolve_model('contacts.Person')
#company1 = Company.objects.get(name__startswith="Ausdemwald")
#dubois = Person.objects.get(last_name__startswith="Dubois")
furniture = products.ProductCat.objects.get(pk=1) # name="Furniture")
hosting = products.Product.objects.get(pk=5)
#~ order = Instantiator(sales.Order,
#~ "company creation_date start_date cycle imode",
#~ payment_term="30",journal=ORD).build
#~ invoice = Instantiator(sales.Invoice,
#~ "company creation_date imode",
#~ payment_term="30",journal=INV).build
o = ORD.create_document(
customer=Customer.objects.all()[0],
#~ company=Company.objects.get(pk=1),
creation_date=i2d(20080923),start_date=i2d(20080924),
cycle="M",imode=imode_e,
sales_remark="monthly order")
#~ o = order(1,"2008-09-23","2008-09-24","M","e",sales_remark="monthly order")
o.add_item(hosting,1)
yield o
o = ORD.create_document(
customer=Customer.objects.all()[1],
#~ company=Company.objects.get(pk=2),
creation_date=i2d(20080923),start_date=i2d(20080924),
cycle="M",imode=imode_e,
sales_remark="Customer 2 gets 50% discount")
#~ o = order(2,"2008-09-23","2008-09-24","M","e",
#~ sales_remark="Company 2 gets 50% discount")
o.add_item(hosting,1,discount=50)
yield o
utils.make_invoices(make_until=date(2008,10,28))
i = INV.create_document(
customer=Customer.objects.all()[1],
#~ company=Company.objects.get(pk=2),
creation_date=i2d(20081029),
imode=imode_e,
sales_remark="first manual invoice")
#~ i = invoice(2,"2008-10-29","e",
#~ sales_remark="first manual invoice")
i.add_item(1,1)
i.add_item(2,4)
yield i
utils.make_invoices(make_until=date(2009,04,11))
i = INV.create_document(
customer=Customer.objects.all()[2],
#~ company=Company.objects.get(pk=3),
creation_date=i2d(20090411),
imode=imode_e,
sales_remark="second manual invoice")
#~ i = invoice(3,date(2009,04,11),"e",
#~ sales_remark="second manual invoice")
i.add_item(3,1)
i.add_item(4,4)
yield i
#d = '2009-04-12'
#d = '20090412'
d = i2d(20090412)
#d = date(2009,4,12)
#~ o2 = order(4,d,d,"Y","p",sales_remark="yearly order")
o2 = ORD.create_document(
customer=Customer.objects.all()[3],
#~ company=Company.objects.get(pk=4),
creation_date=d,start_date=d,
cycle="Y",imode=imode_p,
sales_remark="yearly order")
o2.add_item(3,1)
o2.add_item(4,4)
#print o2
#o2.save()
yield o2
utils.make_invoices(make_until=d)
#~ i = invoice(4,date(2009,04,13),"e",
#~ sales_remark="third manual invoice with discount")
i = INV.create_document(
customer=Customer.objects.all()[3],
#~ company=Company.objects.get(pk=4),
creation_date=i2d(20090413),
imode=imode_e,
sales_remark="third manual invoice with discount")
i.add_item(3,1,discount=10)
i.add_item(4,4,discount=5)
yield i
utils.make_invoices(make_until=date(2009,05,14))
#~ order = Instantiator(sales.Order,journal=ORD,cycle='M',imode='e',payment_term="15").build
i = 0
for cust in Customer.objects.order_by('id'):
i += 1
#~ for i in range(10):
#for i in range(29):
#~ o = order(
#~ company=i+1,creation_date=date(2009,6,1+i),
#~ sales_remark="range demo #%d" % i)
o = ORD.create_document(
cycle='M',imode=imode_e,payment_term=pt15,
customer=cust,
#~ company=Company.objects.get(pk=i+1),
creation_date=date(2009,6,i),
sales_remark="range demo #%d" % i)
yield o
yield o.add_item(5,1,unit_price=1.7*i)
utils.make_invoices(make_until=date(2009,7,1))
utils.make_invoices(make_until=date(2009,8,1))
utils.make_invoices(make_until=date(2009,10,1))
|
griest024/PokyrimTools
|
pyffi-develop/pyffi/spells/cgf/check.py
|
Python
|
mit
| 9,081
| 0.002313
|
"""Module which contains all spells that check something in a cgf file."""
# --------------------------------------------------------------------------
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, NIF File Format Library and Tools.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
# --------------------------------------------------------------------------
from tempfile import TemporaryFile
from pyffi.formats.cgf import CgfFormat
from pyffi.spells.cgf import CgfSpell
# XXX do something about this...
from pyffi.utils.mathutils import *
class SpellReadWrite(CgfSpell):
"""Like the original read-write spell, but with additional file size
check."""
SPELLNAME = "check_readwrite"
def dataentry(self):
self.toaster.msgblockbegin("writing to temporary file")
f_tmp = TemporaryFile()
try:
total_padding = self.data.write(f_tmp)
# comparing the files will usually be different because blocks may
# have been written back in a different order, so cheaply just compare
# file sizes
self.toaster.msg("comparing file sizes")
self.stream.seek(0, 2)
f_tmp.seek(0, 2)
if self.stream.tell() != f_tmp.tell():
self.toaster.msg("original size: %i" % self.stream.tell())
self.toaster.msg("written size: %i" % f_tmp.tell())
self.toaster.msg("padding: %i" % total_padding)
if self.stream.tell() > f_tmp.tell() or self.stream.tell() + total_padding < f_tmp.tell():
f_tmp.seek(0)
f_debug = open("debug.cgf", "wb")
f_debug.write(f_tmp.read(-1))
f_debug.close()
raise Exception('write check failed: file sizes differ by more than padding')
finally:
f_tmp.close()
self.toaster.msgblockend()
# spell is finished: prevent recursing into the tree
return False
class SpellCheckTangentSpace(CgfSpell):
"""This spell checks the tangent space calculation.
Only useful for debugging.
"""
SPELLNAME = "check_tangentspace"
SENSITIVITY = 0.1 # admissible float error (relative to one)
def datainspect(self):
return self.inspectblocktype(CgfFormat.MeshChunk)
def branchinspect(self, branch):
return isinstance(branch, (CgfFormat.MeshChunk, CgfFormat.NodeChunk))
def branchentry(self, branch):
if not isinstance(branch, CgfFormat.MeshChunk):
# keep recursing
return True
# get tangents and normals
if not (branch.normals_data and branch.tangents_data):
return True
oldtangents = [tangent for tangent in branch.tangents_data.tangents]
self.toaster.msg("recalculating new tangent space")
branch.update_tangent_space()
|
newtangents = [tangent for tangent in branch.tangents_data.tangents]
self.toaster.msgblockbegin("validating and checking old with new")
for norm, oldtangent, newtangent in zip(branch.normals_data.normals,
oldtangents
|
, newtangents):
#self.toaster.msg("*** %s ***" % (norm,))
# check old
norm = (norm.x, norm.y, norm.z)
tan = tuple(x / 32767.0
for x in (oldtangent[0].x,
oldtangent[0].y,
oldtangent[0].z))
bin = tuple(x / 32767.0
for x in (oldtangent[1].x,
oldtangent[1].y,
oldtangent[1].z))
if abs(vecNorm(norm) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("normal has non-unit norm")
if abs(vecNorm(tan) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("oldtangent has non-unit norm")
if abs(vecNorm(bin) - 1) > self.SENSITIVITY:
self.toaster.logger.warn("oldbinormal has non-unit norm")
if (oldtangent[0].w != oldtangent[1].w):
raise ValueError(
"inconsistent oldtangent w coordinate (%i != %i)"
% (oldtangent[0].w, oldtangent[1].w))
if not (oldtangent[0].w in (-32767, 32767)):
raise ValueError(
"invalid oldtangent w coordinate (%i)" % oldtangent[0].w)
if oldtangent[0].w > 0:
cross = vecCrossProduct(tan, bin)
else:
cross = vecCrossProduct(bin, tan)
crossnorm = vecNorm(cross)
if abs(crossnorm - 1) > self.SENSITIVITY:
# a lot of these...
self.toaster.logger.warn("tan and bin not orthogonal")
self.toaster.logger.warn("%s %s" % (tan, bin))
self.toaster.logger.warn("(error is %f)"
% abs(crossnorm - 1))
cross = vecscalarMul(cross, 1.0/crossnorm)
if vecDistance(norm, cross) > self.SENSITIVITY:
self.toaster.logger.warn(
"norm not cross product of tangent and binormal")
#self.toaster.logger.warn("norm = %s" % (norm,))
#self.toaster.logger.warn("tan = %s" % (tan,))
#self.toaster.logger.warn("bin = %s" % (bin,))
#self.toaster.logger.warn("tan bin cross prod = %s" % (cross,))
self.toaster.logger.warn(
"(error is %f)" % vecDistance(norm, cross))
# compare old with new
if sum((abs(oldtangent[0].x - newtangent[0].x),
abs(oldtangent[0].y - newtangent[0].y),
abs(oldtangent[0].z - newtangent[0].z),
abs(oldtangent[0].w - newtangent[0].w),
abs(oldtangent[1].x - newtangent[1].x),
abs(oldtangent[1].y - newtangent[1].y),
abs(oldtangent[1].z - newtangent[1].z),
abs(oldtangent[1].w - newtangent[1].w))) > self.SENSITIVITY * 32767.0:
ntan = tuple(x / 32767.0 for x in (newtangent[0].x, newtangent[0].y, newtangent[0].z))
nbin = tuple(x / 32767.0 for x in (newtangent[1].x, newtangent[1].y, newtangent[1].z))
self.toaster.logger.warn("old and new tangents differ substantially")
self.toaster.logger.warn("old tangent")
self.toaster.logger.warn
|
Purg/SMQTK
|
python/smqtk/tests/utils/file_utils/test_FileModificationMonitor.py
|
Python
|
bsd-3-clause
| 6,758
| 0.000444
|
import atexit
import os
import tempfile
import time
import threading
import unittest
import nose.tools
import smqtk.utils.file_utils
__author__ = "paul.tunison@kitware.com"
class TestFileModificationMonitor (unittest.TestCase):
def _mk_test_fp(self):
fd, fp = tempfile.mkstemp()
os.close(fd)
atexit.register(lambda: os.remove(fp))
return fp
def test_monitor_stop(self):
# test that monitor stops when its told to
fp = self._mk_test_fp()
has_triggered = [False]
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
interval = 0.01
monitor = smqtk.utils.file_utils.FileModificationMonitor(fp, interval,
0.5, cb)
nose.tools.assert_true(monitor.stopped())
monitor.start()
try:
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_true(monitor.is_alive())
nose.tools.assert_false(monitor.stopped())
monitor.stop()
# If thread hasn't entered while loop yet, it will immediately kick
# out, otherwise it's sleeping for the given interval.
monitor.join(interval*2)
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_false(monitor.is_alive())
finally:
if monitor.is_alive():
print "WARNING :: Forcing thread stop by removing filepath var"
monitor.filepath = None
def test_short_file_copy(self):
# where "short" means effectively instantaneous file creation / copy
# / touch.
#
# procedure:
# - create a file via mkstemp
# - create file monitor with detection callback and non-zero settle
# time.
# - touch file
# - check that callback was NOT triggered immediately
# - wait settle time / 2, check that cb NOT triggered yet
# - wait settle time / 4, check that cb NOT triggered yet
# - wait settle time / 4, check that cb HAS been called.
fp = self._mk_test_fp()
has_triggered = [False]
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
interval = 0.01
settle = 0.1
monitor = smqtk.utils.file_utils.FileModificationMonitor(fp, interval,
settle, cb)
try:
monitor.start()
# file not touched, should still be waiting
nose.tools.assert_equal(monitor.state, monitor.STATE_WAITING)
nose.tools.assert_false(has_triggered[0])
time.sleep(interval)
smqtk.utils.file_utils.touch(fp)
time.sleep(interval*2)
monitor._log.info('checking')
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
time.sleep(settle / 2.)
monitor._log.info('checking')
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
nose.tools.assert_false(has_triggered[0])
time.sleep(settle / 4.)
monitor._log.info('checking')
nose.tools.assert_equal(monitor.state, monitor.STATE_WATCHING)
nose.tools.assert_false(has_triggered[0])
time.sleep(settle / 4.)
monitor._log.info('checking')
nose.tools.assert_true(has_triggered[0])
finally:
monitor.stop()
monitor.join()
def test_long_file_wait(self):
# procedure:
# - create a file via mkstemp
# - create file monitor with detection callback and non-zero settle
# time.
# - setup/start thread that appends to file at an interval that is
# less than settle time
# - wait and check that cb hasn't been called a few times
# - stop appender thread
# - check that cb called after settle period
fp = self._mk_test_fp()
has_triggered = [False]
append_interval = 0.02
monitor_interval = 0.01
monitor_settle = 0.1
def cb(filepath):
has_triggered[0] = True
nose.tools.assert_equal(filepath, fp)
class AppendThread (threading.Thread):
def __init__(self):
super(AppendThread, self).__init__()
self._s = threading.Event()
def stop(self):
self._s.set()
def stopped(self):
return self._s.is_set()
def run(self):
while not self.stopped():
with open(fp, 'a') as f:
f.write('0')
time.sleep(append_interval)
m_thread = smqtk.utils.file_utils.FileModificationMonitor(fp,
monitor_interval,
monitor_settle,
cb)
a_thread = AppendThread()
try:
m_thread.start()
a_thread.start()
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_false(m_thread.stopped())
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(m_thread.state, m_thread.STATE_WATCHING)
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_false(m_thread.stopped())
nose.tools.assert_false(has_triggered[0])
nose.tools.assert_equal(m_thread.state, m_thread.STATE_WATCHING)
a_thread.stop()
time.sleep(monitor_settle)
m_thread._log.info('checking')
nose.tools.assert_true(has_triggered[0])
finally:
a_thread.stop()
m_thread.stop()
def test_invalid_params(self):
fp = self._mk_test_fp()
# Invalid path value
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
'/not/real', 1, 1, lambda p: None
)
# Invalid timers values
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
fp, -1, 1, lambda p: None
)
nose.tools.assert_raises(
ValueError,
smqtk.utils.file_utils.FileModificationMonitor,
fp, 1, -1, lambda p: None
)
|
vedujoshi/os_tempest
|
tempest/common/waiters.py
|
Python
|
apache-2.0
| 5,919
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _console_dump(client, server_id):
try:
resp, output = client.get_console_output(server_id, None)
LOG.debug("Console Output for Server %s:\n%s" % (
server_id, output))
except exceptions.NotFound:
LOG.debug("Server %s: doesn't have a console" % server_id)
pass
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
"""Waits for a server to reach a given status."""
def _get_task_state(body):
if client.service == CONF.compute.catalog_v3_type:
task_state = body.get("os-extended-status:task_state", None)
else:
task_state = body.get('OS-EXT-STS:task_state', None)
return task_state
# NOTE(afazekas): UNKNOWN status possible on ERROR
# or in a very early stage.
resp, body = client.get_server(server_id)
old_status = server_status = body['status']
old_task_state = task_state = _get_task_state(body)
start_time = int(time.time())
timeout = client.build_timeout + extra_timeout
while True:
# NOTE(afazekas): Now the BUILD status only reached
# between the UNKNOWN->ACTIVE transition.
# TODO(afazekas): enumerate and validate the stable status set
if status == 'BUILD' and server_status != 'UNKNOWN':
return
if server_status == status:
if ready_wait:
if status == 'BUILD':
return
# NOTE(afazekas): The instance is in "ready for action state"
# when no task in progress
# NOTE(afazekas): Converted to string because of the XML
# responses
if str(task_state) == "None":
# without state api extension 3 sec usually enough
time.sleep(CONF.compute.ready_wait)
return
else:
return
time.sleep(client.build_interval)
resp, body = client.get_server(server_id)
server_status = body['status']
task_state = _get_task_state(body)
if (server_status != old_status) or (task_state != old_task_state):
LOG.info('State transition "%s" ==> "%s" after %d second wait',
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if (server_status == 'ERROR') and raise_on_error:
_console_dump(client, server_id)
raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
if timed_out:
expected_task_state = 'None' if ready_wait else 'n/a'
message = ('Server %(server_id)s failed to reach %(status)s '
'status and task state "%(expected_task_state)s" '
'within the required time (%(timeout)s s).' %
{'server_id': server_id,
'status': status,
'expected_task_state': expected_task_state,
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
_console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
def wait_for_image_status(client, image_id, status):
"""Waits for an image to reach a given status.
The client should have a get_image(image_id) method to get the image.
The client should also have build_interval and build_timeout attributes.
"""
resp, image = client.get_image(image_id)
start = int(time.time())
while image['status'] != status:
time.sleep(client.build_interval)
resp, image = client.get_image(image_id)
if image['status'] == 'ERROR':
raise exceptions.AddImageException(image_id=image_id)
# check the status again to avoid a false negative where we hit
# the timeout at the same time that the image reached the expected
# status
if image['status'] == status:
return
if int(time.time()) - start >= client.build_timeout:
message = ('Image %(image_id)s failed to reach %(status)s '
'status within the required time (%(timeout)s s).' %
{'image_id': image_id,
'status': status,
'timeout': client.build_timeout})
message += ' Current status: %s.' % image['status']
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
|
giocastagno/I.W._Delpuppo_Kopech_Castagno
|
turismo/sitio/migrations/0001_initial.py
|
Python
|
mit
| 2,730
| 0.003297
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 01:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Dia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Estado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Itinerario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=50)),
('texto_general', models.CharField(max_length=1000)),
('foto_general', models.ImageField(default='sitio/imagenes/none/no-img.png', upload_to='sitio/imagenes/')),
('fecha', models.DateTimeField()),
('fecha_salida', models.DateField()),
('fecha_llegada', models.DateField()),
('estado', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Estado')),
],
),
migrations.CreateModel(
name='Pais',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='itinerario',
name='pais_destino',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Pais'),
),
migrations.AddField(
model_name='itinerario',
name='usuario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='dia',
name='itinerario',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sitio.Itinerario'),
),
]
|
ESS-LLP/erpnext-healthcare
|
erpnext/stock/doctype/delivery_note/delivery_note.py
|
Python
|
gpl-3.0
| 20,437
| 0.026423
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from erpnext.controllers.selling_controller import SellingController
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_delivery_note_serial_no
from frappe import _
from frappe.contacts.doctype.address.address import get_company_address
from frappe.desk.notifications import clear_doctype_notifications
from frappe.model.mapper import get_mapped_doc
from frappe.model.utils import get_fetch_values
from frappe.utils import cint, flt
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class DeliveryNote(SellingController):
def __init__(self, *args, **kwargs):
super(DeliveryNote, self).__init__(*args, **kwargs)
self.status_updater = [{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_delivered',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_order',
'status_field': 'delivery_status',
'keyword': 'Delivered',
'second_source_dt': 'Sales Invoice Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'second_source_extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Invoice Item',
'join_field': 'si_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Invoice',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_invoice',
'overflow_type': 'delivery',
'no_tolerance': 1
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
'source_field': '-1 * qty',
'extra_cond': """ and exists (select name from `tabDelivery Note` where name=`tabDelivery Note Item`.parent and is_return=1)"""
}]
def before_print(self):
def toggle_print_hide(meta, fieldname):
df = meta.get_field(fieldname)
if self.get("print_without_amount"):
df.set("__print_hide", 1)
else:
df.delete_key("__print_hide")
item_meta = frappe.get_meta("Delivery Note Item")
print_hide_fields = {
"parent": ["grand_total", "rounded_total", "in_words", "currency", "total", "taxes"],
"items": ["rate", "amount", "discount_amount", "price_list_rate", "discount_percentage"]
}
for key, fieldname in print_hide_fields.items():
for f in fieldname:
toggle_print_hide(self.meta if key == "parent" else item_meta, f)
super(DeliveryNote, self).before_print()
def set_actual_qty(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
actual_qty = frappe.db.sql("""select actual_qty from `tabBin`
where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse))
d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0
def so_required(self):
"""check in manage account if sales order required or not"""
if frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes':
for d in self.get('items'):
if not d.against_sales_order:
frappe.throw(_("Sales Order required for Item {0}").format(d.item_code))
def validate(self):
self.validate_posting_time()
super(DeliveryNote, self).validate()
self.set_status()
self.so_required()
self.validate_proj_cust()
self.check_close_sales_order("against_sales_order")
self.validate_for_items()
self.validate_warehouse()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_with_previous_doc()
if self._action != 'submit' and not self.is_return:
set_batch_nos(self, 'warehouse', True)
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.update_current_stock()
if not self.installation_status: self.installation_status = 'Not Installed'
def validate_with_previous_doc(self):
super(DeliveryNote, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "against_sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Sales Invoice": {
"ref_dn_field": "against_sales_invoice",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Invoice Item": {
"ref_dn_field": "si_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) \
and not self.is_return:
self.validate_rate_with_reference_doc([["Sales Order", "against_sales_order", "so_detail"],
["Sales Invoice", "against_sales_invoice", "si_detail"]])
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or
ifnull(customer,'')='')""", (self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_for_items(self):
check_list, chk_dupl_itm = [], []
if cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
return
for d in self.get('items'):
e = [d.item_code, d.description, d.warehouse, d.against_sales_order or d.against_sales_invoice, d.batch_no or '']
f = [d.item_code, d.description, d.against_sales_order or d.against_sales_invoice]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1:
if e in check_list:
frappe.msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
frappe.msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
chk_dupl_itm.append(f)
def validate_warehouse(self):
super(DeliveryNote, self).validate_warehouse()
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1:
if not d['warehouse']:
frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"]))
def update_current_stock(self):
if self.get("_action") and self._action != "update_after_submit":
for d in self.get('items'):
d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, "actual_qty")
for d in self.get('packed_items'):
bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True)
if bin_qty:
d.actual_qty = flt(bin_qty.actual_qty)
d.projected_qty = flt(bin_qty.projected_qty)
def on_submit(self):
self.validate_packed_qty()
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
# update delivered qty in sales order
self.update_prevdoc_status()
self.update_billing_status()
if not self.is_return:
se
|
lf.chec
|
k_credit_limit()
elif self.issue_credit_note:
self.make_return_invoice()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
self.update_stock_ledger()
self.make_gl_entries()
def on_cancel(self)
|
ujvl/ray-ng
|
python/ray/tests/utils.py
|
Python
|
apache-2.0
| 4,953
| 0
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import subprocess
import sys
import tempfile
import time
import psutil
import ray
class RayTestTimeoutException(Exception):
"""Exception used to identify timeouts from test utilities."""
pass
def _pid_alive(pid):
"""Check if the process with this PID is alive or not.
Args:
pid: The pid to check.
Returns:
This returns false if the process is dead. Otherwise, it returns true.
"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def wait_for_pid_to_exit(pid, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if not _pid_alive(pid):
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process to exit.")
def wait_for_children_of_pid(pid, num_children=1, timeout=20):
p = psutil.Process(pid)
start_time = time.time()
while time.time() - start_time < timeout:
num_alive = len(p.children(recursive=False))
if num_alive >= num_children:
return
time.sleep(0.1)
raise RayTestTimeoutException(
"Timed out while waiting for process children to start "
"({}/{} started).".format(num_alive, num_children
|
))
def wait_for_children_of_pid_to_exit(pid, timeout=20):
children = psutil.Process(pid).children()
if len(children) == 0:
return
_, alive = psutil.wait_procs(children, timeout=timeout)
if len(alive) > 0:
raise RayTestTimeoutException(
"Timed out while waiting
|
for process children to exit."
" Children still alive: {}.".format([p.name() for p in alive]))
def kill_process_by_name(name, SIGKILL=False):
for p in psutil.process_iter(attrs=["name"]):
if p.info["name"] == name:
if SIGKILL:
p.kill()
else:
p.terminate()
def run_string_as_driver(driver_script):
"""Run a driver as a separate process.
Args:
driver_script: A string to run as a Python script.
Returns:
The script's output.
"""
# Save the driver script as a file so we can call it using subprocess.
with tempfile.NamedTemporaryFile() as f:
f.write(driver_script.encode("ascii"))
f.flush()
out = ray.utils.decode(
subprocess.check_output(
[sys.executable, f.name], stderr=subprocess.STDOUT))
return out
def run_string_as_driver_nonblocking(driver_script):
"""Start a driver as a separate process and return immediately.
Args:
driver_script: A string to run as a Python script.
Returns:
A handle to the driver process.
"""
# Save the driver script as a file so we can call it using subprocess. We
# do not delete this file because if we do then it may get removed before
# the Python process tries to run it.
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(driver_script.encode("ascii"))
f.flush()
return subprocess.Popen(
[sys.executable, f.name], stdout=subprocess.PIPE)
def flat_errors():
errors = []
for job_errors in ray.errors(all_jobs=True).values():
errors.extend(job_errors)
return errors
def relevant_errors(error_type):
return [error for error in flat_errors() if error["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
raise RayTestTimeoutException("Timed out waiting for {} {} errors.".format(
num_errors, error_type))
def wait_for_condition(condition_predictor,
timeout_ms=1000,
retry_interval_ms=100):
"""A helper function that waits until a condition is met.
Args:
condition_predictor: A callable that returns True once the condition is met.
timeout_ms: Maximum timeout in milliseconds.
retry_interval_ms: Retry interval in milliseconds.
Returns:
    Whether the condition is met within the timeout.
"""
time_elapsed = 0
while time_elapsed <= timeout_ms:
if condition_predictor():
return True
time_elapsed += retry_interval_ms
time.sleep(retry_interval_ms / 1000.0)
return False
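# Minimal usage sketch (not part of the original module). The flag-file path
# below is hypothetical and exists purely for illustration.
def example_wait_for_flag_file(path="/tmp/ray_test_ready.flag"):
    """Poll until `path` exists, giving up after five seconds."""
    return wait_for_condition(
        lambda: os.path.exists(path), timeout_ms=5000, retry_interval_ms=100)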
def recursive_fnmatch(dirpath, pattern):
"""Looks at a file directory subtree for a filename pattern.
Similar to glob.glob(..., recursive=True), but also works on Python 2.7.
"""
matches = []
for root, dirnames, filenames in os.walk(dirpath):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
|
UBC-Victorious-410/project
|
tools/mock_pmd_parser.py
|
Python
|
mit
| 330
| 0.060606
|
import pickle
commits = {}
def main():
output = ""
for commit in range(0,3):
output += "c*\n"
for file in range(0,2):
smells = str(commit+file*2)
output += "class"+str(fi
|
le)+" smells="+smells+"\n"
result = open("mockpmdresult.txt","w")
result.write(output)
result.close()
if __name__ == "__main__":
main()
| |
cytec/SickRage
|
lib/github/PaginatedList.py
|
Python
|
gpl-3.0
| 7,862
| 0.003689
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Bill Mill <bill.mill@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 davidbrai <davidbrai@gmail.com> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it un
|
der #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABIL
|
ITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PaginatedListBase:
def __init__(self):
self.__elements = list()
def __getitem__(self, index):
assert isinstance(index, (int, slice))
if isinstance(index, (int, long)):
self.__fetchToIndex(index)
return self.__elements[index]
else:
return self._Slice(self, index)
def __iter__(self):
for element in self.__elements:
yield element
while self._couldGrow():
newElements = self._grow()
for element in newElements:
yield element
def _isBiggerThan(self, index):
return len(self.__elements) > index or self._couldGrow()
def __fetchToIndex(self, index):
while len(self.__elements) <= index and self._couldGrow():
self._grow()
def _grow(self):
newElements = self._fetchNextPage()
self.__elements += newElements
return newElements
class _Slice:
def __init__(self, theList, theSlice):
self.__list = theList
self.__start = theSlice.start or 0
self.__stop = theSlice.stop
self.__step = theSlice.step or 1
def __iter__(self):
index = self.__start
while not self.__finished(index):
if self.__list._isBiggerThan(index):
yield self.__list[index]
index += self.__step
else:
return
def __finished(self, index):
return self.__stop is not None and index >= self.__stop
class PaginatedList(PaginatedListBase):
"""
This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
You can simply enumerate through instances of this class::
for repo in user.get_repos():
print repo.name
You can also index them or take slices::
second_repo = user.get_repos()[1]
first_repos = user.get_repos()[:10]
If you want to iterate in reversed order, just do::
for repo in user.get_repos().reversed:
print repo.name
And if you really need it, you can explicitly access a specific page::
some_repos = user.get_repos().get_page(0)
some_other_repos = user.get_repos().get_page(3)
"""
def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None):
PaginatedListBase.__init__(self)
self.__requester = requester
self.__contentClass = contentClass
self.__firstUrl = firstUrl
self.__firstParams = firstParams or ()
self.__nextUrl = firstUrl
self.__nextParams = firstParams or {}
self.__headers = headers
if self.__requester.per_page != 30:
self.__nextParams["per_page"] = self.__requester.per_page
self._reversed = False
self.__totalCount = None
@property
def totalCount(self):
if not self.__totalCount:
self._grow()
return self.__totalCount
def _getLastPageUrl(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=self.__nextParams,
headers=self.__headers
)
links = self.__parseLinkHeader(headers)
lastUrl = links.get("last")
return lastUrl
@property
def reversed(self):
r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
r.__reverse()
return r
def __reverse(self):
self._reversed = True
lastUrl = self._getLastPageUrl()
if lastUrl:
self.__nextUrl = lastUrl
def _couldGrow(self):
return self.__nextUrl is not None
def _fetchNextPage(self):
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__nextUrl,
parameters=self.__nextParams,
headers=self.__headers
)
data = data if data else []
self.__nextUrl = None
if len(data) > 0:
links = self.__parseLinkHeader(headers)
if self._reversed:
if "prev" in links:
self.__nextUrl = links["prev"]
elif "next" in links:
self.__nextUrl = links["next"]
self.__nextParams = None
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
content = [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data if element is not None
]
if self._reversed:
return content[::-1]
return content
def __parseLinkHeader(self, headers):
links = {}
if "link" in headers:
linkHeaders = headers["link"].split(", ")
for linkHeader in linkHeaders:
(url, rel) = linkHeader.split("; ")
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links
def get_page(self, page):
params = dict(self.__firstParams)
if page != 0:
params["page"] = page + 1
if self.__requester.per_page != 30:
params["per_page"] = self.__requester.per_page
headers, data = self.__requester.requestJsonAndCheck(
"GET",
self.__firstUrl,
parameters=params,
headers=self.__headers
)
if 'items' in data:
self.__totalCount = data['total_count']
data = data["items"]
return [
self.__contentClass(self.__requester, headers, element, completed=False)
for element in data
]
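# Standalone sketch (not part of PyGithub's public API): the same "Link"
# header parsing performed by PaginatedList above, shown on a made-up value.
def parse_link_header(value):
    links = {}
    for part in value.split(", "):
        url, rel = part.split("; ")
        links[rel[5:-1]] = url[1:-1]  # strip '<...>' and 'rel="..."'
    return links
# parse_link_header('<https://api.github.com/x?page=2>; rel="next", '
#                   '<https://api.github.com/x?page=5>; rel="last"')
# -> {'next': 'https://api.github.com/x?page=2',
#     'last': 'https://api.github.com/x?page=5'}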
|
ICGC-TCGA-PanCancer/pancancer-sandbox
|
pcawg_metadata_parser/pc_report-embl-dkfz_summary_counts.py
|
Python
|
gpl-2.0
| 8,651
| 0.007282
|
#!/usr/bin/env python
import sys
import os
import glob
import json
import re
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def init_report_dir(metadata_dir, report_name):
report_dir = metadata_dir + '/reports/' + report_name
if not os.path.exists(report_dir):
os.makedirs(report_dir)
return report_dir
def generate_report(metadata_dir, report_name):
count_types = [
"live_aligned_embl-dkfz_variant_not_called_donors",
"live_embl-dkfz_variant_called_donors", # don't switch order
]
report_dir = init_report_dir(metadata_dir, report_name)
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
data = [["Date", "To be called", "Called"]]
counts = []
today_donors = []
for ctype in count_types:
donor_counts = []
for md in metadata_dirs:
donors = set()
file_name_pattern = md + '/reports/gnos_repo_summary/' + ctype + '.*.txt'
files = glob.glob(file_name_pattern)
for f in files: donors.update(get_donors(f))
donor_counts.append(len(donors))
if len(donor_counts) == len(metadata_dirs):
today_donors.append(donors)
counts.append(donor_counts)
for i, d in enumerate(dates): data.append([d, counts[0][i], counts[1][i]])
with open(report_dir + '/summary_counts.json', 'w') as o: o.write(json.dumps(data))
compute_site_report_new(metadata_dir, report_dir, today_donors)
compute_site_report(metadata_dir, report_dir, today_donors)
def compute_site_report_new(metadata_dir, report_dir, today_donors):
compute_sites = {
"aws_ireland": set(),
"aws_oregon": set(),
"bsc": set(),
"dkfz": set(),
"ebi": set(),
"etri": set(),
"oicr": set(),
"pdc1_1": set(),
"pdc2_0": set(),
"tokyo": set(),
"ucsc": set(),
"sanger": set(),
"idash": set(),
"dkfz_hpc": set()
}
get_whitelists(compute_sites)
site_assigned_donors = set()
site_summary = {}
unassigned_uncalled_donors = set()
for c in compute_sites:
for d in compute_sites:
if c == d: continue
if compute_sites.get(c).intersection(compute_sites.get(d)):
# log overlap donors issue
print "WARN: overlap donors found between " + c + " and " + d \
+ ": " + ", ".join(compute_sites.get(c).intersection(compute_sites.get(d)))
if not site_summary.get(c):
site_summary[c] = {
'Called': len(compute_sites.get(c).intersection(today_donors[1])),
'To_be_called': len(compute_sites.get(c).intersection(today_donors[0])),
'Total': len(compute_sites.get(c))
}
site_assigned_donors.update(compute_sites.get(c))
# report WARN if the sum of Called and To_be_called not equal Total in site_summary
if not site_summary[c]['Called'] + site_summary[c]['To_be_called'] == site_summary[c]['Total']:
print "WARN: donors: " + ", ".join(compute_sites.get(c).difference(today_donors[0]).difference(today_donors[1])) + \
" found in whitelist are not ready to do embl-dkfz variant calling!!!"
site_summary['Unassigned'] = {
'Called': len(today_donors[1].difference(site_assigned_donors)),
'To_be_called': len(today_donors[0].difference(site_assigned_donors)),
'Total': len(today_donors[0].union(today_donors[1]).difference(site_assigned_donors))
}
unassigned_uncalled_donors = today_donors[0].difference(site_assigned_donors)
# today's counts
with open(report_dir + '/summary_compute_site_counts.json', 'w') as o: o.write(json.dumps(site_summary))
with open(report_dir + '/unassigned_uncalled_donors.txt', 'w') as o:
o.write('# Unassigned and uncalled donors\n')
o.write('# dcc_project_code' + '\t' + 'submitter_donor_id' + '\n')
o.write('\n'.join(unassigned_uncalled_donors) + '\n')
"""
# get all previous days counts
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
site_summary_report = []
for i, md in reversed(list(enumerate(metadata_dirs))):
summary_site_count_file = md + '/reports/embl-dkfz_summary_counts/summary_compute_site_counts.json'
if not os.path.isfile(summary_site_count_file): continue
site_counts = json.load(open(summary_site_count_file))
site_summary_report.append([dates[i], site_counts])
|
with open(report_dir + '/hist
|
_summary_compute_site_counts.json', 'w') as o: o.write(json.dumps(site_summary_report))
"""
def compute_site_report(metadata_dir, report_dir, today_donors):
compute_sites = {
"aws_ireland": set(),
"aws_oregon": set(),
"bsc": set(),
"dkfz": set(),
"ebi": set(),
"etri": set(),
"oicr": set(),
"pdc1_1": set(),
"pdc2_0": set(),
"tokyo": set(),
"ucsc": set(),
"sanger": set(),
"idash": set(),
"dkfz_hpc": set()
}
get_whitelists(compute_sites)
completed_donors = {}
site_assigned_donors = set()
for c in compute_sites:
for d in compute_sites:
if c == d: continue
if compute_sites.get(c).intersection(compute_sites.get(d)):
# log overlap donors issue
print "WARN: overlap donors found between " + c + " and " + d \
+ ": " + ", ".join(compute_sites.get(c).intersection(compute_sites.get(d)))
completed_donors[c] = compute_sites.get(c).intersection(today_donors[1])
site_assigned_donors.update(completed_donors[c])
site_not_assigned_donors = today_donors[1].difference(site_assigned_donors)
#print completed_donors
#print site_not_assigned_donors
site_summary = {}
for c in completed_donors: site_summary[c] = len(completed_donors.get(c))
# today's counts
with open(report_dir + '/summary_site_counts.json', 'w') as o: o.write(json.dumps(site_summary))
# get all previous days counts
[dates, metadata_dirs] = get_metadata_dirs(metadata_dir, '2015-05-07')
site_summary_report = []
for i, md in reversed(list(enumerate(metadata_dirs))):
summary_site_count_file = md + '/reports/embl-dkfz_summary_counts/summary_site_counts.json'
if not os.path.isfile(summary_site_count_file): continue
site_counts = json.load(open(summary_site_count_file))
site_summary_report.append([dates[i], site_counts])
with open(report_dir + '/hist_summary_site_counts.json', 'w') as o: o.write(json.dumps(site_summary_report))
def get_whitelists(compute_sites):
whitelist_dir = '../pcawg-operations/variant_calling/dkfz_embl_workflow/whitelists/'
for c in compute_sites:
files = glob.glob(whitelist_dir + '/' + c + '/' + c + '.*.txt')
for f in files: compute_sites.get(c).update(get_donors(f))
def get_donors(fname):
donors = []
with open(fname) as f:
for d in f:
donors.append(d.rstrip())
return donors
def get_metadata_dirs(metadata_dir, start_date='2015-01-11'):
dirs = sorted(glob.glob(metadata_dir + '/../20*_???'))
dir_name = os.path.basename(metadata_dir)
ret_dirs = []
ret_dates = []
start = False
for d in dirs:
if '../' + start_date in d: start = True
if not start: continue
ret_dates.append( str.split(os.path.basename(d),'_')[0] )
ret_dirs.append(d)
if dir_name == os.path.basename(d): break
return [ret_dates, ret_dirs]
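# Illustrative example (hypothetical paths, not from the original script):
# given sibling directories .../2015-05-07_ABC and .../2015-05-08_XYZ and
# metadata_dir pointing at the latter, get_metadata_dirs(metadata_dir,
# '2015-05-07') returns [['2015-05-07', '2015-05-08'],
# ['.../2015-05-07_ABC', '.../2015-05-08_XYZ']].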
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(description="PCAWG Report Generator Gathering Counts",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=True)
args = parser.parse_args()
metadata_dir = args.metadata_dir # this dir contain
|
webostin/django-btc
|
tests/template_tests/filter_tests/test_phone2numeric.py
|
Python
|
bsd-3-clause
| 1,450
| 0.002069
|
from django.template.defaultfilters import phone2numeric_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import render, setup
class Phone2numericTests(SimpleTestCase):
@setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'})
def test_phone2numeric01(self):
output = render(
'phone2numeric01',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '&lt;1-800-2255-63&gt; <1-800-2255-63>')
@setup({'phone2numeric02':
'{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'})
def test_phone2numeric02(self):
output = render(
'phone2numeric02',
{'a': '<1-8
|
00-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric03': '{{ a|phone2numeric }}'})
def test_phone2numeric03(self):
output = render(
'phone2numeric03',
{'a': 'How razorback-jumping frogs can level six piqued gymnasts!'},
)
self.assertEqual(
output,
'469 729672225-586746
|
4 37647 226 53835 749 747833 49662787!'
)
class FunctionTests(SimpleTestCase):
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
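# Reference sketch (added for readability; this is not Django's
# implementation): the standard ITU E.161 keypad mapping that the expected
# values above assume.
KEYPAD = {
    '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
    '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
}
LETTER_TO_DIGIT = {letter: digit
                   for digit, letters in KEYPAD.items()
                   for letter in letters}
def keypad_digits(text):
    """Map letters to keypad digits, leaving all other characters untouched."""
    return ''.join(LETTER_TO_DIGIT.get(ch.lower(), ch) for ch in text)
# e.g. keypad_digits('0800 flowers') == '0800 3569377'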
|
mrshu/scikit-learn
|
sklearn/hmm.py
|
Python
|
bsd-3-clause
| 46,104
| 0.000022
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""
import string
import warnings
import numpy as np
from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies the array in place.
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
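# Example (added for clarity, not part of the original module): normalizing
# the rows of a 2x2 array.
#
#   >>> A = np.array([[1.0, 3.0], [2.0, 2.0]])
#   >>> normalize(A, axis=1)
#   array([[ 0.25,  0.75],
#          [ 0.5 ,  0.5 ]])
#
# Values are approximate: normalize() adds EPS to A in place before summing.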
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape (`n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars, etc.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars, etc. Defaults to all parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publically.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, obs):
"""Compute the log probability under the model and compute posteriors
Implements rank and beam pruning in the forward-backward
algorithm to speed up inference in large models.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence `obs`
posteriors: array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
|
-------
logprob : float
Log likelihood of the `obs`
See Also
|
--------
eval : Compute the log probability under the model and posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
eval : Compute the log probability under the model and posteriors
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob
|
Jetpie/web-scraper
|
commercial_web_navigation/commercial_web_navigation/items.py
|
Python
|
mit
| 429
| 0.011655
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
"sample spider"
class DmozItem(scrapy.Item):
#
|
define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
link = scrapy.Field()
desc = scrapy.Field()
class JDItem(scrapy.Item):
category = scr
|
apy.Field()
|
boyombo/django-stations
|
stations/api/urls.py
|
Python
|
mit
| 2,383
| 0
|
from django.conf.urls import url
from api import views
urlpatterns = [
url(r'stations/$', views.get_stations, name='api_stations'),
url(r'entry/(?P<station_id>\d+)/$', views.make_entry, name='api_entry'),
url(r'new/$', views.add_station, name='api_add_station'),
# Booking api
url(r'booking/(?P<resident_id>\d+)/$', views.booking, name='api_booking'),
url(r'book_profile/$', views.book_profile, name='api_book_profile'),
url(r'book_phone/$', views.book_phone, name='api_book_phone'),
url(r'book_code/$', views.book_code, name='api_book_code'),
# Insure api
url(r'insure/$', views.insure, name='api_insure'),
# Drugshare ap
|
i
url(r'register_pharm/$', views.register_pharm, name='api_register_pharm'),
url(r'make_token/(?P<device_id>\d+)/$',
views.make_token, name='api_make_token'),
url(r'add_device/$', views.add_device, name='api_add_device'),
url(r'get_profile/$', views.get_profile, name='api_get_profile'),
url(r'update_pharm/(?P<device_id>\d+)/$',
views.update_pharm, nam
|
e='api_update_pharm'),
url(r'add_outlet/(?P<device_id>\d+)/$',
views.add_outlet, name='api_add_outlet'),
url(r'delete_outlet/(?P<id>\d+)/$',
views.delete_outlet, name='api_delete_outlet'),
url(r'add_drug/$', views.add_drug, name='api_add_drug'),
url(r'edit_drug/(?P<id>\d+)/$', views.edit_drug, name='api_edit_drug'),
url(r'search_drug/(?P<device_id>\d+)/$',
views.search_drug, name='api_search_drug'),
url(r'wish_drug/(?P<device_id>\d+)/$',
views.wishlist_drug, name='api_wishlist_drug'),
url(r'stock_drug/(?P<device_id>\d+)/$',
views.stock_drug, name='api_stock_drug'),
url(r'remove_drug/(?P<id>\d+)/$',
views.remove_drug, name='api_remove_drug'),
url(r'recent_drugs/(?P<count>\d+)/$',
views.recent_drugs, name='api_recent_drugs'),
url(r'request_drug/(?P<drug_id>\d+)/$',
views.request_drug, name='api_request_drug'),
url(r'pending/(?P<device_id>\d+)/$',
views.pending_requests, name='api_pending_requests'),
url(r'accept/(?P<request_id>\d+)/$', views.accept, name='api_accept'),
url(r'reject/(?P<request_id>\d+)/$', views.reject, name='api_reject'),
url(r'drug_list/$', views.list_generic_drugs, name='api_drugs_list'),
url(r'feedback/(?P<id>\d+)/$', views.feedback, name='api_feedback'),
]
|
chriskuech/wavelab
|
pitchanalysis.py
|
Python
|
mit
| 5,174
| 0.018748
|
#!/usr/bin/env python
"""
pitchanalysis.py
--
Christopher Kuech
cjkuech@gmail.com
--
Requires:
Python 2.7
Instructions:
python pitchanalysis.py [wav-file-name]
"""
import matplotlib
from math import log
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy as np
import pyaudio
import sys
from time import time, sleep
import Tkinter as tk
import wavelab
(WIDTH, HEIGHT) = (800, 500)
FNAME = './Bach.wav' if len(sys.argv) != 2 else sys.argv[1]
font = ('Helvetica', 14, 'bold')
CHUNK = 1024
def audioworker():
"""the function run on the audio thread"""
global frame
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(2),
channels=1, rate=4*44100, output=True)
# unknown why rate is off by 4x
while True:
stream.write(data[frame:frame+CHUNK].tostring())
frame = (frame + CHUNK) % len(wav)
stream.stop_stream()
stream.close()
p.terminate()
def graphicsworker():
"""the function run on the graphics thread"""
while True:
start = time()
p = ptype.get()
w = wsize.get()
wty = wtype.get()
# compute frequencies from clip
clip = data[frame:frame+w]
if wty == "hanning":
clip *= np.hanning(w)
elif wty == "hamming":
clip *= np.hamming(w)
freqs = wavelab.frequencies(clip)
# update plot
xs = np.sort(freqs.keys())
ys = np.array(map(freqs.get, xs))
axes.cla()
(xmax, ymin, ymax) = (10e4, 0.000001, 10e2)
# (xlim, ylim) = (_, (ymin, ymax)) = ((0, 1e4), (1e-3, 1e7))
axes.set_xscale("log")
axes.set_yscale("linear")
axes.set_xlim((1, xmax))
if p == "square":
# axes.set_yscale("linear")
axes.set_ylim((ymin**2, ymax**2))
ys = ys * ys
elif p == "dB":
# axes.set_yscale("log")
axes.set_ylim((log(ymin), log(ymax)))
ys = np.log(ys)
elif p == "-dB":
# axes.set_yscale("log")
axes.set_ylim((-log(ymax), -log(ymin)))
ys = -np.log(ys)
elif p == "linear":
# axes.set_yscale("linear")
axes.set_ylim((ymin, ymax))
axes.plot(xs, ys, 'r-')
canvas.show()
# pitch tracker
freq = max(freqs, key=lambda f: freqs[f])
pitch.set(wavelab.pitch(freq).replace('/','\n'))
# attempt to achieve 30fps animation (at best)
dt = time() - start
sleep(max(0, 1.0/30.0 - dt))
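# Standalone sketch (added for clarity, not used by the GUI above): the same
# power-scaling branches applied in graphicsworker(), factored into one helper.
def scale_power(ys, ptype):
    """Scale a spectrum the way the plot does; np.log is the natural log."""
    if ptype == "square":
        return ys * ys
    if ptype == "dB":
        return np.log(ys)
    if ptype == "-dB":
        return -np.log(ys)
    return ys  # "linear"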
# read wave file
(framerate, wav) = wavelab.readwav(FNAME)
data = np.concatenate((wav, wav)) # avoid out of bounds
frame = 0
# create a GUI instance (do before any use of Tkinter)
root = tk.Tk()
root.wm_title("Frequency Spectrogram")
# these objects hold the variables from the widgets
wsize = tk.IntVar() # window size (in frames)
wsize.set(2205)
wtype = tk.StringVar() # type of windowing to use
wtype.set("rectangle")
ptype = tk.StringVar() # type of power to use
ptype.set("square")
pitch = tk.StringVar() # the current pitch
pitch.set("")
widgetps = lambda n, v: {'variable': v, 'text': n, 'value': n}
# returns the dict of kwargs that initialize a widget
# create the canvas widget and add it to the GUI
# canvas = tk.Canvas(root, borderwidth=0, width=WIDTH, height=HEIGHT, bg='#000')
# canvas.grid(row=0, column=0, columnspan=4)
# canvas.show()
canvasframe = tk.Frame(root, width=WIDTH, height=HEIGHT)
canvasframe.grid(row=0, column=0, columnspan=4)
figure = Figure()
axes = figure.add_axes( (0.1, 0.1, 0.8, 0.8), frameon=True,
xlabel="Frequency (Hz)", ylabel="Power")
canvas = FigureCanvasTkAgg(figure, canvasframe)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
canvas.show()
# create the wtype controller and add it to the GUI
tk.Label(root, font=font, text="Windowing").grid(row=1,
|
column=0, pady=10)
wframe = tk.Frame(root)
wframe.grid(
|
row=2, column=0, pady=10, sticky="n")
tk.Radiobutton(wframe, **widgetps("rectangle", wtype)).grid(sticky="w", row=0)
tk.Radiobutton(wframe, **widgetps("hamming" , wtype)).grid(sticky="w", row=1)
tk.Radiobutton(wframe, **widgetps("hanning" , wtype)).grid(sticky="w", row=2)
# create the wsize controller and add it to the GUI
tk.Label(root, font=font, text="Window Size").grid(row=1, column=1, pady=10)
tk.Scale(root, variable=wsize, orient=tk.HORIZONTAL, from_=10, to=4410).grid(row=2, column=1, sticky="wen")
# create the ptype controller and add it to the GUI
tk.Label(root, font=font, text="Power").grid(row=1, column=2, pady=10)
pframe = tk.Frame(root)
pframe.grid(row=2, column=2, pady=10, sticky="n")
tk.Radiobutton(pframe, **widgetps("square", ptype)).grid(sticky="w", row=0)
tk.Radiobutton(pframe, **widgetps("dB", ptype)).grid(sticky="w", row=1)
tk.Radiobutton(pframe, **widgetps("-dB", ptype)).grid(sticky="w", row=2)
tk.Radiobutton(pframe, **widgetps("linear", ptype)).grid(sticky="w", row=3)
# create the area where the pitchlabel is displayed
tk.Label(root, font=font, text="Pitch").grid(row=1, column=3, pady=10)
(fontfamily, fontsize, fontweight) = font
pitchfont = (fontfamily, 24, fontweight)
pitchlabel = tk.Label(root, font=pitchfont, textvariable=pitch, width=7).grid(row=2, column=3)
# start the other threads
wavelab.thread(audioworker)
wavelab.thread(graphicsworker)
# start the main update loop for the GUI (and block)
tk.mainloop()
|
fedora-desktop-tests/nautilus
|
features/environment.py
|
Python
|
gpl-2.0
| 3,002
| 0.001332
|
# -*- coding: UTF-8 -*-
import os
from behave_common_steps import dummy, App
from dogtail.config import config
from time import sleep, localtime, strftime
import problem
import shutil
def before_all(context):
"""Setup nautilus stuff
Being executed before all features
"""
try:
# Cleanup abrt crashes
[x.delete() for x in problem.list()]
# Do the cleanup
os.system("python cleanup.py > /dev/null")
# Skip dogtail actions to print to stdout
config.logDebugToStdOut = False
config.typingDelay = 0.2
# Include assertion object
context.assertion = dummy()
# Kill initial setup
os.system("killall /usr/libexec/gnome-initial-setup")
# Store scenario start time for session logs
context.log_start_time = strftime("%Y-%m-%d %H:%M:%S", localtime())
context.app = App('nautilus', forceKill=False)
except Exception as e:
print("Error in before_all: %s" % e.message)
def after_step(context, step):
"""Teardown after each step.
Here we take a screenshot and embed it (if one of the formatters supports it).
"""
try:
if problem.list():
problems = problem.list()
for crash in problems:
if hasattr(context, "embed"):
context.embed('text/plain', "abrt has detected a crash: %s" % crash.reason)
else:
print("abrt has detected a crash: %s" % crash.reason)
# Crash was stored, so it is safe to remove it now
[x.delete() for x in problems]
# Make a screenshot if the step has failed
if hasattr(context, "embed"):
os.system("gnome-screenshot -f /tmp/screenshot.jpg")
context.embed('image/jpg', open("/tmp/screenshot.jpg", 'r').read())
# Test debugging - set DEBUG_ON_FAILURE to drop to ipdb on step failure
if os.environ.get('DEBUG_ON_FAILURE'):
import ipdb; ipdb.set_trace() # flake8: noqa
except Exception as e:
print("Error in after_step: %s" % e.message)
def after_scenario(context, scenario):
"""Teardown for each scenario
Kill nautilus (in order to make this reliable we send sigkill)
"""
try:
# Stop nautilus
os.system("killall nautilus &> /dev/null")
# Attach journalctl logs
if hasattr(context, "embed"):
os.system("sudo journalctl /usr/bin/gnome-session --no-pag
|
er -o cat --since='%s'> /tmp/journal-session.log" % context.log_start_time)
data = open("/tmp/journal-session.log", 'r').read()
if data:
|
context.embed('text/plain', data)
if hasattr(context, 'temp_dir'):
shutil.rmtree(context.temp_dir)
# Make some pause after scenario
sleep(1)
except Exception as e:
# Stupid behave simply crashes in case exception has occurred
print("Error in after_scenario: %s" % e.message)
|
qwiglydee/drf-mongo-filters
|
runtests.py
|
Python
|
gpl-2.0
| 751
| 0.001332
|
#!/usr/bin/env python
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
from tests import mongoutils
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memor
|
y:',
},
},
MONGO_DATABASES={
'default': {
'name': 'dumb',
},
},
INSTALLED_APPS=(
'tests',
),
MIDDLEWARE_CLASSES=(),
ROOT_URLCONF=None,
SECRET_KEY='foobar',
TEST_RUNNER='tests.mongoutils.TestRunner'
)
def runtests():
mongoutils.mongo_connect()
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(a
|
rgv)
if __name__ == '__main__':
runtests()
|
khozzy/pyalcs
|
tests/lcs/agents/test_PerceptionString.py
|
Python
|
gpl-3.0
| 1,123
| 0
|
from lcs.agents import PerceptionString
from lcs.representations import UBR
class TestPerceptionString:
def test_should_initialize_with_defaults(self):
assert len(PerceptionString("foo")) == 3
assert len(PerceptionString(['b', 'a', 'r'])) == 3
def test_should_create_empty_with_defaults(self):
# when
ps = PerceptionString.empty(3)
# then
assert len(ps) == 3
assert repr(ps) == '###'
def test_should_create_empty_for_ubr(self):
# given
length = 3
|
wildcard = UBR(0, 16)
# when
ps = PerceptionString.empty(length, wildcard, oktypes=(UBR,))
# then
assert len(ps) == 3
assert ps[0] == ps[1] == ps[2] == wildcard
assert ps[0] is not ps[1] is not ps[2]
def test_should_safely_modify_single_attribute(self):
# given
length = 3
wildcard = UBR(0, 16)
|
ps = PerceptionString.empty(length, wildcard, oktypes=(UBR, ))
# when
ps[0].x1 = 2
# then (check if objects are not stored using references)
assert ps[1].x1 == 0
|
LabD/ecs-deplojo
|
tests/conftest.py
|
Python
|
mit
| 3,378
| 0
|
import json
import os
from textwrap import dedent
import boto3
import moto
import pytest
from moto.ec2 import ec2_backend
from moto.ec2 import utils as ec2_utils
from ecs_deplojo.connection import Connection
from ecs_deplojo.task_definitions import TaskDefinition
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.yield_fixture(scop
|
e="function")
def cluster():
with moto.mock_ecs(), moto.mock_ec2():
boto3.setup_default_session(region_name="eu-west-1")
ec2 = boto3.resource("ec2", region_name="eu-west-1")
ecs = boto3.client("ecs", region_name="eu-west-1")
known_amis = list(ec2_backend.describe_images())
|
test_instance = ec2.create_instances(
ImageId=known_amis[0].id, MinCount=1, MaxCount=1
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
cluster = ecs.create_cluster(clusterName="default")
ecs.register_container_instance(
cluster="default", instanceIdentityDocument=instance_id_document
)
yield cluster
@pytest.fixture
def connection(cluster):
return Connection()
@pytest.fixture
def definition():
path = os.path.join(BASE_DIR, "files/default_taskdef.json")
with open(path, "r") as json_file:
return TaskDefinition(json.load(json_file))
@pytest.fixture
def default_config():
path = os.path.join(BASE_DIR, "files/default_config.yml")
with open(path, "r") as fh:
yield fh
@pytest.fixture
def example_project(tmpdir):
data = """
{
"family": "default",
"volumes": [],
"containerDefinitions": [
{
"name": "web-1",
"image": "${image}",
"essential": true,
"command": ["hello", "world"],
"memory": 256,
"cpu": 0,
"portMappings": [
{
"containerPort": 8080,
"hostPort": 0
}
]
},
{
"name": "web-2",
"image": "${image}",
"essential": true,
"command": ["hello", "world"],
"memory": 256,
"cpu": 0,
"portMappings": [
{
"containerPort": 8080,
"hostPort": 0
}
]
}
]
}
""".strip()
filename = tmpdir.join("task_definition.json")
filename.write(data)
data = dedent(
"""
---
cluster_name: default
environment:
DATABASE_URL: postgresql://
environment_groups:
group-1:
ENV_CODE: 12345
task_definitions:
web:
template: %(template_filename)s
environment_group: group-1
task_role_arn: my-test
overrides:
web-1:
memory: 512
portMappings:
- hostPort: 0
containerPort: 8080
protocol: tcp
services:
web:
task_definition: web
before_deploy:
- task_definition: web
container: web-1
command: manage.py migrate --noinput
after_deploy:
- task_definition: web
container: web-1
command: manage.py clearsessions
"""
% {"template_filename": filename.strpath}
)
filename = tmpdir.join("config.yml")
filename.write(data)
return filename
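# Hypothetical usage sketch (not part of the original conftest): a test that
# consumes the fixture above; it only checks the name of the generated file.
def test_example_project_writes_config(example_project):
    assert example_project.basename == "config.yml"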
|
sunnychaudhari/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/views/course.py
|
Python
|
agpl-3.0
| 54,618
| 0.005493
|
''' -- imports from python libraries -- '''
# from datetime import datetime
import datetime
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect # , HttpResponse uncomment when to use
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response # , render uncomment when to use
from django.template import RequestContext
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT, GSTUDIO_TASK_TYPES
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.ndf.models import Node, AttributeType, RelationType
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import edit_drawer_widget
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, parse_template_data, get_execution_time, delete_node
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import get_property_order_with_value
fro
|
m gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_task
GST_COURSE = node_collection.one({'_type': "GSystemType", 'name': GAPPS[7]})
app = GST_COURSE
# @login_required
@get_execution_time
def course(request, group_id, course_id=None):
"""
* Renders a list of all 'courses' available within the database.
"""
ins_objectid = Objec
|
tId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
if course_id is None:
course_ins = node_collection.find_one({'_type': "GSystemType", "name": "Course"})
if course_ins:
course_id = str(course_ins._id)
if request.method == "POST":
# Course search view
title = GST_COURSE.name
search_field = request.POST['search_field']
course_coll = node_collection.find({'member_of': {'$all': [ObjectId(GST_COURSE._id)]},
'$or': [
{'$and': [
{'name': {'$regex': search_field, '$options': 'i'}},
{'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [{'access_policy': u"PRIVATE"}, {'created_by': request.user.id}]}
]
}
]
},
{'$and': [
{'tags': {'$regex': search_field, '$options': 'i'}},
{'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [{'access_policy': u"PRIVATE"}, {'created_by': request.user.id}]}
]
}
]
}
],
'group_set': {'$all': [ObjectId(group_id)]}
}).sort('last_update', -1)
# course_nodes_count = course_coll.count()
return render_to_response("ndf/course.html",
{'title': title,
'appId': app._id,
'searching': True, 'query': search_field,
'course_coll': course_coll, 'groupid': group_id, 'group_id':group_id
},
context_instance=RequestContext(request)
)
else:
# Course list view
title = GST_COURSE.name
course_coll = node_collection.find({'member_of': {'$all': [ObjectId(course_id)]},
'group_set': {'$all': [ObjectId(group_id)]},
'$or': [
{'access_policy': u"PUBLIC"},
{'$and': [
{'access_policy': u"PRIVATE"},
{'created_by': request.user.id}
]
}
]
})
template = "ndf/course.html"
variable = RequestContext(request, {'title': title, 'course_nodes_count': course_coll.count(), 'course_coll': course_coll, 'groupid':group_id, 'appId':app._id, 'group_id':group_id})
return render_to_response(template, variable)
@login_required
@get_execution_time
def create_edit(request, group_id, node_id=None):
"""Creates/Modifies details about the given quiz-item.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
context_variables = {'title': GST_COURSE.name,
'group_id': group_id,
'groupid': group_id
}
if node_id:
course_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
else:
course_node = node_collection.collection.GSystem()
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(GST_COURSE._id),'group_set': ObjectId(group_id) })
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
if request.method == "POST":
# get_node_common_fields(request, course_node, group_id, GST_COURSE)
course_node.save(is_changed=get_node_common_fields(request, course_node, group_id, GST_COURSE))
return HttpResponseRedirect(reverse('course', kwargs={'group_id': group_id}))
else:
if node_id:
context_variables['node'] = course_node
context_variables['groupid'] = group_id
context_variables['group_id'] = group_id
context_variables['appId'] = app._id
context_variables['nodes_list'] = json.dumps(nodes_list)
return render_to_response("ndf/course_create_edit.html",
context_variables,
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = nod
|
markrawlingson/SickRage
|
sickbeard/providers/iptorrents.py
|
Python
|
gpl-3.0
| 7,350
| 0.00449
|
# Author: seedboy
# URL: https://github.com/seedboy
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from sickbeard.providers import generic
from sickbeard import logger
from sickbeard import tvcache
from sickrage.helper.exceptions import AuthException, ex
from sickbeard.bs4_parser import BS4Parser
class IPTorrentsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "IPTorrents")
self.supportsBacklog = True
self.username = None
self.password = None
self.ratio = None
self.freeleech = False
self.minseed = None
self.minleech = None
self.cache = IPTorrentsCache(self)
self.urls = {'base_url': 'https://iptorrents.eu',
'login': 'https://iptorrents.eu/torrents/',
'search': 'https://iptorrents.eu/t?%s%s&q=%s&qf=#torrents'}
self.url = self.urls['base_url']
self.categories = '73=&60='
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'submit'}
self.getURL(self.urls['login'], timeout=30)
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if no
|
t response:
logger.log(u"Unable to connect to pr
|
ovider", logger.WARNING)
return False
if re.search('tries left', response):
logger.log(u"You tried too often, please try again after 1 hour! Disable IPTorrents for at least 1 hour", logger.WARNING)
return False
if re.search('Password not correct', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
freeleech = '&free=on' if self.freeleech else ''
if not self._doLogin():
return results
for mode in search_params.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_params[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
# URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
searchURL = self.urls['search'] % (self.categories, freeleech, search_string)
searchURL += ';o=seeders' if mode != 'RSS' else ''
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
try:
data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
if not html:
logger.log("No data returned from provider", logger.DEBUG)
continue
if html.find(text='No Torrents Found!'):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'class': 'torrents'})
torrents = torrent_table.find_all('tr') if torrent_table else []
# Skip this page if no release rows were found (the first row is the header)
if len(torrents) < 2:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for result in torrents[1:]:
try:
title = result.find_all('td')[1].find('a').text
download_url = self.urls['base_url'] + result.find_all('td')[3].find('a')['href']
size = self._convertSize(result.find_all('td')[5].text)
seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).text)
leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).text)
except (AttributeError, TypeError, KeyError):
continue
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing provider. Error: %r" % ex(e), logger.ERROR)
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
def _convertSize(self, size):
size, modifier = size.split(' ')
size = float(size)
if modifier in 'KB':
size = size * 1024
elif modifier in 'MB':
size = size * 1024**2
elif modifier in 'GB':
size = size * 1024**3
elif modifier in 'TB':
size = size * 1024**4
return int(size)
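        # Examples (informal, added for clarity): "700 MB" -> 734003200,
        # "1.5 GB" -> 1610612736. Note that the `in` checks above compare
        # substrings, so a bare "B" modifier would match 'KB' and be scaled
        # as kilobytes.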
class IPTorrentsCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# Only poll IPTorrents every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = IPTorrentsProvider()
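
A minimal, self-contained sketch of the size-string parsing that _convertSize implements above, using exact unit matching instead of the substring test (the original's "modifier in 'KB'" check also matches a bare 'B' suffix). The helper name, unit table, and sample value are illustrative assumptions, not part of the provider.

# Hedged sketch: dict-based size conversion with exact unit matching.
# Assumes size strings arrive as "<number> <unit>", e.g. "700.5 MB".
_UNIT_FACTORS = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}

def convert_size(size_text):
    number, _, unit = size_text.partition(' ')
    return int(float(number) * _UNIT_FACTORS.get(unit.upper(), 1))

print(convert_size("1.5 GB"))  # 1610612736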
|
cloud-fan/spark
|
python/docs/source/conf.py
|
Python
|
apache-2.0
| 12,894
| 0.005584
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # handle NumPy documentation formatted docstrings.
'sphinx_plotly_directive', # For visualize plot result
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/building-spark.html
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/#downloading
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
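# For illustration (an assumption about how these substitutions are consumed,
# not something defined in this file): an .rst page in this docs tree can then
# write "|binder|_" or "|examples|_", and Sphinx resolves the link text and
# target from the rst_epilog definitions above.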
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
|
aubreyrjones/libesp
|
scons_local/scons-local-2.3.0/SCons/PathList.py
|
Python
|
mit
| 8,536
| 0.000937
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py 2013/03/03 09:48:35 garyo"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$'
TYPE_STRING_SUBST = 1 # string containing '$'
TYPE_OBJECT = 2 # other object
def node_conv(obj):
"""
This is the "string conversion" routine that we have our substitutions
use to return Nodes, not strings. This relies on the fact that an
EntryProxy object has a get() method that returns the underlying
Node that it wraps, which is a bit of architectural dependence
that we might need to break or modify in the future in response to
additional requirements.
"""
try:
get = obj.get
except AttributeError:
if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence( obj ):
result = obj
else:
result = str(obj)
else:
result = get()
return result
class _PathList(object):
"""
An actual PathList object.
"""
def __init__(self, pathlist):
"""
Initializes a PathList object, canonicalizing the input and
pre-processing it for quicker substitution later.
The stored representation of the PathList is a list of tuples
containing (type, value), where the "type" is one of the TYPE_*
variables defined above. We distinguish between:
strings that contain no '$' and therefore need no
delayed-evaluation string substitution (we expect that there
will be many of these and that we therefore get a pretty
big win from avoiding string substitution)
strings that contain '$' and therefore need substitution
(the hard case is things like '${TARGET.dir}/include',
which require re-evaluation for every target + source)
other objects (which may be something like an EntryProxy
that needs a method called to return a Node)
Pre-identifying the type of each element in the PathList up-front
and storing the type in the list of tuples is intended to reduce
the amount of calculation when we actually do the substitution
over and over for each target.
"""
if SCons.Util.is_String(pathlist):
pathlist = pathlist.split(os.pathsep)
elif not SCons.Util.is_Sequence(pathlist):
pathlist = [pathlist]
pl = []
for p in pathlist:
try:
index = p.find('$')
except (AttributeError, TypeError):
type = TYPE_OBJECT
else:
if index == -1:
type = TYPE_STRING_NO_SUBST
else:
type = TYPE_STRING_SUBST
pl.append((type, p))
self.pathlist = tuple(pl)
def __len__(self): return len(self.pathlist)
def __getitem__(self, i): return self.pathlist[i]
def subst_path(self, env, target, source):
"""
Performs construction variable substitution on a pre-digested
PathList for a specific target and source.
"""
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(SCons.Util.flatten(value))
elif value:
result.append(value)
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
elif value:
result.append(value)
return tuple(result)
class PathListCache(object):
"""
A class to handle caching of PathList lookups.
This class gets instantiated once and then deleted from the namespace,
so it's used as a Singleton (although we don't enforce that in the
usual Pythonic ways). We could have just made the cache a dictionary
in the module namespace, but putting it in this class allows us to
use the same Memoizer pattern that we use elsewhere to count cache
hits and misses, which is very valuable.
Lookup keys in the cache are computed by the _PathList_key() method.
Cache lookup should be quick, so we don't spend cycles canonicalizing
all forms of the same lookup key. For example, 'x:y' and ['x',
'y'] logically represent the same list, but we don't bother to
split string representations and treat those two equivalently.
(Note, however, that we do, treat lists and tuples the same.)
The main type of duplication we're trying to catch will come from
looking up the same path list from two different clones of the
same construction environment. That is, given
env2 = env1.Clone()
both env1 and env2 will have the same CPPPATH value, and we can
cheaply avoid re-parsing both values of CPPPATH by using the
common value from this cache.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self):
self._memo = {}
def _PathList_key(self, pathlist):
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist
memoizer_counters.append(SCons.Memoize.CountDict('PathList', _PathList_key))
def PathList(self, pathlist):
"""
Returns the cached _PathList object for the specified pathlist,
creating and caching a new object as necessary.
"""
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
|
ruediger/gcc-python-plugin
|
tests/plugin/functions/script.py
|
Python
|
gpl-3.0
| 3,300
| 0.003636
|
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# Verify examining details of functions
import gcc
from gccutils import pprint
def on_pass_execution(p, fn):
if p.name == '*warn_function_return':
assert isinstance(fn, gcc.Function)
print('fn: %r' % fn)
        assert isinstance(fn.decl, gcc.FunctionDecl)
print('fn.decl.name: %r' % fn.decl.name)
assert isinstance(fn.decl, gcc.FunctionDecl)
#print(fn.decl.type)
        #print(fn.decl.type.argument_types)
#pprint(fn.decl)
print('len(fn.local_decls): %r' % len(fn.local_decls))
for i, local in enumerate(fn.local_decls):
print('local_decls[%i]' % i)
print(' type(local): %r' % type(local))
print(' local.name: %r' % local.name)
# The "initial" only seems to be present for static variables
# with initializers. Other variables seem to get initialized
# in explicit gimple statements (see below)
if local.initial:
print(' local.initial.constant: %r' % local.initial.constant)
else:
print(' local.initial: %r' % local.initial)
print(' str(local.type): %r' % str(local.type))
#pprint(local)
#local.debug()
print('fn.funcdef_no: %r' % fn.funcdef_no)
print('fn.start: %r' % fn.start)
print('fn.end: %r' % fn.end)
assert isinstance(fn.cfg, gcc.Cfg) # None for some early passes
assert len(fn.cfg.basic_blocks) == 3
assert fn.cfg.basic_blocks[0] == fn.cfg.entry
assert fn.cfg.basic_blocks[1] == fn.cfg.exit
bb = fn.cfg.basic_blocks[2]
for i,stmt in enumerate(bb.gimple):
print('gimple[%i]:' % i)
print(' str(stmt): %r' % str(stmt))
print(' repr(stmt): %r' % repr(stmt))
if isinstance(stmt, gcc.GimpleAssign):
print(' str(stmt.lhs): %r' % str(stmt.lhs))
print(' [str(stmt.rhs)]: %r' % [str(item) for item in stmt.rhs])
#print(dir(stmt))
#pprint(stmt)
print('fn.decl.arguments: %r' % fn.decl.arguments)
for i, arg in enumerate(fn.decl.arguments):
print(' arg[%i]:' % i)
print(' arg.name: %r' % arg.name)
print(' str(arg.type): %r' % str(arg.type))
print('type(fn.decl.result): %r' % type(fn.decl.result))
print(' str(fn.decl.result.type): %r' % str(fn.decl.result.type))
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
on_pass_execution)
|
Programvareverkstedet/pensieve
|
pensieve.py
|
Python
|
gpl-2.0
| 789
| 0.017744
|
#!/usr/bin/python
import os
import threading
import time
import Queue
import signal
import subprocess
import collections
from json_get import generate_list
q = Queue.Queue()
ls = collections.deque(generate_list())
def showstuff():
while ( True ):
sb = subprocess.Popen(["feh", "-Z", "-g" ,"800x400",ls[0]])
while( True ):
a = q.get()
print a
if ( a == "stop" ):
sb.terminate()
exit()
elif ( a == "next"):
ls.rotate(1)
sb.terminate()
break
def amin():
showOff = threading.Thread(target=showstuff)
showOff.start()
for i in range(6):
time.sleep(5)
q.put("next")
time.sleep(2)
q.put("stop")
amin()
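
A minimal sketch (Python 3, assumed names) of the same queue-based control pattern used above, with the feh subprocess replaced by prints so the "next"/"stop" signalling can be exercised without an image viewer:

# Hedged sketch: producer/consumer slideshow control without feh.
import queue
import threading
import time

q = queue.Queue()

def worker():
    while True:
        print("showing current slide")
        msg = q.get()          # blocks until the controller sends a command
        if msg == "stop":
            print("stopping")
            return
        elif msg == "next":
            continue           # loop around and "show" the next slide

t = threading.Thread(target=worker)
t.start()
for _ in range(3):
    time.sleep(0.1)
    q.put("next")
q.put("stop")
t.join()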
|
edusegzy/pychemqt
|
lib/mEoS/MD2M.py
|
Python
|
gpl-3.0
| 2,498
| 0.003205
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lib.meos import MEoS
from lib import unidades
class MD2M(MEoS):
"""Multiparameter equation of state for decamethyltetrasiloxane"""
name = "decamethyltetrasiloxane"
CASNumber = "141-62-8"
formula = "C10H30Si4O3"
synonym = "MD2M"
rhoc = unidades.Density(284.1716396703609)
Tc = unidades.Temperature(599.40)
Pc = unidades.Pressure(1227.0, "kPa")
M = 310.685 # g/mol
Tt = unidades.Temperature(205.2)
Tb = unidades.Temperature(467.51)
f_acent = 0.668
momentoDipolar = unidades.DipoleMoment(1.12, "Debye")
id = 39
# id = 1837
CP1 = {"ao": 331.9,
"an": [], "pow": [],
"ao_exp": [], "exp": [],
"ao_hyp": [329620742.8, 0, 2556558319.0, 0],
"hyp": [795.1, 0, 1813.8, 0
|
]}
helmholtz1 = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for MD2M of Colonna et al. (2006).",
"__doi__": {"autor": "Colonna, P., Nannan, N.R., and Guardone, A.",
"title": "Multiparameter equations of state for siloxanes: [(CH3)3-Si-O1/2]2-[O-Si-(CH3)2]i=1,…,3, and [O-Si-(CH3)2]6",
"ref": "Fluid Phase Equilibria 263:115-130, 2008",
"doi": "10.1016/j.fluid.2007.10.001"},
"R": 8.314472,
"cp": CP1,
"ref": "NBP",
"Tmin": Tt, "Tmax": 673.0, "Pmax": 30000.0, "rhomax": 3.033,
"Pmin": 0.0000005, "rhomin": 3.032,
"nr1": [1.33840331, -2.62939393, 0.4398383, -0.53496715, 0.1818844,
0.40774609e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [1.13444506, 0.5774631e-1, -0.5917498, -0.11020225,
-0.34942635e-1, 0.7646298e-2],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.0],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
eq = helmholtz1,
_vapor_Pressure = {
"eq": 5,
"ao": [-0.10029e2, 0.44434e1, -0.36753e1, -0.68925e1, -0.32211e1],
"exp": [1.0, 1.5, 2.06, 3.5, 10.0]}
_liquid_Density = {
"eq": 1,
"ao": [0.12608e2, -0.32120e2, 0.33559e2, -0.11695e2, 0.76192],
"exp": [0.48, 0.64, 0.8, 1.0, 2.6]}
_vapor_Density = {
"eq": 3,
"ao": [-0.24684e1, -0.71262e1, -0.27601e2, -0.49458e2, -0.24106e2,
-0.19370e3],
"exp": [0.376, 0.94, 2.9, 5.9, 6.2, 13.0]}
|
enchuu/yaytp
|
video.py
|
Python
|
mit
| 1,966
| 0.006104
|
#!/usr/bin/env python
import subprocess
import time
from format import *
class Video():
""" Class to represent a Youtube video."""
def __init__(self, data):
self.id = data['id']
        self.title = data['title']
self.description = data['description']
self.user = data['uploader']
self.uploaded = time.strptime(data['uploaded'].replace(".000Z", "").replace("T", " "), "%Y-%m-%d %H:%M:%S")
        self.views = int(data['viewCount']) if 'viewCount' in data else 0
self.rating = float(data['rating']) if 'rating' in data else 0
self.likes = int(data['likeCount']) if 'likeCount' in data else 0
self.dislikes = int(data['ratingCount']) - self.likes if 'ratingCount' in data else 0
self.comment_count = int(data['commentCount']) if 'commentCount' in data else 0
self.length = int(data['duration'])
def format_title_desc(self, number):
"""Formats information about the title and description of the video."""
title = str(number) + '. ' + self.title
desc = self.description
return (title, desc)
def format_info(self):
"""Formats other information about the video."""
user = ' ' + quick_fit_string(self.user, 21)
info1 = ' v:' + format_int(self.views, 4) + \
' t:' + quick_fit_string(format_time(self.length), 8)
info2 = ' l:' + format_int(self.likes, 4) + \
' d:' + format_int(self.dislikes, 4) + \
' r:' + quick_fit_string(str(self.rating), 4)
info3 = ' r:' + quick_fit_string(str(self.rating), 4) + \
' u:' + time.strftime('%d/%m/%y', self.uploaded)
return (user, info1, info3)
def play(self, player, args):
"""Opens the video in a video player"""
url = 'https://www.youtube.com/watch?v=' + self.id
player = subprocess.Popen([player] + args.split(' ') + [url], stderr=subprocess.DEVNULL)
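
A hedged usage sketch for the class above. The data dict is a hand-made assumption shaped after the keys read in __init__ (it is not a real YouTube API payload), and it assumes the helpers imported from format.py (format_int, format_time, quick_fit_string) are available.

# Hypothetical example payload, mirroring the keys __init__ expects.
sample = {
    'id': 'dQw4w9WgXcQ',
    'title': 'Example video',
    'description': 'Short description.',
    'uploader': 'example_user',
    'uploaded': '2014-05-01T12:00:00.000Z',
    'duration': 212,
    'viewCount': 1234,
    'rating': 4.5,
    'likeCount': 100,
    'ratingCount': 120,
    'commentCount': 10,
}

v = Video(sample)
title, desc = v.format_title_desc(1)
user, info1, info3 = v.format_info()
print(title)
print(user, info1, info3)
# v.play('mpv', '--no-terminal')  # would launch the video in an external player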
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/importlib/_bootstrap_external.py
|
Python
|
lgpl-2.1
| 60,574
| 0.000528
|
"""Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
# `make regen-importlib` followed by `make` in order to get the frozen version
# of the module updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module in the early
# stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
+ _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
key = 'PYTHONCASEOK'
else:
key = b'PYTHONCASEOK'
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
_code_type = type(_write_atomic.__code__)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
#     Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
#                          storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
#3021)
# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
# Python 3.1a1: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
#4715)
# Python 3.2a1: 3160 (add SETUP_WITH #6101)
# tag: cpython-32
# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX an
|