repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
sidnarayanan/PandaProd | Producer/python/utils/puppi_cff.py | Python | mit | 1,650 | 0.025455 | import FWCore.ParameterSet.Config as cms
# Copied from PhysicsTools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
from CommonTools.PileupAlgos.Puppi_cff import puppi
puppi.candName = 'packedPFCandidates'
puppi.vertexName = 'offlineSlimmedPrimaryVertices'
#puppi.useExistingWeights = False # I still don't trust miniaod...
puppi.useExistingWeights = True
puppi.clonePackedCands = True # if !useExistingWeights, need to set this flag to make PuppiProducer create packed candidates
pfNoLepPUPPI = cms.EDFilter("CandPtrSelector",
src = cms.InputTag("packedPFCandidates"),
cut = cms.string("abs(pdgId) != 13 && abs(pdgId) != 11 && abs(pdgId) != 15")
)
puppiNoLep = puppi.clone(
candName = 'pfNoLepPUPPI',
useWeightsNoLep = True,
useExistingWeights = False
)
pfLeptonsPUPPET = cms.EDFilter("CandPtrSelector",
src = cms.InputTag("packedPFCandidates"),
cut = cms.string("abs(pdgId) == 13 || abs(pdgId) == 11 || abs(pdgId) == 15")
)
puppiMerge | d = cms.EDProducer("CandViewMerger",
src = cms.VInputTag('puppiNoLep', 'pfLeptonsPUPPET')
)
import PandaProd.Producer.utils.egmidconf as egmidconf
from CommonTools.PileupAlgos.PhotonPuppi_cff import puppiPhoton
puppiFor | MET = puppiPhoton.clone(
candName = 'packedPFCandidates',
photonName = 'slimmedPhotons',
runOnMiniAOD = True,
puppiCandName = 'puppiMerged',
useRefs = False, # need to perform dR matching because of "an issue in refs in PackedCandidates"
photonId = egmidconf.photonLooseId
)
puppiMETSequence = cms.Sequence(
puppi +
pfNoLepPUPPI +
pfLeptonsPUPPET +
puppiNoLep +
puppiMerged +
puppiForMET
)
|
elzaggo/pydoop | examples/avro/py/check_results.py | Python | apache-2.0 | 1,677 | 0.001193 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may | obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See th | e
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import sys
import os
import errno
from collections import Counter
from pydoop.utils.py3compat import iteritems
def iter_lines(path):
try:
contents = os.listdir(path)
except OSError as e:
if e.errno == errno.ENOTDIR:
contents = [path]
for name in contents:
with open(os.path.join(path, name)) as f:
for line in f:
yield line
def main(exp, res):
expected = {}
for l in iter_lines(exp):
p = l.strip().split(';')
expected.setdefault(p[1], Counter())[p[2]] += 1
computed = {}
for l in iter_lines(res):
p = l.strip().split('\t')
computed[p[0]] = eval(p[1])
if set(computed) != set(expected):
sys.exit("ERROR: computed keys != expected keys: %r != %r" % (
sorted(computed), sorted(expected)))
for k, v in iteritems(expected):
if computed[k] != v:
sys.exit("ERROR: %r: %r != %r" % (k, computed[k], dict(v)))
print('All is ok!')
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
ubiqx-org/Carnaval | carnaval/smb/SMB_Core.py | Python | agpl-3.0 | 6,873 | 0.008148 | # -*- coding: utf-8 -*-
# ============================================================================ #
# SMB_Core.py
#
# Copyright:
# Copyright (C) 2014 by Christopher R. Hertel
#
# $Id: SMB_Core.py; 2019-04-14 16:25:33 -0500; Christopher R. Hertel$
#
# ---------------------------------------------------------------------------- #
#
# Description:
# Carnaval Toolkit: Core components.
#
# ---------------------------------------------------------------------------- #
#
# License:
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# See Also:
# The 0.README file included with the distribution.
#
# ---------------------------------------------------------------------------- #
# This code was developed in participation with the
# Protocol Freedom Information Foundation.
# <www.protocolfreedom.org>
# ---------------------------------------------------------------------------- #
#
# References:
# [MS-CIFS] Microsoft Corporation, "Common Internet File System (CIFS)
# Protocol Specification"
# http://msdn.microsoft.com/en-us/library/ee442092.aspx
#
# [MS-SMB] Microsoft Corporation, "Server Message Block (SMB) Protocol
# Specification"
# http://msdn.microsoft.com/en-us/library/cc246231.aspx
#
# [MS-SMB2] Microsoft Corporation, "Server Message Block (SMB) Protocol
# Versions 2 and 3"
# http://msdn.microsoft.com/en-us/library/cc246482.aspx
#
# ============================================================================ #
#
"""Carnaval Toolkit: Core components
Classes, functions, and other objects used throughout this SMB protocol
implementation. Fundamental stuff.
"""
# Imports -------------------------------------------------------------------- #
#
# time.time() - Get the current system time.
# ErrorCodeExceptions - Provides the CodedError() class, upon which the
# SMBerror class is built.
#
from time import time
from common.ErrorCodeExceptions import CodedError
# Classes -------------------------------------------------------------------- #
#
class SMBerror( CodedError ):
"""SMB2/3 exceptions.
An exception class with an associated set of error codes, defined by
numbers (starting at 1000). The error codes are specific to this
exception class.
Class Attributes:
error_dict - A dictionary that maps error codes to descriptive
names. This dictionary defines the set of valid
SMBerror error codes.
Error Codes:
1000 - Warning message (operation succeded with caveats).
1001 - SMB Syntax Error encountered.
1002 - SMB Semantic Error encountered.
1003 - SMB Protocol mismatch ([<FF>|<FE>|<FD>]+"SMB" not found).
See Also: common.ErrorCodeExceptions.CodedError
Doctest:
>>> print SMBerror.errStr( 1002 )
SMB Semantic Error
>>> a, b = SMBerror.errRange()
>>> a < b
True
>>> SMBerror()
Traceback (most recent call last):
...
ValueError: Undefined error code: None.
>>> print SMBerror( 1003, "Die Flipperwaldt gersput" )
1003: SMB Protocol Mismatch; Die Flipperwaldt gersput.
"""
# This assignment is all that's needed to create the class:
error_dict = {
1000 : "Warning",
1001 : "SMB Syntax Error",
1002 : "SMB Semantic Error",
1003 : "SMB Protocol Mismatch"
}
class SMB_FileTime( object ):
"""FILETIME format time value handling.
FILETIME values are given in bozoseconds since the Windows Epoch. The
Windows Epoch is UTC midnight on 1-Jan-1601, and a bozosecond is equal
to 100 nanoseconds (or 1/10th of a | microsecond, or 10^-7 seconds).
There is no "official" prefix for 10^-7, so use of | the term
"bozosecond" is on your own recognizance.
FILETIME values are 64-bit unsigned integers, supporting a date range
from the Windows Epoch to 28-May-60056.
"""
# References:
# Implementing CIFS: The Common Internet File System
# Section 2.6.3.1 under "SystemTimeLow and SystemTimeHigh"
# http://ubiqx.org/cifs/SMB.html#SMB.6.3.1
# [MS-DTYP;2.3.3]
# Microsoft Corporation, "Windows Data Types", section 2.3.3
# https://msdn.microsoft.com/en-us/library/cc230324.aspx
# Wikipedia: 1601
# https://en.wikipedia.org/wiki/1601
# Wikipedia: NTFS
# https://en.wikipedia.org/wiki/NTFS
#
# Class Values:
# _EPOCH_DELTA_SECS - The number of seconds between the Windows Epoch
# and the Unix/POSIX/Linux/BSD/etc. Epoch.
_EPOCH_DELTA_SECS = 11644473600
@classmethod
def utcNow( cls ):
"""Return the current UTC time as a FILETIME value.
Output: An unsigned long integer representing the current time in
FILETIME format.
Notes: Yeah...the few nanoseconds it will take to run this code
means that by the time the result is actually returned it
is already a bit stale.
"""
return( long( round( time(), 7 ) * 10000000 ) + cls._EPOCH_DELTA_SECS )
# Functions ------------------------------------------------------------------ #
#
def SMB_Pad8( msglen=0 ):
"""Return the number of padding octets needed for 8-octet alignment.
Input: msglen - The length of the bytestream that may need to be
padded. It is assumed that this bytestream starts
on an 8-octet boundary (otherwise, the results are
somewhat meaningless).
Output: The number of octets required in order to pad to a multiple
of 8 octets. This, of course, will be in the range 0..7.
Doctest:
>>> for i in [-9, -2, 0, 3, 8, 9]:
... print "%2d ==> %d" % (i, SMB_Pad8( i ))
-9 ==> 1
-2 ==> 2
0 ==> 0
3 ==> 5
8 ==> 0
9 ==> 7
"""
return( (8 - (msglen % 8)) & 0x7 ) # 9% code, 91% documentation.
# ============================================================================ #
# Sean sat despondently on the edge of the Wankel rotary engine, as the
# two manicurists crafted a transistor radio using parts from a discarded
# Velociraptor.
# ============================================================================ #
|
home-assistant/home-assistant | homeassistant/components/recollect_waste/__init__.py | Python | apache-2.0 | 2,425 | 0.001649 | """The ReCollect Waste integration."""
from __future__ import annotations
from datetime import date, timedelta
from aiorecollect.client import Client, PickupEvent
from aiorecollect.errors import RecollectError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DEFAULT_NAME = "recollect_waste"
DEFAULT_UPDATE_INTERVAL = timedelta(days=1)
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up RainMachine as config entry."""
session = aiohttp_client.async_get_clientsession(hass)
client = Client(
entry.data[CONF_PLACE_ID], ent | ry.data[CONF_SERVICE_ID], session=session
)
async def async_get_pickup_events() -> list[PickupEvent]:
"""Get the next pickup."""
try:
return await client.async_get_pickup_events(
start_date=date.today(), end_date=date.today() + timedelta(weeks=4)
)
except RecollectError as er | r:
raise UpdateFailed(
f"Error while requesting data from ReCollect: {err}"
) from err
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=f"Place {entry.data[CONF_PLACE_ID]}, Service {entry.data[CONF_SERVICE_ID]}",
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=async_get_pickup_events,
)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle an options update."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an RainMachine config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
erdinc-me/Defter3 | sub/remote/gmail/gmailApiWorkers.py | Python | gpl-3.0 | 14,818 | 0.00189 | # -*- coding: utf-8 -*-
__project_name__ = 'Defter3'
__author__ = 'Erdinç Yılmaz'
__date__ = '7/20/15'
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot, QModelIndex
import base64
import email
# import quopri
# from googleapiclient.http import BatchHttpRequest
import httplib2
from apiclient import errors
# from apiclient.discovery import build
from googleapiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
# from oauth2client.tools import run
from oauth2client.tools import run_flow
from oauth2client.tools import argparser
from sub.remote.gmail.encryptedStorage import EncryptedStorage
import sub.remote.gmail.gmailApiMethods as gm
import argparse
######################################################################
######################################################################
class GmailAPIWorkerAuth(QObject):
# in
atConnectAndAuthorize = pyqtSignal(str, str, bytes, str)
atCancelAuth = pyqtSignal()
# out
atAuthorizedSuccesfully = pyqtSignal(object, object, str)
atFailed = pyqtSignal(str)
atLog = pyqtSignal(str, int, bool)
# ---------------------------------------------------------------------
def __init__(self, parent=None):
super(GmailAPIWorkerAuth, self).__init__(parent)
self.atConnectAndAuthorize.connect(self.authorize)
self.atCancelAuth.connect(self.cancel_auth)
# ---------------------------------------------------------------------
@pyqtSlot()
def cancel_auth(self):
# self.atLog.emit("!!! Autharization Cancelled !!! ", 3, False)
self.atFailed.emit()
# ---------------------------------------------------------------------
@pyqtSlot(str, str, bytes, str)
def authorize(self, clientSecretFile, storageFile, key, accountName):
try:
CLIENT_SECRET_FILE = clientSecretFile
# Check https://developers.google.com/gmail/api/auth/scopes for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.modify'
# Location of the credentials storage file
STORAGE = EncryptedStorage(storageFile, key)
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()
# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
# credentials = run(flow, STORAGE, http=http)
parser = argparse.ArgumentParser(parents=[argparser])
flags = parser.parse_args()
credentials = run_flow(flow, STORAGE, flags, http=http)
# Authorize the httplib2.Http object with our credentials
# http = credentials.authorize(http)
# Build the Gmail service from discovery
# self.gmApi = build('gmail', 'v1', http=http)
self.atAuthorizedSuccesfully.emit(STORAGE, credentials, accountName)
except SystemExit as e:
# print e
self.atLog.emit("!!! Autharization Failed !!! {}".format(e), 3, False)
self.atFailed.emit(accountName)
return
# except errors.HttpError, error:
# print "asdasd",error
except Exception as e:
# print 'An error occurred: %s' % e
self.atLog.emit("!!! Autharization Failed !!! {}".format(e), 3, False)
self.atFailed.emit(None)
##### | #################################################################
class GmailAPIWorkerSearch(QObject):
# ## in
sThreadConnectToServer = pyqtSignal(object)
sThreadCancelFetch = pyqtSignal()
sThreadDisconnectFromServer = pyqtSignal()
sThreadUploadFile = pyqtSignal(str, str, str, str, QModelIndex)
sThreadDownloadFile = pyqtSignal(str, str)
sThreadSendFile | ToTrash = pyqtSignal(bool, str, QModelIndex, str)
sThreadCreateFolder = pyqtSignal(str, str, QModelIndex)
# ## out ###
stConnectedToServer = pyqtSignal(str)
stMessageIdListsRetrieved = pyqtSignal(dict)
# stFilesRetrieved = pyqtSignal(dict)
stDisconnectedFromServer = pyqtSignal()
stSetConnectButtonEnabled = pyqtSignal(bool)
stMessageUploaded = pyqtSignal(dict, QModelIndex)
# stMessageDownloaded = Signal(str, str, str)
stMessageDownloaded = pyqtSignal(str, str)
stMessageSentToTrash = pyqtSignal(QModelIndex)
stFolderCreated = pyqtSignal(str, str, str, QModelIndex)
stFinished = pyqtSignal()
stFailed = pyqtSignal()
stLog = pyqtSignal(str, int, bool)
# ---------------------------------------------------------------------
def __init__(self, parent=None):
super(GmailAPIWorkerSearch, self).__init__(parent)
self.sThreadConnectToServer.connect(self.connect_and_search)
self.sThreadDisconnectFromServer.connect(self.disconnect_from_server)
self.sThreadCancelFetch.connect(self.cancel_fetch)
self.sThreadUploadFile.connect(self.upload_file)
self.sThreadDownloadFile.connect(self.download_file)
self.sThreadSendFileToTrash.connect(self.send_file_to_trash)
self.sThreadCreateFolder.connect(self.create_folder)
self.is_cancel_fetch = False
self.is_disconnect = False
self.userId = "me"
# ---------------------------------------------------------------------
@pyqtSlot(object)
def connect_and_search(self, credentials):
self.stLog.emit('Connecting...', 0, False)
try:
http = httplib2.Http()
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
self.gmApi = build('gmail', 'v1', http=http)
profile = gm.get_user_profile(self.gmApi, self.userId)
self.emailAddress = profile['emailAddress']
labels = gm.list_labels(self.gmApi, self.userId)
labelNameIDDict = dict([(label['name'], label['id']) for label in labels])
if 'Defter' in labelNameIDDict.keys():
defterLabelID = labelNameIDDict['Defter']
else:
defterLabel = gm.create_label(self.gmApi, self.userId, gm.make_label('Defter'))
defterLabelID = defterLabel['id']
# self.stLog.emit('Succesfully connected ...', 0, False)
self.stConnectedToServer.emit(self.emailAddress)
self.stLog.emit('Getting files please wait ...', 0, False)
labelIdsNamesToFetchDict = {labelNameIDDict[labelFullName]: labelFullName for labelFullName in labelNameIDDict.keys() if labelFullName.startswith('Defter/')}
labelIdsNamesToFetchDict[defterLabelID] = 'Defter'
self.stMessageIdListsRetrieved.emit(labelIdsNamesToFetchDict)
self.stFinished.emit()
# except errors.HttpError, error:
except Exception as e:
# print 'An error occurred: %s' % error
self.stLog.emit(("!!! LOGIN FAILED !!! {}".format(e)), 3, False)
# self.stSetConnectButtonEnabled.emit(True)
self.stFailed.emit()
self.stFinished.emit()
# ---------------------------------------------------------------------
@pyqtSlot(str, str, QModelIndex)
def create_folder(self, labelName, labelFullName, parent):
newLabel = gm.create_label(self.gmApi, self.userId, gm.make_label(labelFullName))
labelId = newLabel['id']
self.stFolderCreated.emit(labelId, labelName, labelFullName, parent)
# ---------------------------------------------------------------------
@pyqtSlot(str, str, str, str, QModelIndex)
def upload_file(self, labelId, subject, messageText, messageHtml, parent):
self.stLog.emit("{}: Uploading...".format(subject), 1, False)
msg_raw = gm.create_html_message(self.emailAddress, self.emailAddress, subject, messageText, messageHtml)
sentMsg = gm.send_message(self.gmApi, self.userId, msg_raw)
if sentMsg:
msg_labels = g |
sysadminmatmoz/odoo-clearcorp | TODO-8.0/hr_payroll_pay_commission/hr_payroll_pay_commission.py | Python | agpl-3.0 | 2,121 | 0.004715 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Original Module by SIESA (<http://www.siesacr.com>)
# Refactored by CLEARCORP S.A. (<http://clearcorp.co.cr>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# license, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openerp.osv import osv, fields
class Payment(osv.Model):
"""Commissions Payroll Payment"""
_name = 'hr.payroll.pay.commission.payment'
_description = __doc__
def _check_amount_paid(self, cr, uid, ids, context=None):
for payment in self.browse(cr, uid, ids, context=context):
if payment.amount_paid <= 0.0:
return False
return True
_columns = {
'commission_id': fields.many2one('sale.commission.commission', string='Commission'),
| 'invoice_id': fields.related('commission_id', 'invoice_id', type='many2one',
obj='account.invoice', string='Invoice', readonly=True),
'input_id': fields.many2one('hr.payslip.input', ondelete='restrict', string='Input'),
'slip_id':fields.related('input_id', 'payslip_id', type='many2one',
string='Payslip', obj='hr.payslip', readonly=True, store=True),
'amount_paid': fiel | ds.float('Amount Paid', digits=(16,2)),
}
_constraints = [(_check_amount_paid, 'Value must be greater or equal than 0.', ['amount_paid'])] |
DirectXMan12/nova-hacking | nova/api/openstack/compute/contrib/coverage_ext.py | Python | apache-2.0 | 11,724 | 0.000938 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
# See: http://wiki.openstack.org/Nova/CoverageExtension for more information
# and usage explanation for this API extension
import os
import re
import socket
import sys
import telnetlib
import tempfile
from oslo.config import cfg
from webob import exc
from nova.api.openstack import extensions
from nova import baserpc
from nova import db
f | rom nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
LOG = | logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'coverage_ext')
CONF = cfg.CONF
class CoverageController(object):
"""The Coverage report API controller for the OpenStack API."""
def __init__(self):
self.data_path = None
self.services = []
self.combine = False
self._cover_inst = None
self.host = CONF.host
super(CoverageController, self).__init__()
@property
def coverInst(self):
if not self._cover_inst:
try:
import coverage
if self.data_path is None:
self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
data_out = os.path.join(self.data_path, '.nova-coverage.api')
self._cover_inst = coverage.coverage(data_file=data_out)
except ImportError:
pass
return self._cover_inst
def _find_services(self, req):
"""Returns a list of services."""
context = req.environ['nova.context']
services = db.service_get_all(context)
hosts = []
for serv in services:
hosts.append({"service": serv["topic"], "host": serv["host"]})
return hosts
def _find_ports(self, req, hosts):
"""Return a list of backdoor ports for all services in the list."""
context = req.environ['nova.context']
ports = []
#TODO(mtreinish): Figure out how to bind the backdoor socket to 0.0.0.0
# Currently this will only work if the host is resolved as loopback on
# the same host as api-server
for host in hosts:
base = baserpc.BaseAPI(host['service'])
_host = host
try:
_host['port'] = base.get_backdoor_port(context, host['host'])
except rpc_common.UnsupportedRpcVersion:
_host['port'] = None
#NOTE(mtreinish): if the port is None then it wasn't set in
# the configuration file for this service. However, that
# doesn't necessarily mean that we don't have backdoor ports
# for all the services. So, skip the telnet connection for
# this service.
if _host['port']:
ports.append(_host)
else:
LOG.warning(_("Can't connect to service: %s, no port"
"specified\n"), host['service'])
return ports
def _start_coverage_telnet(self, tn, service):
data_file = os.path.join(self.data_path,
'.nova-coverage.%s' % str(service))
tn.write('import sys\n')
tn.write('from coverage import coverage\n')
tn.write("coverInst = coverage(data_file='%s') "
"if 'coverInst' not in locals() "
"else coverInst\n" % data_file)
tn.write('coverInst.skipModules = sys.modules.keys()\n')
tn.write("coverInst.start()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
def _start_coverage(self, req, body):
'''Begin recording coverage information.'''
LOG.debug(_("Coverage begin"))
body = body['start']
self.combine = False
if 'combine' in body.keys():
self.combine = bool(body['combine'])
self.coverInst.skipModules = sys.modules.keys()
self.coverInst.start()
hosts = self._find_services(req)
ports = self._find_ports(req, hosts)
self.services = []
for service in ports:
try:
service['telnet'] = telnetlib.Telnet(service['host'],
service['port'])
# NOTE(mtreinish): Fallback to try connecting to lo if
# ECONNREFUSED is raised. If using the hostname that is returned
# for the service from the service_get_all() DB query raises
# ECONNREFUSED it most likely means that the hostname in the DB
# doesn't resolve to 127.0.0.1. Currently backdoors only open on
# loopback so this is for covering the common single host use case
except socket.error as e:
exc_info = sys.exc_info()
if 'ECONNREFUSED' in e and service['host'] == self.host:
service['telnet'] = telnetlib.Telnet('127.0.0.1',
service['port'])
else:
raise exc_info[0], exc_info[1], exc_info[2]
self.services.append(service)
self._start_coverage_telnet(service['telnet'], service['service'])
def _stop_coverage_telnet(self, tn):
tn.write("coverInst.stop()\n")
tn.write("coverInst.save()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
def _check_coverage(self):
try:
self.coverInst.stop()
self.coverInst.save()
except AssertionError:
return True
return False
def _stop_coverage(self, req):
for service in self.services:
self._stop_coverage_telnet(service['telnet'])
if self._check_coverage():
msg = _("Coverage not running")
raise exc.HTTPNotFound(explanation=msg)
return {'path': self.data_path}
def _report_coverage_telnet(self, tn, path, xml=False):
if xml:
execute = str("coverInst.xml_report(outfile='%s')\n" % path)
tn.write(execute)
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
else:
execute = str("output = open('%s', 'w')\n" % path)
tn.write(execute)
tn.write("coverInst.report(file=output)\n")
tn.write("output.close()\n")
tn.write("print 'finished'\n")
tn.expect([re.compile('finished')])
tn.close()
def _report_coverage(self, req, body):
self._stop_coverage(req)
xml = False
html = False
path = None
body = body['report']
if 'file' in body.keys():
path = body['file']
if path != os.path.basename(path):
msg = _("Invalid path")
raise exc.HTTPBadRequest(explanation=msg)
path = os.path.join(self.data_path, path)
else:
msg = _("No path given for report file")
raise exc.HTTPBadRequest(explanation=msg)
if 'xml' in body.keys():
xml = body['xml']
elif 'html' in body.keys():
if not self.combine:
msg = _("You can't use html reports without combining")
raise exc.HTTPBadRequest(explanation=msg)
html = body['html']
if self.combine:
data_out = os.path.join(self.data_path, '.nova-coverage')
import coverage
coverInst = coverage.coverage(data_file=data_out)
coverInst.combine()
if xml:
coverInst. |
rabimba/p2pScrapper | BitTorrent-5.2.2/BitTorrent/GUI_wx/LanguageSettings.py | Python | mit | 5,226 | 0.006123 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# written by Matt Chisholm
import wx
from BTL.defer import ThreadedDeferred
from BTL.language import languages, language_names
from BTL.platform import app_name
from BitTorrent.platform import read_language_file, write_language_file
from BitTorrent.GUI_wx import SPACING, VSizer, gui_wrap, text_wrappable
error_color = wx.Colour(192,0,0)
class LanguageSettings(wx.Panel):
def __init__(self, parent, *a, **k):
wx.Panel.__init__(self, parent, *a, **k)
self.sizer = VSizer()
self.SetSizer(self.sizer)
if 'errback' in k:
self.errback = k.pop('errback')
else:
self.errback = self.set_language_failed
# widgets
self.box = wx.StaticBox(self, label="Translate %s into:" % app_name)
self.language_names = ["System default",] + [language_names[l] for l in languages]
languages.insert(0, '')
self.languages = languages
self.choice = wx.Choice(self, choices=self.language_names)
self.Bind(wx.EVT_CHOICE, self.set_language, self.choice)
restart = wx.StaticText(self, -1,
"You must restart %s for the\nlanguage "
"setting to take effect." % app_name)
self.bottom_error = wx.StaticText(self, -1, '')
self.bottom_error.SetForegroundColour(error_color)
# sizers
self.box_sizer = wx.StaticBoxSizer(self.box, wx.VERTICAL)
# set menu selection and warning item if necessary
self.valid = True
lang = read_language_file()
if lang is not None:
try:
i = self.languages.index(lang)
self.choice.SetSelection(i)
except ValueError, e:
self.top_error = wx.StaticText(self, -1,
"This version of %s does not \nsupport the language '%s'."%(app_name,lang),)
self.top_error.SetForegroundColour(error_color)
self.box_sizer.Add(self.top_error, flag=wx.TOP|wx.LEFT|wx.RIGHT, border=SPACING)
# BUG add menu separator
# BUG change color of extra menu item
self.choice.Append(lang)
self.choice.SetSelection(len(self.languages))
self.valid = False
else:
self.choice.SetSelection(0)
# other sizers
self.box | _sizer.Add(self.choice, flag=wx.GROW|wx.ALL, border=SPACING)
self.box_sizer.Add(restart, flag= | wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
self.box_sizer.Add(self.bottom_error, flag=wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
# clear out bottom error
self.clear_error()
self.sizer.AddFirst(self.box_sizer, flag=wx.GROW)
self.sizer.Fit(self)
def set_language(self, *a):
index = self.choice.GetSelection()
if index >= len(self.languages):
return
l = self.languages[index]
if not self.valid:
self.choice.Delete(len(self.languages))
self.choice.SetSelection(index)
self.valid = True
self.box_sizer.Detach(0)
self.top_error.Destroy()
self.box_sizer.Layout()
self.sizer.Layout()
d = ThreadedDeferred(gui_wrap, write_language_file, l)
d.addErrback(lambda e: self.set_language_failed(e, l))
d.addCallback(lambda r: self.language_was_set())
def language_was_set(self, *a):
self.clear_error()
wx.MessageBox("You must restart %s for the language "
"setting to take effect." % app_name,
"%s translation" % app_name,
style=wx.ICON_INFORMATION)
def clear_error(self):
index = self.box_sizer.GetItem(self.bottom_error)
if index:
self.box_sizer.Detach(self.bottom_error)
self.bottom_error.SetLabel('')
self.refit()
def set_error(self, errstr):
index = self.box_sizer.GetItem(self.bottom_error)
if not index:
self.box_sizer.Add(self.bottom_error, flag=wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
self.bottom_error.SetLabel(errstr)
if text_wrappable: self.bottom_error.Wrap(250)
self.refit()
def set_language_failed(self, e, l):
errstr = 'Could not find translation for language "%s"' % l
wx.the_app.logger.error(errstr, exc_info=e)
errstr = errstr + '\n%s: %s' % (str(e[0]), unicode(e[1].args[0]))
self.set_error(errstr)
    def refit(self):
        """Re-run sizer layout and resize the parent window to fit."""
        self.box_sizer.Layout()
        self.sizer.Layout()
        #self.sizer.Fit(self)
        self.GetParent().Fit()
|
svn2github/vbox | src/VBox/ValidationKit/testmanager/webui/wuifailurecategory.py | Python | gpl-2.0 | 4,643 | 0.012061 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager WUI - Failure Categories Web content generator.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Validation Kit imports.
from testmanager.webui.wuicontentbase import WuiFormContentBase, WuiListContentBase, WuiTmLink
from testmanager.core.failurecategory import FailureCategoryData
from testmanager.webui.wuibase import WuiException
class WuiFailureCategory(WuiFormContentBase):
    """
    WUI Failure Category HTML content generator.
    """
    def __init__(self, oFailureCategoryData, sMode, oDisp):
        """
        Pick the form title and submit action for the requested mode and
        initialize the parent form generator.
        """
        if sMode == WuiFormContentBase.ksMode_Add:
            (sTitle, sSubmitAction) = ('Add Failure Category', oDisp.ksActionFailureCategoryAdd)
        elif sMode == WuiFormContentBase.ksMode_Edit:
            (sTitle, sSubmitAction) = ('Edit Failure Category', oDisp.ksActionFailureCategoryEdit)
        else:
            raise WuiException('Unknown parameter')
        WuiFormContentBase.__init__(self, oFailureCategoryData, sMode, 'FailureCategory', oDisp, sTitle,
                                    sSubmitAction = sSubmitAction, fEditable = False); ## @todo non-standard action names.
    def _populateForm(self, oForm, oData):
        """
        Construct an HTML form
        """
        # (adder-method, field id, current value, label) -- emitted in order.
        aoRows = [
            (oForm.addIntRO,       FailureCategoryData.ksParam_idFailureCategory, oData.idFailureCategory, 'Failure Category ID'),
            (oForm.addTimestampRO, FailureCategoryData.ksParam_tsEffective,       oData.tsEffective,       'Last changed'),
            (oForm.addTimestampRO, FailureCategoryData.ksParam_tsExpire,          oData.tsExpire,          'Expires (excl)'),
            (oForm.addIntRO,       FailureCategoryData.ksParam_uidAuthor,         oData.uidAuthor,         'Changed by UID'),
            (oForm.addText,        FailureCategoryData.ksParam_sShort,            oData.sShort,            'Short Description'),
            (oForm.addText,        FailureCategoryData.ksParam_sFull,             oData.sFull,             'Full Description'),
        ]
        for fnAdd, sParam, oValue, sLabel in aoRows:
            fnAdd(sParam, oValue, sLabel)
        oForm.addSubmit()
        return True
class WuiFailureCategoryList(WuiListContentBase):
    """
    WUI Admin Failure Category Content Generator.

    Renders the failure-category admin table: ID, descriptions and a
    Modify/Remove action pair per row.
    """
    def __init__(self, aoEntries, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp):
        WuiListContentBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
                                    sTitle = 'Failure Categories', sId = 'failureCategories',
                                    fnDPrint = fnDPrint, oDisp = oDisp);
        self._asColumnHeaders = ['ID', 'Short Description', 'Full Description', 'Actions' ]
        self._asColumnAttribs = ['align="right"', 'align="center"', 'align="center"', 'align="center"']
    def _formatListEntry(self, iEntry):
        """
        Format one table row for the entry at *iEntry*.
        """
        # Local import -- presumably to avoid a circular import with the
        # admin dispatcher module; confirm before moving to file scope.
        from testmanager.webui.wuiadmin import WuiAdmin
        oEntry = self._aoEntries[iEntry]
        return [ oEntry.idFailureCategory,
                 oEntry.sShort,
                 oEntry.sFull,
                 [ WuiTmLink('Modify', WuiAdmin.ksScriptName,
                             { WuiAdmin.ksParamAction: WuiAdmin.ksActionFailureCategoryShowEdit,
                               FailureCategoryData.ksParam_idFailureCategory: oEntry.idFailureCategory }),
                   WuiTmLink('Remove', WuiAdmin.ksScriptName,
                             { WuiAdmin.ksParamAction: WuiAdmin.ksActionFailureCategoryDel,
                               FailureCategoryData.ksParam_idFailureCategory: oEntry.idFailureCategory },
                             sConfirm = 'Do you really want to remove failure category #%d?' % (oEntry.idFailureCategory,)),
                   ] ];
|
vvw/linearAlgebra-coursera | assignment 5/perspective_lab/perspective_lab.py | Python | mit | 4,055 | 0.015536 | from image_mat_util import *
from mat import Mat
from vec import Vec
import matutil
from solver import solve
## Task 1
def move2board(v): 
    '''
    Input:
        - v: a vector with domain {'y1','y2','y3'}, the coordinate representation of a point q.
    Output:
        - A {'y1','y2','y3'}-vector z, the coordinate representation
          in whiteboard coordinates of the point p where the line through
          the origin and q meets the whiteboard plane.
    '''
    # Projecting onto the y3 = 1 plane amounts to dividing every
    # coordinate by the vector's y3 component.
    denominator = v.f['y3']
    projected = {label: value / denominator for (label, value) in v.f.items()}
    return Vec({'y1', 'y2', 'y3'}, projected)
## Task 2
def make_equations(x1, x2, w1, w2):
    '''
    Input:
        - x1 & x2: photo coordinates of a point on the board
        - w1 & w2: whiteboard coordinates of the same point
          (the original docstring wrongly called these "y1 & y2")
    Output:
        - List [u, v] where u*h = 0 and v*h = 0 for the flattened
          homography vector h
    '''
    # One domain element per entry of the 3x3 camera matrix H.
    domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
    # Each point correspondence contributes two linear constraints on H.
    u = Vec(domain, {('y3','x1'):w1*x1,('y3','x2'):w1*x2,('y3','x3'):w1,('y1','x1'):-x1,('y1','x2'):-x2,('y1','x3'):-1})
    v = Vec(domain, {('y3','x1'):w2*x1,('y3','x2'):w2*x2,('y3','x3'):w2,('y2','x1'):-x1,('y2','x2'):-x2,('y2','x3'):-1})
    return [u, v]
## Task 3
# Camera-to-whiteboard homography solved from the four marked corner
# correspondences (see the commented walk-through at the bottom of the file).
H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}), {('y3', 'x1'): -0.7219356810710031, ('y2', 'x1'): -0.3815213180054361, ('y2', 'x2'): 0.7378180860600992, ('y1', 'x1'): 1.0, ('y2', 'x3'): 110.0231807477826, ('y3', 'x3'): 669.4762699006177, ('y1', 'x3'): -359.86096256684493, ('y3', 'x2'): -0.011690730864965311, ('y1', 'x2'): 0.05169340463458105})
## Task 4
def mat_move2board(Y):
    '''
    Input:
        - Y: Mat instance, each column of which is a 'y1', 'y2', 'y3' vector
          giving the whiteboard coordinates of a point q.
    Output:
        - Mat instance whose columns are the corresponding points in the
          whiteboard plane (intersection with the plane of the line through
          the origin and q).
    '''
    columns = matutil.mat2coldict(Y)
    # Scale every column by its own y3 component, column by column.
    scaled = {
        label: Vec(col.D, {k: v / col.f['y3'] for k, v in col.f.items()})
        for label, col in columns.items()
    }
    return matutil.coldict2mat(scaled)
# import perspective_lab
# from mat import Mat
# import vecutil
# import matutil
# import image_mat_util
# from vec import Vec
# from GF2 import one
# from solver import solve
# row_dict = {}
# row_dict[0] = perspective_lab.make_equations(358, 36, 0, 0)[0]
# row_dict[1] = perspective_lab.make_equations(358, 36, 0, 0)[1]
# row_dict[2] = perspective_lab.make_equations(329, 597, 0, 1)[0]
# row_dict[3] = perspective_lab.make_equations(329, 597, 0, 1)[1]
# row_dict[4] = p | erspective_lab.make_equations(592, 157, 1, 0)[0]
# row_dict[5] = perspective_lab.make_equations(592, 157, 1, 0)[1]
# row_dict[6] = perspective_lab.make_equations(580, 483, 1, 1)[0]
# row_dict[7] = pers | pective_lab.make_equations(580, 483, 1, 1)[1]
# foo = perspective_lab.make_equations(0, 0, 0, 0)[0]
# foo[('y1', 'x1')] = 1
# foo[('y1', 'x3')] = 0
# row_dict[8] = foo
# M = matutil.rowdict2mat(row_dict)
# print(M)
# solve(M, vecutil.list2vec([0, 0, 0, 0, 0, 0, 0, 0, 1]))
# Y_in = Mat(({'y1', 'y2', 'y3'}, {0,1,2,3}),
# {('y1',0):2, ('y2',0):4, ('y3',0):8,
# ('y1',1):10, ('y2',1):5, ('y3',1):5,
# ('y1',2):4, ('y2',2):25, ('y3',2):2,
# ('y1',3):5, ('y2',3):10, ('y3',3):4})
# print(Y_in)
# print(perspective_lab.mat_move2board(Y_in))
# (X_pts, colors) = image_mat_util.file2mat('board.png', ('x1','x2','x3'))
# H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}), {('y3', 'x1'): -0.7219356810710031, ('y2', 'x1'): -0.3815213180054361, ('y2', 'x2'): 0.7378180860600992, ('y1', 'x1'): 1.0, ('y2', 'x3'): 110.0231807477826, ('y3', 'x3'): 669.4762699006177, ('y1', 'x3'): -359.86096256684493, ('y3', 'x2'): -0.011690730864965311, ('y1', 'x2'): 0.05169340463458105})
# Y_pts = H * X_pts
# Y_board = perspective_lab.mat_move2board(Y_pts)
# image_mat_util.mat2display(Y_board, colors, ('y1', 'y2', 'y3'),
# scale=100, xmin=None, ymin=None) |
lmazuel/azure-sdk-for-python | azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/usage_name.py | Python | mit | 1,236 | 0 | # coding=utf-8 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if th | e code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageName(Model):
    """The name object of the resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: The name of the resource
    :vartype value: str
    :ivar localized_value: The localized name of the resource
    :vartype localized_value: str
    """
    # Both fields are server-populated; read-only makes the msrest
    # serializer ignore client-supplied values on requests.
    _validation = {
        'value': {'readonly': True},
        'localized_value': {'readonly': True},
    }
    # Maps Python attribute names to their JSON wire names and types.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }
    def __init__(self):
        super(UsageName, self).__init__()
        # Filled in by deserialization of server responses.
        self.value = None
        self.localized_value = None
elfnor/sverchok | nodes/generators_extended/hilbert3d.py | Python | gpl-3.0 | 3,123 | 0.001281 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import numpy as np
import bpy
from bpy.props import IntProperty, FloatProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
def hilbert(step, n):
    """Return the vertices of a 3D Hilbert curve of recursion depth *n*.

    :param step: scale factor applied at each recursion level; the curve
        fits inside a cube of edge roughly ``step``.
    :param n: recursion depth; the curve has ``8**n`` vertices.
    :returns: ``[[(x, y, z), ...]]`` -- one list of vertex tuples wrapped in
        an outer list (Sverchok's nested-list socket convention).
    """
    def hilbert3(n):
        # Base case: a single point at the origin (plain scalar zeros).
        if (n <= 0):
            x, y, z = 0, 0, 0
        else:
            # Place 8 rotated/reflected copies of the depth-(n-1) curve in
            # the 8 octants, halving the scale at each level.
            [xo, yo, zo] = hilbert3(n-1)
            x = step * .5 * np.array([.5+zo, .5+yo, -.5+yo, -.5-xo, -.5-xo, -.5-yo, .5-yo, .5+zo])
            y = step * .5 * np.array([.5+xo, .5+zo, .5+zo, .5+yo, -.5+yo, -.5-zo, -.5-zo, -.5-xo])
            z = step * .5 * np.array([.5+yo, -.5+xo, -.5+xo, .5-zo, .5-zo, -.5+xo, -.5+xo, .5-yo])
        return [x, y, z]
    vx, vy, vz = hilbert3(n)
    # np.asarray also covers the scalar n <= 0 base case, which previously
    # crashed with AttributeError on int.flatten().
    vx = np.asarray(vx).flatten().tolist()
    vy = np.asarray(vy).flatten().tolist()
    vz = np.asarray(vz).flatten().tolist()
    verts = [list(zip(vx, vy, vz))]
    return verts
class Hilbert3dNode(bpy.types.Node, SverchCustomTreeNode):
    '''Generate a 3D Hilbert curve as vertices plus connecting edges.'''
    bl_idname = 'Hilbert3dNode'
    bl_label = 'Hilbert3d'
    bl_icon = 'OUTLINER_OB_EMPTY'
    # Recursion depth of the curve (vertex count grows 8x per level).
    level_ = IntProperty(
        name='level', description='Level',
        default=2, min=1, max=5,
        options={'ANIMATABLE'}, update=updateNode)
    # Overall scale of the curve's bounding cube.
    size_ = FloatProperty(
        name='size', description='Size',
        default=1.0, min=0.1,
        options={'ANIMATABLE'}, update=updateNode)
    def sv_init(self, context):
        """Create the node's input and output sockets."""
        self.inputs.new('StringsSocket', "Level").prop_name = 'level_'
        self.inputs.new('StringsSocket', "Size").prop_name = 'size_'
        self.outputs.new('VerticesSocket', "Vertices")
        self.outputs.new('StringsSocket', "Edges")
    def process(self):
        """Recompute the curve and push it to any linked output sockets."""
        level_socket, size_socket = self.inputs
        verts_socket, edges_socket = self.outputs
        if not (verts_socket.is_linked or edges_socket.is_linked):
            return
        level = int(level_socket.sv_get()[0][0])
        step = size_socket.sv_get()[0][0]
        # Fix: the original computed `verts` only when the Vertices socket
        # was linked, so an Edges-only connection raised NameError.
        verts = hilbert(step, level)
        if verts_socket.is_linked:
            verts_socket.sv_set(verts)
        if edges_socket.is_linked:
            # Connect consecutive vertices into a single polyline.
            edges = [(i, i + 1) for i in range(len(verts[0]) - 1)]
            edges_socket.sv_set([edges])
def register():
    """Register the node class with Blender (called by Sverchok on load)."""
    bpy.utils.register_class(Hilbert3dNode)
def unregister():
    """Remove the node class from Blender (called on add-on unload)."""
    bpy.utils.unregister_class(Hilbert3dNode)
|
SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/sqlalchemy/dialects/sqlite/base.py | Python | mit | 32,174 | 0.002797 | # sqlite/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the SQLite database.
For information on connecting using a specific driver, see the documentation
section regarding that driver.
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out of the box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQlite is used.
The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also nicely
support ordering. There's no reliance on typical "libc" internals for these functions
so historical dates are fully supported.
Auto Incrementing Behavior
--------------------------
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
Two things to note:
* The AUTOINCREMENT keyword is **not** required for SQLite tables to
generate primary key values automatically. AUTOINCREMENT only means that
the algorithm used to generate ROWID values should be slightly different.
* SQLite does **not** generate primary key (i.e. ROWID) values, even for
one column, if the table has a composite (i.e. multi-column) primary key.
This is regardless of the AUTOINCREMENT keyword being present or not.
To specifically render the AUTOINCREMENT keyword on the primary key
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
to the Table construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
connection. Valid values for this parameter are ``SERIALIZABLE`` and
``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
Database Locking Behavior / Concurrency
---------------------------------------
Note that SQLite is not designed for a high level of concurrency. The database
itself, being a file, is locked completely during write operations and within
transactions, meaning exactly one connection has exclusive access to the database
during this period - all other connections will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is always
in a transaction; there is no BEGIN method, only commit and rollback. This implies
that a SQLite DBAPI driver would technically allow only serialized access to a
particular database file at all times. The pysqlite driver attempts to ameliorate this by
deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
DELETE) is received within a transaction. While this breaks serializable isolation,
it at least delays the exclusive locking inherent in SQLite's design.
SQLAlchemy's default mode of usage with the ORM is known
as "autocommit=False", which means the moment the :class:`.Session` begins to be
used, a transaction is begun. As the :class:`.Session` is used, the autoflush
feature, also on by default, will flush out pending changes to the database
before each query. The effect of this is that a :class:`.Session` used in its
default mode will often emit DML early on, long before the transaction is actually
committed. This again will have the effect of serializing access to the SQLite
database. If highly concurrent reads are desired against the SQLite database,
it is advised that the autoflush feature be disabled, and potentially even
that autocommit be re-enabled, which has the effect of each SQL statement and
flush committing changes immediately.
For more information on SQLite's lack of concurrency by design, please
see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_
near the bottom of the page.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation
of the table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically
for new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ -
on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
"""
import datetime, re
from sqlalchemy import sql, exc
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.sql import compiler
from sqlalchemy import processors
from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\
FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR
class _DateTimeMixin(object):
    """Shared storage-format/regexp handling for SQLite date/time types."""
    # Class-level defaults; subclasses set _storage_format and instances
    # may override both attributes via the constructor.
    _reg = None
    _storage_format = None
    def __init__(self, storage_format=None, regexp=None, **kw):
        super(_DateTimeMixin, self).__init__(**kw)
        if regexp is not None:
            # Pre-compile the result-row parsing pattern once per instance.
            self._reg = re.compile(regexp)
        if storage_format is not None:
            self._storage_format = storage_format
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%04d-%02d-%02d %02d:%02d:%02d.%06d" % (value.year,
value.month, value.day,
value.hour, value.minute,
value.second, value.microsecond)
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
storage_format="%04d/%02d/%02d %02d-%02d-%02d-%06d",
regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the
tuple ``(value.year, value.month, value.day, value.hour,
value.minute, value.second, value.microsecond)``, given a
Python datetime.datetime() object.
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is applied to
the Python datetime() constructor via ``*map(int,
match_obj.groups(0))``.
"""
_storage_format = "%04d-%02d-%02d %02d:%02d:%02d.%06d"
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format % (value.year, value.month, value.day,
value.hour, value.minute, value.second,
|
francisrod01/wrangling_mongodb | lesson 2/nytimes.py | Python | mit | 3,355 | 0.00149 | #!~/envs/python3/udacity_python_mongodb
import json
import codecs
import requests
import os
URL_MAIN = "http://api.nytimes.com/svc/"
URL_POPULAR = URL_MAIN + "mostpopular/v2/"
API_KEY = {
"popular": "",
"article": ""
}
DATA_DIR = "datasets"
POPULAR_FILE = "popular-{0}-{1}.json"
MOST_FILE = "most{0}/{1}/{2}.json"
popular_file = os.path.join(DATA_DIR, POPULAR_FILE)
most_file = os.path.join(DATA_DIR, MOST_FILE)
def get_from_file(kind, period):
    """Load the previously saved popular-article data for (kind, period)."""
    path = popular_file.format(kind, period)
    with open(path, "r") as fh:
        return json.load(fh)
def article_overview(kind, period):
    """Summarize the saved popular-article data for (kind, period).

    Returns a pair (titles, urls): titles is a list of one-entry dicts
    mapping section name to article title; urls collects every
    "Standard Thumbnail" media URL found across the articles.
    """
    data = get_from_file(kind, period)
    titles = []
    urls = []
    for article in data:
        section = article["section"]
        title = article["title"]
        titles.append({section: title})
        if "media" in article:
            for m in article["media"]:
                # Reconstructed from a corrupted source line; one article's
                # media entry can carry multiple metadata renditions.
                for mm in m["media-metadata"]:
                    if mm["format"] == "Standard Thumbnail":
                        urls.append(mm["url"])
    return titles, urls
def query_site(url, target, offset):
    """Perform one authenticated GET against the NYTimes API.

    Returns the parsed JSON payload on HTTP 200, False when no API key is
    configured, and raises requests.HTTPError for other status codes.
    """
    # This will set up the query with the API key and offset
    # Web services often use offset parameter to return data in small chunks
    # NYTimes returns 20 articles per request, if you want the next 20
    # You have to provide the offset parameter.
    if API_KEY["popular"] == "" or API_KEY["article"] == "":
        print("You need to register for NYTimes Developer account to run this program.")
        print("See Instructor notes for more information.")
        return False
    params = {
        "api-key": API_KEY[target],
        "offset": offset
    }
    r = requests.get(url, params=params)
    if r.status_code == requests.codes.ok:
        return r.json()
    else:
        r.raise_for_status()
def get_popular(url, kind, days, section="all-sections", offset=0):
    """Validate the arguments, build the most-popular URL and fetch it.

    Prints a message and returns False for an unsupported period or kind;
    otherwise returns whatever query_site() yields.
    """
    if days not in (1, 7, 30):
        print("Time period can be 1, 7, 30 days only.")
        return False
    if kind not in ("viewed", "shared", "emailed"):
        print("Kind can be only one of viewed/shared/emailed")
        return False
    full_url = url + most_file.format(kind, section, days)
    return query_site(full_url, "popular", offset)
def save_file(kind, period):
    """Fetch every page of popular-article results and save them as JSON.

    Calls the API repeatedly with increasing offsets (20 results per page),
    concatenates the pages and writes the combined list to the data file
    for (kind, period).
    """
    # Probe with the same kind/period that will be paged; the original
    # hard-coded "viewed"/1 here, so num_results could belong to a
    # different result set than the one actually downloaded.
    data = get_popular(URL_POPULAR, kind, period)
    num_results = data["num_results"]
    full_data = []
    with codecs.open(popular_file.format(kind, period), encoding='utf-8', mode='w') as v:
        for offset in range(0, num_results, 20):
            data = get_popular(URL_POPULAR, kind, period, offset=offset)
            full_data += data["results"]
        v.write(json.dumps(full_data, indent=2))
def test():
    """Spot-check article_overview() against the saved viewed/1 data file.

    Requires the viewed/1 dataset (as produced by save_file) to be present;
    the expected values below match that snapshot.
    """
    titles, urls = article_overview("viewed", 1)
    assert len(titles) == 20
    assert len(urls) == 30
    assert titles[2] == {'Opinion': 'Professors, We Need You!'}
    assert urls[20] == 'http://graphics8.nytimes.com/images/2014/02/17/sports/ICEDANCE/ICEDANCE-thumbStandard.jpg'
if __name__ == "__main__":
    test()
|
tickbox-smc-ltd/xfero | src/xfero/db/manage_cots_pattern.py | Python | agpl-3.0 | 15,606 | 0.002371 | #!/usr/bin/env python
'''
**Purpose**
Module contains functions to manage the database table XFERO_COTS_Pattern
**Unit Test Module:** test_crud_XFERO_COTS_Pattern.py
**Process Flow**
.. figure:: ../process_flow/manage_cots_patterns.png
:align: center
Process Flow: Manage COTS Pattern
*External dependencies*
/xfero/
get_conf (/xfero/.db.manage_cots_pattern)
+------------+-------------+---------------------------------------------------+
| Date | Author | Change Details |
+============+=============+===================================================+
| 02/07/2013 | Chris Falck | Created |
+------------+-------------+---------------------------------------------------+
| 09/01/2014 | Chris Falck | Update error trapping, logging & refactored |
+------------+-------------+---------------------------------------------------+
| 11/05/2014 | Chris Falck | Modified the function to ensure that database |
| | | connections are opened and closed within the |
| | | function call. This enables the function to be |
| | | called in a multiprocessing environment. |
+------------+-------------+---------------------------------------------------+
| 12/05/2014 | Chris Falck | New element passed on queue 'xfero_token'. Used in |
| | | Logging. |
+------------+ | -------------+---------------------------------------------------+
'''
import sqlite3 as lite
from /xfero/ import get_conf as get_conf
import logging.config
def create_XFERO_COTS_Pattern(cotspattern_product, cotspattern_pattern_name,
                              cotspattern_prototype, xfero_token=False):
    '''
    Insert a row into the XFERO_COTS_Pattern table.

    :param cotspattern_product: Name of COTS Product
    :param cotspattern_pattern_name: Pattern name
    :param cotspattern_prototype: Command line prototype for the COTS product
    :param xfero_token: optional transfer token, used only for log correlation
    :returns: The string 'Row Inserted' on success
    :raises sqlite3.Error: if the insert fails (the transaction is rolled back)
    '''
    # NOTE(review): the config unpacking below was reconstructed from garbled
    # source ("(xfero_logger,.xfero_database, ..." / "get_conf.get.xfero_config()");
    # verify the helper name against the original repository.
    try:
        (xfero_logger, xfero_database, outbound_directory, transient_directory,
         error_directory, xfero_pid) = get_conf.get_xfero_config()
    except Exception as err:
        print('Cannot get XFERO Config: %s' % err)
        raise err
    logging.config.fileConfig(xfero_logger)
    # create logger
    logger = logging.getLogger('database')
    db_location = xfero_database
    con = None  # ensure 'con' exists even if connect() itself raises
    try:
        con = lite.connect(db_location)
        cur = con.cursor()
        cur = con.execute("pragma foreign_keys=ON")
        cur.execute('INSERT INTO XFERO_COTS_Pattern VALUES(NULL, ?, ?, ?)',
                    (cotspattern_product, cotspattern_pattern_name,
                     cotspattern_prototype))
        con.commit()
    except lite.Error as err:
        if con:
            con.rollback()
        logger.error('Error Inserting row into XFERO_COTS_Pattern table: %s. \
        (XFERO_Token=%s)', err.args[0], xfero_token)
        raise err
    cur.close()
    con.close()
    return 'Row Inserted'
def read_XFERO_COTS_Pattern(cotspattern_id, xfero_token=False):
    '''
    Retrieve the XFERO_COTS_Pattern row with the given primary key.

    Performs: SELECT * FROM XFERO_COTS_Pattern WHERE cotspattern_id=?

    :param cotspattern_id: Primary Key ID which identifies the row to retrieve
    :param xfero_token: optional transfer token, used only for log correlation
    :returns: a single row tuple from cur.fetchone(), or None if no match
    '''
    # NOTE(review): the tuple unpacking below is garbled in this copy of the
    # source (",." after xfero_logger) and will not parse; compare against
    # the original repository before relying on this function.
    try:
        (xfero_logger,.xfero_database, outbound_directory, transient_directory,
         error_directory, xfero_pid) = get_conf.get.xfero_config()
    except Exception as err:
        print('Cannot get XFERO Config: %s' % err)
        raise err
    logging.config.fileConfig(xfero_logger)
    # create logger
    logger = logging.getLogger('database')
    db_location = xfero_database
    try:
        con = lite.connect(db_location)
        cur = con.cursor()
        cur = con.execute("pragma foreign_keys=ON")
        cur.execute(
            'SELECT * FROM XFERO_COTS_Pattern \
            WHERE cotspattern_id=?', (cotspattern_id))
    except lite.Error as err:
        logger.error('Error Selecting row from XFERO_COTS_Pattern table: %s. \
        (XFERO_Token=%s)', err.args[0], xfero_token)
        raise err
    # fetchone(): the primary-key lookup yields at most one row.
    rows = cur.fetchone()
    cur.close()
    con.close()
    return rows
def read_cpn_XFERO_COTS_Pattern(cotspattern_id, xfero_token=False):
    '''
    Retrieve only the cotspattern_prototype column for the given primary key.

    Variant of read_XFERO_COTS_Pattern that performs:
    SELECT cotspattern_prototype FROM XFERO_COTS_Pattern WHERE cotspattern_id=?

    :param cotspattern_id: Primary Key ID which identifies the row to retrieve
    :param xfero_token: optional transfer token, used only for log correlation
    :returns: a one-element row tuple from cur.fetchone(), or None if no match
    '''
    # NOTE(review): the tuple unpacking below is garbled in this copy of the
    # source (",." after xfero_logger) and will not parse; compare against
    # the original repository before relying on this function.
    try:
        (xfero_logger,.xfero_database, outbound_directory, transient_directory,
         error_directory, xfero_pid) = get_conf.get.xfero_config()
    except Exception as err:
        print('Cannot get XFERO Config: %s' % err)
        raise err
    logging.config.fileConfig(xfero_logger)
    # create logger
    logger = logging.getLogger('database')
    db_location = xfero_database
    try:
        con = lite.connect(db_location)
        cur = con.cursor()
        cur = con.execute("pragma foreign_keys=ON")
        cur.execute(
            'SELECT cotspattern_prototype FROM \
            XFERO_COTS_Pattern WHERE cotspattern_id=?', (cotspattern_id))
    except lite.Error as err:
        logger.error('Error Selecting row from XFERO_COTS_Pattern table: %s. \
        (XFERO_Token=%s)', err.args[0], xfero_token)
        raise err
    # fetchone(): the primary-key lookup yields at most one row.
    rows = cur.fetchone()
    cur.close()
    con.close()
    return rows
def read_with_name_XFERO_COTS_Pattern(cotspattern_pattern_name, xfero_token=False):
'''
**Purpose:**
The function ```read_with_name_XFERO_COTS_Pattern``` is a script to retrieve a
specific row from the XFERO_COTS_Pattern table that matched the COTS Pattern
Name supplied.
It performs the following SQL statement:
```'SELECT cotspattern_id, cotspattern_product, cotspattern_pattern_name,
cotspattern_prototype FROM XFERO_COTS_Pattern
WHERE cotspattern_pattern_name=?', (cotspattern_pattern_name,)```
**Usage Notes:**
None
*Example usage:*
```read_XFERO_COTS_Pattern(cotspattern_pattern_name)```
:param cotspattern_pattern_name: which identifies the row to retrieve
:returns: rows: A Tuple of the selected rows.
'''
try:
(xfero_logger,.xfero_database, outbound_directory, transient_directory,
error_directory, xfero_pid) = get_conf.get.xfero_config()
except Exception as err:
print('Cannot get XFERO Config: %s' % err)
raise err
logging.config.fileConfig(xfero_logger)
# create logger
logger = logging.getLogger('database')
db_location = xfero_database
try:
con = lite.connect(db_location)
cur = con.cursor()
cur = con.execute("pragma foreign_keys=ON")
cur.execute(
'SELECT cotspattern_id, \
cotspattern_product, \
|
dblenkus/rolca | src/rolca/core/views.py | Python | apache-2.0 | 3,758 | 0.000532 | """.. Ignore pydocstyle D400.
==========
Core views
==========
.. autofunction:: rolca.core.views.upload
"""
import io
import json
import logging
import os
import zipfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotAllowed,
)
from django.shortcuts import get_object_or_404
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from rolca.core.models import Contest, File, Submission, Theme
logger = logging.getLogger(__name__)
@login_required
def download_contest(request, contest_id):
    """Bundle every submission of the contest into a zip archive response."""
    contest = get_object_or_404(Contest, pk=contest_id)
    contest_slug = slugify(contest.title)
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, mode='w') as archive:
        for theme in Theme.objects.filter(contest=contest):
            theme_dir = os.path.join(contest_slug, slugify(theme.title))
            # Add an explicit (empty) directory entry for the theme.
            archive.writestr(zipfile.ZipInfo(theme_dir + "/"), '')
            untitled = 0
            for submission in Submission.objects.filter(theme=theme):
                # Untitled submissions are numbered to keep names unique.
                if not submission.title:
                    untitled += 1
                base_name = slugify(
                    '{}-{}'.format(submission.author,
                                   submission.title or untitled)
                )
                archive.write(
                    submission.photo.file.path,
                    os.path.join(theme_dir, '{}.jpg'.format(base_name)),
                )
    response = HttpResponse(
        buffer.getvalue(), content_type='application/x-zip-compressed'
    )
    response['Content-Disposition'] = 'attachment; filename="{}.zip"'.format(
        contest_slug
    )
    response['Content-Length'] = buffer.tell()
    return response
@csrf_exempt
def upload(request):
    """Handle an uploaded photo and create a new File object.

    Returns a JSON description of the stored file, or an HTTP error
    response for bad method, anonymous user, missing file, oversized file
    or excessive image resolution.
    """
    if request.method != 'POST':
        logger.warning("Upload request other than POST.")
        return HttpResponseNotAllowed(['POST'], 'Only POST accepted')
    if not request.user.is_authenticated:
        logger.warning('Anonymous user tried to upload file.')
        return HttpResponseForbidden('Please login!')
    # Fix: request.FILES is a (possibly empty) MultiValueDict, never None;
    # the old `is None` check let an empty upload crash on the lookup below.
    if not request.FILES:
        logger.warning("Upload request without attached image.")
        return HttpResponseBadRequest('Must have files attached!')
    fn = request.FILES[u'files[]']
    logger.info("Image received.")
    file_ = File(file=fn, user=request.user)
    if file_.file.size > settings.MAX_UPLOAD_SIZE:
        logger.warning("Too big file.")
        return HttpResponseBadRequest(
            "File can't exceed size of {}KB".format(settings.MAX_UPLOAD_SIZE / 1024)
        )
    max_image_resolution = settings.MAX_IMAGE_RESOLUTION
    if max(file_.file.width, file_.file.height) > max_image_resolution:
        # Fix: was a copy-pasted "Too big file." message.
        logger.warning("Too big image resolution.")
        return HttpResponseBadRequest(
            "File can't exceed size of {}px".format(settings.MAX_IMAGE_RESOLUTION)
        )
    file_.save()
    # Response shape follows the jQuery-File-Upload plugin convention.
    result = [
        {
            "name": os.path.basename(file_.file.name),
            "size": file_.file.size,
            "url": file_.file.url,
            "thumbnail": file_.thumbnail.url,
            "delete_url": '',
            "delete_type": "POST",
        }
    ]
    response_data = json.dumps(result)
    return HttpResponse(response_data, content_type='application/json')
|
stdweird/aquilon | tests/broker/test_usecase_hacluster.py | Python | apache-2.0 | 15,847 | 0.000694 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing how a HA cluster might be configured."""
import unittest
import os.path
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUsecaseHACluster(TestBrokerCommand):
    def test_100_add_cluster1(self):
        """Create HA cluster hacl1 (NY campus) in the user's sandbox."""
        user = self.config.get("unittest", "user")
        command = ["add", "cluster", "--cluster", "hacl1", "--campus", "ny",
                   "--down_hosts_threshold", 0, "--archetype", "hacluster",
                   "--sandbox", "%s/utsandbox" % user,
                   "--personality", "vcs-msvcs"]
        self.successtest(command)
    def test_100_add_cluster2(self):
        """Create HA cluster hacl2 (NY campus) in the user's sandbox."""
        user = self.config.get("unittest", "user")
        command = ["add", "cluster", "--cluster", "hacl2", "--campus", "ny",
                   "--down_hosts_threshold", 0, "--archetype", "hacluster",
                   "--sandbox", "%s/utsandbox" % user,
                   "--personality", "vcs-msvcs"]
        self.successtest(command)
    def test_110_add_members(self):
        """Bind server2..server5 to the clusters, alternating hacl1/hacl2."""
        for i in range(0, 4):
            server_idx = i + 2
            cluster_idx = (i % 2) + 1
            self.successtest(["cluster", "--cluster", "hacl%d" % cluster_idx,
                              "--hostname", "server%d.aqd-unittest.ms.com" %
                              server_idx])
    def test_115_add_cluster_srv(self):
        """Add a service address to each cluster and verify the DSDB calls."""
        ip1 = self.net.unknown[0].usable[26]
        ip2 = self.net.unknown[0].usable[27]
        # Each service address must be registered in DSDB exactly once.
        self.dsdb_expect_add("hacl1.aqd-unittest.ms.com", ip1)
        self.successtest(["add", "service", "address",
                          "--service_address", "hacl1.aqd-unittest.ms.com",
                          "--name", "hacl1", "--cluster", "hacl1",
                          "--ip", ip1, "--interfaces", "eth0"])
        self.dsdb_expect_add("hacl2.aqd-unittest.ms.com", ip2)
        self.successtest(["add", "service", "address",
                          "--service_address", "hacl2.aqd-unittest.ms.com",
                          "--name", "hacl2", "--cluster", "hacl2",
                          "--ip", ip2, "--interfaces", "eth0"])
        self.dsdb_verify()
def test_120_add_resourcegroups(self):
for cl in range(1, 3):
for rg in range(1, 3):
plenary = self.plenary_name("resource",
"cluster", "hacl%d" % cl,
"resourcegroup",
"hacl%dg%d" % (cl, rg),
"config")
self.successtest(["add", "resourcegroup",
"--cluster", "hacl%d" % cl,
"--resourcegroup", "hacl%dg%d" % (cl, rg)])
self.failUnless(os.path.exists(plenary),
"Plenary '%s' does not exist" % plenary)
def test_125_add_apps(self):
for cl in range(1, 3):
for rg in range(1, 3):
plenary = self.plenary_name("resource",
"cluster", "hacl%d" % cl,
"resourcegroup",
"hacl%dg%d" % (cl, rg),
"application",
"hacl%dg%dapp" % (cl, rg),
"config")
self.successtest(["add", "application",
"--cluster", "hacl%d" % cl,
"--resourcegroup", "hacl%dg%d" % (cl, rg),
"--application", "hacl%dg%dapp" % (cl, rg),
"--eonid", 42])
self.failUnless(os.path.exists(plenary),
"Plenary '%s' does not exist" % plenary)
def test_125_add_fs(self):
for cl in range(1, 3):
for rg in range(1, 3):
plenary = self.plenary_name("resource",
"cluster", "hacl%d" % cl,
"resourcegroup",
"hacl%dg%d" % (cl, rg),
"filesystem",
"hacl%dg%dfs" % (cl, rg),
"config")
self.successtest(["add", "filesystem", "--type", "ext3",
"--cluster", "hacl%d" % cl,
"--resourcegroup", "hacl%dg%d" % (cl, rg),
"--filesystem", "hacl%dg%dfs" % (cl, rg),
"--mountpoint", "/d/d%d/app" % rg,
"--blockdevice", "/dev/vx/dg.0/gnr.%d" % rg,
"--bootmount", "--dumpfreq=1",
"--fsckpass=3", "--options=rw"])
self.failUnless(os.path.exists(plenary),
"Plenary '%s' does not exist" % plenary)
def test_125_add_appsrv(self):
# grep-friendly syntax
ips = [self.net.unknown[0].usable[28],
self.net.unknown[0].usable[29]]
for cl in range(1, 3):
plenary = self.plenary_name("resource",
"cluster", "hacl%d" % cl,
"resourcegroup",
"hacl%dg1" % cl,
| "service_address",
"hacl%dg1addr" % cl,
"config")
self.dsdb_expect_add("hacl%dg1.aqd-unittest.ms.com" % cl,
ips[cl - 1])
self.successtest(["add", "service | ", "address",
"--cluster", "hacl%d" % cl,
"--resourcegroup", "hacl%dg1" % cl,
"--service_address", "hacl%dg1.aqd-unittest.ms.com" % cl,
"--name", "hacl%dg1addr" % cl,
"--ip", ips[cl - 1], "--interfaces", "eth0"])
self.failUnless(os.path.exists(plenary),
"Plenary '%s' does not exist" % plenary)
self.dsdb_verify()
def test_130_add_lb(self):
# Multi-A record pointing to two different service IPs
ips = [self.net.unknown[0].usable[30],
self.net.unknown[0].usable[31]]
# TODO: range(1, 3) once multi-A records are sorted out
for cl in range(1, 2):
plenary = self.plenary_name("resource",
"cluster", "hacl%d" % cl,
"resourcegroup",
"hacl%dg2" % cl,
"service_address",
"hacl%dg2addr" % cl,
"config")
self.dsdb_expect_add("hashared.aqd-unittest.ms.com", ips[cl - 1])
self.successtest(["add", "service", "address",
"--cluster", "hacl%d" % cl,
"--resourcegroup", "hacl%dg2" % cl,
"--service_address", "hashared.aqd-unittest.ms.com",
"-- |
koery/win-sublime | Data/Packages/Default/mark.py | Python | mit | 1,396 | 0.005014 | import sublime, sublime_plugin
class SetMarkCommand(sublime_plugin.TextCommand):
    """Remember the current selection as the "mark" regions."""

    def run(self, edit):
        current_selection = list(self.view.sel())
        self.view.add_regions(
            "mark", current_selection, "mark", "dot",
            sublime.HIDDEN | sublime.PERSISTENT)
class SwapWithMarkCommand(sublime_plugin.TextCommand):
    """Exchange the current selection with the stored mark regions."""

    def run(self, edit):
        previous_mark = self.view.get_regions("mark")
        # The current selection becomes the new mark ...
        self.view.add_regions(
            "mark", list(self.view.sel()), "mark", "dot",
            sublime.HIDDEN | sublime.PERSISTENT)
        # ... and the old mark (if any) becomes the selection.
        if previous_mark:
            selection = self.view.sel()
            selection.clear()
            for region in previous_mark:
                selection.add(region)
class SelectToMarkCommand(sublime_plugin.TextCommand):
    """Extend each selection region to cover the matching mark region."""

    def run(self, edit):
        marks = self.view.get_regions("mark")
        selection = self.view.sel()
        paired = min(len(marks), len(selection))
        # Union each selection region with its mark counterpart; any
        # surplus selection regions are kept unchanged.
        merged = [selection[i].cover(marks[i]) for i in range(paired)]
        merged.extend(selection[i] for i in range(paired, len(selection)))
        selection.clear()
        for region in merged:
            selection.add(region)
class DeleteToMark(sublime_plugin.TextCommand):
    """Delete the text between the selection and the mark, saving it to
    the kill ring (emacs-style kill-region)."""

    def run(self, edit):
        view = self.view
        view.run_command("select_to_mark")
        view.run_command("add_to_kill_ring", {"forward": False})
        view.run_command("left_delete")
willemw12/get_iplayer_downloader | src/get_iplayer_downloader/ui/log_dialog.py | Python | gpl-3.0 | 5,896 | 0.007463 | from gi.repository import Gtk, Pango
# Load application-wide definitions
import get_iplayer_downloader
#NOTE Import module, not symbol names inside a module, to avoid circular import
import get_iplayer_downloader.ui.main_window
from get_iplayer_downloader import command_util
from get_iplayer_downloader.tools import markup
from get_iplayer_downloader.ui.tools.dialog import ExtendedMessageDialog
#NOTE Positive ID numbers are for user-defined buttons
CLEAR_CACHE_BUTTON_ID = 1
RESET_ERROR_COUNT_BUTTON_ID = 2
FULL_LOG_BUTTON_ID = 3
SUMMARY_LOG_BUTTON_ID = 4
class LogViewerDialogWrapper(object):
    """Wrapper around the download-log dialog.

    Shows the (summary or detailed) download log in a scrollable label and
    offers buttons to clear the log/image cache and to reset the error
    count shown in the main window's progress bar.
    """

    def __init__(self, main_controller):
        # Main window controller; used by the button handlers in run().
        self.main_controller = main_controller
        self._init_dialog()

    def _init_dialog(self):
        """Create the dialog, configure the log label and the buttons."""
        self.dialog = ExtendedMessageDialog(self.main_controller.main_window, 0,
                                            Gtk.MessageType.INFO, Gtk.ButtonsType.NONE,
                                            "", title="download log - " + get_iplayer_downloader.PROGRAM_NAME)

        label = self.dialog.get_scrolled_label()
        label.set_valign(Gtk.Align.START)
        label.set_halign(Gtk.Align.START)
        label.set_selectable(True)
        #label.override_font(Pango.FontDescription("monospace small"))
        label.override_font(Pango.FontDescription("monospace 10"))
        #ALTERNATIVE
        #css_provider = Gtk.CssProvider()
        #css_provider.load_from_data(b""" * { font: monospace; font-size: 10; } """)
        #context = label.get_style_context()
        #context.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)

        self.dialog.add_button("Clear log and cache", CLEAR_CACHE_BUTTON_ID)
        self.dialog.add_button("Reset error count", RESET_ERROR_COUNT_BUTTON_ID)
        self.dialog.add_button("Detailed log", FULL_LOG_BUTTON_ID)
        self.dialog.add_button("Log", SUMMARY_LOG_BUTTON_ID)
        self.dialog.add_button(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)

        # Dialog buttons are laid out from left to right
        button = self.dialog.get_action_area().get_children()[0]
        #button.set_image(Gtk.Image(icon_name=Gtk.STOCK_DELETE))
        button.set_tooltip_text("Remove log files and image cache files")
        button = self.dialog.get_action_area().get_children()[1]
        #button.set_image(Gtk.Image(icon_name=Gtk.STOCK_CLEAR))
        button.set_tooltip_text("Reset error count in the progress bar")
        button = self.dialog.get_action_area().get_children()[2]
        button.set_image(Gtk.Image(icon_name=Gtk.STOCK_REFRESH))
        button.set_tooltip_text("Refresh today's download log")
        button = self.dialog.get_action_area().get_children()[3]
        button.set_image(Gtk.Image(icon_name=Gtk.STOCK_REFRESH))
        button.set_tooltip_text("Refresh today's summary download log")
        #button.grab_focus()
        # Close button
        button = self.dialog.get_action_area().get_children()[4]
        button.grab_focus()
        self.dialog.set_default_response(Gtk.ResponseType.CLOSE)
        #self.dialog.format_secondary_text("")
        self.dialog.get_content_area().set_size_request(get_iplayer_downloader.ui.main_window.WINDOW_LARGE_WIDTH,
                                                        get_iplayer_downloader.ui.main_window.WINDOW_LARGE_HEIGHT)

    def run(self):
        """Show the dialog and handle button presses until it is closed.

        Loops on self.dialog.run(): the "Log"/"Detailed log" buttons reload
        the log text, the other buttons delegate to the main controller,
        and Close/Delete ends the loop.
        """
        button_id_prev = Gtk.ResponseType.CLOSE
        button_id = SUMMARY_LOG_BUTTON_ID
        full = False
        while True:
            if button_id == FULL_LOG_BUTTON_ID or button_id == SUMMARY_LOG_BUTTON_ID:
                full = (button_id == FULL_LOG_BUTTON_ID)
                if full:
                    message_format = "Detailed Download Log"
                else:
                    message_format = "Download Log"
                # The summary view is rendered with Pango markup, the
                # detailed view as plain text.
                markup = not full
                log_output = command_util.download_log(full=full, markup=markup)

                # Set dialog content title
                self.dialog.set_property("text", message_format)

                # Set dialog content text
                #NOTE If full download log text is too large, it won't be displayed
                if markup:
                    self.dialog.format_tertiary_scrolled_markup(log_output)
                else:
                    self.dialog.format_tertiary_scrolled_text(log_output)

                # Grab focus to enable immediate page-up/page-down scrolling with the keyboard
                #label = self.dialog.get_scrolled_label()
                #scrolled_window = label.get_parent().get_parent()
                #scrolled_window.grab_focus()

            if button_id == FULL_LOG_BUTTON_ID or button_id == SUMMARY_LOG_BUTTON_ID:
                if button_id_prev != button_id:
                    # Log view changed (different log view type or log files removed)
                    # Scroll to top
                    label = self.dialog.get_scrolled_label()
                    adjustment = label.get_parent().get_vadjustment()
                    adjustment.set_value(0.0)
                    adjustment.value_changed()
                    #adjustment = label.get_parent().set_vadjustment(adjustment)

            if button_id != RESET_ERROR_COUNT_BUTTON_ID:
                # No need to track RESET_ERROR_COUNT_BUTTON_ID because it doesn't affect the log view
                button_id_prev = button_id

            button_id = self.dialog.run()
            if button_id == CLEAR_CACHE_BUTTON_ID:
                command_util.clear_cache()
                self.main_controller.on_progress_bar_update(None)
            elif button_id == RESET_ERROR_COUNT_BUTTON_ID:
                self.main_controller.invalidate_error_offset()
            elif button_id == Gtk.ResponseType.CLOSE or button_id == Gtk.ResponseType.DELETE_EVENT:
                break

    def destroy(self):
        """Destroy the underlying Gtk dialog."""
        #if self.dialog is not None:
        self.dialog.destroy()
|
gmr/queries | queries/results.py | Python | bsd-3-clause | 3,382 | 0 | """
query or callproc Results
"""
import logging
import psycopg2
LOGGER = logging.getLogger | (__name__)
class Results(object):
    """The :py:class:`Results` class contains the results returned from
    :py:meth:`Session.query <queries.Session.query>` and
    :py:meth:`Session.callproc <queries.Session.callproc>`. It is able to act
    as an iterator and provides many different methods for accessing the
    information about and results from a query.

    :param psycopg2.extensions.cursor cursor: The cursor for the results

    """
    def __init__(self, cursor):
        self.cursor = cursor

    def __getitem__(self, item):
        """Fetch an individual row from the result set

        :rtype: mixed
        :raises: IndexError

        """
        try:
            self.cursor.scroll(item, 'absolute')
        except psycopg2.ProgrammingError:
            raise IndexError('No such row')
        else:
            return self.cursor.fetchone()

    def __iter__(self):
        """Iterate through the result set

        :rtype: mixed

        """
        if self.cursor.rowcount:
            self._rewind()
            for row in self.cursor:
                yield row

    def __len__(self):
        """Return the number of rows that were returned from the query

        :rtype: int

        """
        # rowcount is -1 when no query has produced a result set yet
        return self.cursor.rowcount if self.cursor.rowcount >= 0 else 0

    def __nonzero__(self):
        return bool(self.cursor.rowcount)

    def __bool__(self):
        # Python 3 delegates to the Python 2 implementation
        return self.__nonzero__()

    def __repr__(self):
        return '<queries.%s rows=%s>' % (self.__class__.__name__, len(self))

    def as_dict(self):
        """Return a single row result as a dictionary. If the results contain
        multiple rows, a :py:class:`ValueError` will be raised.

        :return: dict
        :raises: ValueError

        """
        if not self.cursor.rowcount:
            return {}

        self._rewind()
        if self.cursor.rowcount == 1:
            return dict(self.cursor.fetchone())
        else:
            raise ValueError('More than one row')

    def count(self):
        """Return the number of rows that were returned from the query

        :rtype: int

        """
        return self.cursor.rowcount

    def free(self):
        """Used in asynchronous sessions for freeing results and their locked
        connections.

        """
        LOGGER.debug('Invoking synchronous free has no effect')

    def items(self):
        """Return all of the rows that are in the result set.

        :rtype: list

        """
        if not self.cursor.rowcount:
            return []

        # Use the shared helper instead of duplicating the scroll call,
        # keeping the "rewind" behavior defined in a single place.
        self._rewind()
        return self.cursor.fetchall()

    @property
    def rownumber(self):
        """Return the current offset of the result set

        :rtype: int

        """
        return self.cursor.rownumber

    @property
    def query(self):
        """Return a read-only value of the query that was submitted to
        PostgreSQL.

        :rtype: str

        """
        return self.cursor.query

    @property
    def status(self):
        """Return the status message returned by PostgreSQL after the query
        was executed.

        :rtype: str

        """
        return self.cursor.statusmessage

    def _rewind(self):
        """Rewind the cursor to the first row"""
        self.cursor.scroll(0, 'absolute')
|
Enteee/pdml2flow | pdml2flow/flow.py | Python | apache-2.0 | 3,496 | 0.001144 | # vim: set fenc=utf8 ts=4 sw=4 et :
import json
import dict2xml
from .autovivification import AutoVivification
from .conf import Conf
from .utils import call_plugin
from .logging import *
class Flow():
    """Accumulates frames sharing the same flow id (as derived from
    Conf.FLOW_DEF) and notifies the configured plugins on flow lifecycle
    events: flow_new, frame_new, flow_expired, flow_end."""

    # The overall frame time
    newest_overall_frame_time = 0

    @staticmethod
    def get_flow_id(frame):
        """Build a flow id string from the Conf.FLOW_DEF fields of `frame`.

        Returns None when none of the fields resolved to a real value
        (i.e. all lookups produced empty AutoVivification placeholders).
        """
        flowid = [frame[d] for d in Conf.FLOW_DEF]
        valid = any([type(i) is not AutoVivification for i in flowid])
        # check if flowid is empty
        if not valid:
            return None
        return str(flowid)

    def __init__(self, first_frame):
        """Start a new flow from `first_frame` and notify the plugins."""
        first_frame_time = first_frame[Conf.FRAME_TIME]
        self.__newest_frame_time = self.__first_frame_time = first_frame_time
        self.__id = self.get_flow_id(first_frame)
        # Frames are kept either as a list or as one merged tree,
        # depending on the FRAMES_ARRAY setting.
        if Conf.FRAMES_ARRAY:
            self.__frames = []
        else:
            self.__frames = AutoVivification()
        self.__framecount = 0
        for plugin in Conf.PLUGINS:
            call_plugin(
                plugin,
                'flow_new',
                self,
                first_frame.cast_dicts(dict)
            )
        self.add_frame(first_frame)

    def __hash__(self):
        return hash(self.__id)

    def __eq__(self, other):
        return self.__id == other.__id

    @property
    def id(self):
        return self.__id

    @property
    def frames(self):
        """Return the accumulated frame data as plain dicts."""
        # clean the frame data
        if Conf.FRAMES_ARRAY:
            self.__frames = [
                f.clean_empty()
                for f in self.__frames
            ]
            ret = [
                f.cast_dicts(dict)
                for f in self.__frames
            ]
        else:
            self.__frames = self.__frames.clean_empty()
            ret = self.__frames.cast_dicts(dict)
        return ret

    @property
    def first_frame_time(self):
        return self.__first_frame_time

    @property
    def newest_frame_time(self):
        return self.__newest_frame_time

    @property
    def framecount(self):
        return self.__framecount

    def add_frame(self, frame):
        """Merge `frame` into this flow and notify the plugins."""
        # check if frame expands flow length
        frame_time = frame[Conf.FRAME_TIME]
        self.__first_frame_time = min(self.__first_frame_time, frame_time)
        self.__newest_frame_time = max(self.__newest_frame_time, frame_time)
        self.__framecount += 1
        # Extract data
        if Conf.FRAMES_ARRAY:
            self.__frames.append(
                frame.clean_empty()
            )
        else:
            self.__frames.merge(
                frame.clean_empty()
            )
            if Conf.COMPRESS_DATA:
                self.__frames = self.__frames.compress()
        debug(
            'flow duration: {}'.format(
                self.__newest_frame_time - self.__first_frame_time
            )
        )
        for plugin in Conf.PLUGINS:
            call_plugin(
                plugin,
                'frame_new',
                frame.cast_dicts(dict),
                self
            )

    def not_expired(self):
        """True while this flow is still within the flow buffer window."""
        return self.__newest_frame_time > (Flow.newest_overall_frame_time - Conf.FLOW_BUFFER_TIME)

    def expired(self):
        """Notify plugins that the flow expired, then finalize it."""
        for plugin in Conf.PLUGINS:
            call_plugin(
                plugin,
                'flow_expired',
                self
            )
        self.end()

    def end(self):
        """Notify plugins that the flow ended."""
        for plugin in Conf.PLUGINS:
            call_plugin(
                plugin,
                'flow_end',
                self
            )
|
robwebset/script.ebooks | resources/lib/mobi/__init__.py | Python | gpl-2.0 | 7,612 | 0.011167 | #!/usr/bin/env python
# encoding: utf-8
# https://github.com/kroo/mobi-python
"""
Mobi.py
Created by Elliot Kroo on 2009-12-25.
Copyright (c) 2009 Elliot Kroo. All rights reserved.
"""
import sys
from struct import *
from lz77 import uncompress_lz77
class Mobi:
  """Reader for Mobipocket (.mobi) files: parses the PalmDB record table
  and the PalmDOC/MOBI/EXTH headers, and exposes the book's text records,
  images and basic metadata. Python 2 only (py2 except/xrange syntax)."""

  def parse(self):
    """ reads in the file, then parses record tables"""
    self.contents = self.f.read()
    self.header = self.parseHeader()
    self.records = self.parseRecordInfoList()
    self.config = self.populate_config()

  def readRecord(self, recordnum, disable_compression=False):
    """Return the (decompressed) payload of record `recordnum`, or None
    if the record bounds cannot be determined."""
    compressionType = self.config['palmdoc']['Compression']
    try:
      start = self.records[recordnum]['record Data Offset']
      # @TODO offset by record is not always 1
      # the correct record offset can be determined by examining
      # `book.records`
      end = self.records[recordnum + 1]['record Data Offset']
    except KeyError, e:
      sys.stderr.write('Could not find key value: %s\n' % str(e))
      return
    # @TODO configuration not present should run configurator.
    if not self.config:
      return
    if (compressionType == 1 or disable_compression):
      # Compression type 1 means "no compression": return raw bytes.
      return self.contents[start : end]
    elif (compressionType == 2):
      # PalmDOC (LZ77) compression; `extra bytes` trail each record and
      # must be excluded before decompressing.
      extra = self.config['mobi']['extra bytes']
      result = uncompress_lz77(self.contents[start : end - extra])
      return result
    else:
      # NOTE(review): exiting the whole process on an unknown compression
      # type is drastic; raising an exception would be friendlier.
      sys.stderr.write('Error: could not recognize compression type "%s".' \
        % str(compressionType))
      exit(1)

  def readImageRecord(self, imgnum):
    """Return the raw bytes of image number `imgnum` (never decompressed)."""
    if self.config:
      recordnum = self.config['mobi']['First Image index'] + imgnum
      return self.readRecord(recordnum, disable_compression=True)

  def author(self):
    "Returns the author of the book"
    # EXTH record type 100 holds the author metadata.
    return self.config['exth']['records'][100]

  def title(self):
    "Returns the title of the book"
    return self.config['mobi']['Full Name']

  ########################### Private API ###########################

  def __init__(self, filename):
    """Open `filename`, which may be a path or a file-like object."""
    try:
      # not sure if explicit type checking is the best way to do this.
      if isinstance(filename, str):
        self.f = open(filename, "rb")
      else:
        self.f = filename
    except IOError, e:
      sys.stderr.write("Could not open %s! " % filename)
      raise e
    # Byte offset of the parser into self.contents.
    self.offset = 0

  def __iter__(self):
    """Yield the book's text records, in order."""
    # @TODO configuration not present should run configurator.
    if not self.config:
      return
    for record in range(1, self.config['mobi']['First Non-book index'] - 1):
      yield self.readRecord(record)

  def parseRecordInfoList(self):
    """Parse the PalmDB record info list into a dict keyed by UniqueID."""
    records = {}
    # read in all records in info list
    for recordID in range(self.header['number of records']):
      fields = [
        "record Data Offset",
        "UniqueID"
      ]
      headerfmt = '>II'
      headerlen = calcsize(headerfmt)
      infolist = self.contents[self.offset : self.offset + headerlen]
      # create tuple with info
      results = dict(zip(fields, unpack(headerfmt, infolist)))
      # increment offset into file
      self.offset += headerlen
      # futz around with the unique ID record, as the uniqueID's top 8 bytes
      # are really the "record attributes":
      results['record Attributes'] = \
        (results['UniqueID'] & 0xFF000000) >> 24
      results['UniqueID'] = results['UniqueID'] & 0x00FFFFFF
      # store into the records dict
      records[results['UniqueID']] = results
    return records

  def parseHeader(self):
    """Parse the PalmDB header at the very start of the file."""
    fields = [
      "name",
      "attributes",
      "version",
      "created",
      "modified",
      "backup",
      "modnum",
      "appInfoId",
      "sortInfoID",
      "type",
      "creator",
      "uniqueIDseed",
      "nextRecordListID",
      "number of records"
    ]
    headerfmt = '>32shhIIIIII4s4sIIH'
    headerlen = calcsize(headerfmt)
    header = self.contents[self.offset : self.offset + headerlen]
    # unpack header, zip up into list of tuples
    results = dict(zip(fields, unpack(headerfmt, header)))
    # increment offset into file
    self.offset += headerlen
    return results

  # this function will populate the self.config attribute
  def populate_config(self):
    """Parse the PalmDOC/MOBI/EXTH headers and bundle them into one dict."""
    palmdocHeader = self.parsePalmDOCHeader()
    MobiHeader = self.parseMobiHeader()
    exthHeader = None
    if (MobiHeader['Has EXTH Header']):
      exthHeader = self.parseEXTHHeader()
    config = {
      'palmdoc': palmdocHeader,
      'mobi'   : MobiHeader,
      'exth'   : exthHeader
    }
    return config

  def parseEXTHHeader(self):
    """Parse the EXTH metadata header and its variable-length records."""
    headerfmt = '>III'
    headerlen = calcsize(headerfmt)
    header = self.contents[self.offset:self.offset + headerlen]
    fields = [
      'identifier',
      'header length',
      'record Count'
    ]
    # unpack header, zip up into list of tuples
    results = dict(zip(fields, unpack(headerfmt, header)))
    self.offset += headerlen
    results['records'] = {}
    for record in range(results['record Count']):
      recordType, recordLen = \
        unpack(">II", self.contents[self.offset : self.offset + 8])
      recordData = \
        self.contents[self.offset + 8 : self.offset+recordLen]
      results['records'][recordType] = recordData
      self.offset += recordLen
    return results

  def parseMobiHeader(self):
    """Parse the MOBI header that follows the PalmDOC header in record 0."""
    headerfmt = '> IIII II 40s III IIIII IIII I 36s IIII 8s HHIIIII'
    headerlen = calcsize(headerfmt)
    fields = [
      "identifier",
      "header length",
      "Mobi type",
      "text Encoding",
      "Unique-ID",
      "Generator version",
      "-Reserved",
      "First Non-book index",
      "Full Name Offset",
      "Full Name Length",
      "Language",
      "Input Language",
      "Output Language",
      "Format version",
      "First Image index",
      "First Huff Record",
      "Huff Record Count",
      "First DATP Record",
      "DATP Record Count",
      "EXTH flags",
      "-36 unknown bytes, if Mobi is long enough",
      "DRM Offset",
      "DRM Count",
      "DRM Size",
      "DRM Flags",
      "-Usually Zeros, unknown 8 bytes",
      "-Unknown",
      "Last Image Record",
      "-Unknown",
      "FCIS record",
      "-Unknown",
      "FLIS record",
      "Unknown"
    ]
    header = self.contents[self.offset:self.offset+headerlen]
    # unpack header, zip up into list of tuples
    results = dict(zip(fields, unpack(headerfmt, header)))
    results['Start Offset'] = self.offset
    # The human-readable title lives inside record 0, at the given offset.
    results['Full Name'] = (self.contents[
      self.records[0]['record Data Offset'] + results['Full Name Offset'] :
      self.records[0]['record Data Offset'] + \
      results['Full Name Offset'] + results['Full Name Length']])
    results['Has DRM'] = results['DRM Offset'] != 0xFFFFFFFF
    # Bit 0x40 of the EXTH flags signals that an EXTH header follows.
    results['Has EXTH Header'] = (results['EXTH flags'] & 0x40) != 0
    self.offset += results['header length']
    def onebits(x, width=16):
      # Remove reliance on xrange()?
      return len(filter(lambda x: x == "1",
        (str((x>>i)&1) for i in xrange(width - 1, -1, -1))))
    # Per-record trailing byte count: 2 * popcount of the flag word just
    # before the current offset (lowest bit masked off).
    results['extra bytes'] = \
      2 * onebits(
        unpack(">H", self.contents[self.offset - 2 : self.offset])[0] & 0xFFFE)
    return results

  def parsePalmDOCHeader(self):
    """Parse the PalmDOC header stored at the start of record 0."""
    headerfmt = '>HHIHHHH'
    headerlen = calcsize(headerfmt)
    fields = [
      "Compression",
      "Unused",
      "text length",
      "record count",
      "record size",
      "Encryption Type",
      "Unknown"
    ]
    offset = self.records[0]['record Data Offset']
    header = self.contents[offset:offset+headerlen]
    results = dict(zip(fields, unpack(headerfmt, header)))
    self.offset = offset+headerlen
    return results
|
osiell/oerplib | oerplib/rpc/__init__.py | Python | lgpl-3.0 | 10,786 | 0.001669 | # -*- coding: UTF-8 -*-
##############################################################################
#
# OERPLib
# Copyright (C) 2011-2013 Sébastien Alix.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""This module provides `RPC` connectors which use the `XML-RPC`, `Net-RPC`
or `JSON-RPC` protocol to communicate with an `OpenERP/Odoo` server.
Afterwards, `RPC` services and their associated methods can be accessed
dynamically from the connector returned.
`XML-RPC` and `Net-RPC` provide the same interface, such as services like
``db``, ``common`` or ``object``.
On the other hand, `JSON-RPC` provides a completely different interface, with
services provided by Web modules like ``web/session``,
``web/dataset`` and so on.
"""
from oerplib.rpc import error, service, jsonrpclib
from oerplib.tools import v
# XML-RPC available URL
# '/xmlrpc' => 5.0, 6.0, 6.1, 7.0, 8.0 (legacy path)
# '/openerp/xmlrpc/1' => 6.1, 7.0
# '/xmlrpc/2' => 8.0
XML_RPC_PATHS = ['/xmlrpc', '/openerp/xmlrpc/1', '/xmlrpc/2']
class Connector(object):
    """Connector base class defining the interface used
    to interact with a server.

    :param server: hostname or IP address of the server
    :param port: TCP port the service listens on (any value accepted
        by :func:`int`)
    :param timeout: RPC timeout, in seconds
    :param version: server version, or `None` to auto-detect it later
    :raise: :class:`oerplib.rpc.error.ConnectorError` if `port` is not
        convertible to an integer
    """
    def __init__(self, server, port=8069, timeout=120, version=None):
        self.server = server
        try:
            # Parse the port exactly once (the previous code called
            # int(port) a second time in the `else` branch).
            self.port = int(port)
        except ValueError:
            txt = "The port '{0}' is invalid. An integer is required."
            txt = txt.format(port)
            raise error.ConnectorError(txt)
        self._timeout = timeout
        self.version = version
        self._url = None

    @property
    def timeout(self):
        """Timeout (in seconds) applied to RPC requests."""
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        self._timeout = timeout
class ConnectorXMLRPC(Connector):
    """Connector class using the `XML-RPC` protocol.

    >>> from oerplib import rpc
    >>> cnt = rpc.ConnectorXMLRPC('localhost', port=8069)

    Login and retrieve ID of the user connected:

    >>> uid = cnt.common.login('database', 'user', 'passwd')

    Execute a query:

    >>> res = cnt.object.execute('database', uid, 'passwd', 'res.partner', 'read', [1])

    Execute a workflow query:

    >>> res = cnt.object.exec_workflow('database', uid, 'passwd', 'sale.order', 'order_confirm', 4)
    """
    def __init__(self, server, port=8069, timeout=120, version=None, scheme='http'):
        super(ConnectorXMLRPC, self).__init__(server, port, timeout, version)
        self.scheme = scheme
        # When the server version is known, pick the matching XML-RPC
        # endpoint path directly (see XML_RPC_PATHS at module level).
        if self.version:
            # Server < 6.1
            if v(self.version) < v('6.1'):
                self._url = '{scheme}://{server}:{port}/xmlrpc'.format(
                    scheme=self.scheme, server=self.server, port=self.port)
            # Server >= 6.1 and < 8.0
            elif v(self.version) < v('8.0'):
                self._url = '{scheme}://{server}:{port}/openerp/xmlrpc/1'.format(
                    scheme=self.scheme, server=self.server, port=self.port)
            # Server >= 8.0
            elif v(self.version) >= v('8.0'):
                self._url = '{scheme}://{server}:{port}/xmlrpc/2'.format(
                    scheme=self.scheme, server=self.server, port=self.port)
        # Detect the XML-RPC path to use
        if self._url is None:
            # We begin with the last known XML-RPC path to give the priority to
            # the last server version supported
            paths = XML_RPC_PATHS[:]
            paths.reverse()
            for path in paths:
                url = '{scheme}://{server}:{port}{path}'.format(
                    scheme=self.scheme, server=self.server, port=self.port, path=path)
                try:
                    # Probe the candidate endpoint by asking for the server
                    # version through its 'db' service.
                    db = service.ServiceXMLRPC(
                        self, 'db', '{url}/{srv}'.format(url=url, srv='db'))
                    version = db.server_version()
                except error.ConnectorError:
                    continue
                else:
                    self._url = url
                    self.version = version
                    break

    def __getattr__(self, service_name):
        """Instantiate the requested XML-RPC service lazily and cache it
        as an attribute so subsequent accesses skip this method."""
        url = self._url + '/' + service_name
        srv = service.ServiceXMLRPC(self, service_name, url)
        setattr(self, service_name, srv)
        return srv
class ConnectorXMLRPCSSL(ConnectorXMLRPC):
    """Connector class using the `XML-RPC` protocol over `SSL`.

    Identical to :class:`ConnectorXMLRPC` except that requests are sent
    over HTTPS.
    """
    def __init__(self, server, port=8069, timeout=120, version=None):
        parent = super(ConnectorXMLRPCSSL, self)
        parent.__init__(server, port, timeout, version, scheme='https')
class ConnectorNetRPC(Connector):
    """
    .. note::
        No longer available since `OpenERP 7.0`.

    Connector class using the `Net-RPC` protocol.
    """
    def __init__(self, server, port=8070, timeout=120, version=None):
        super(ConnectorNetRPC, self).__init__(
            server, port, timeout, version)
        if self.version is None:
            # Probe the server for its version; on failure the version
            # simply stays unknown (None).
            try:
                db = service.ServiceNetRPC(self, 'db', self.server, self.port)
                version = db.server_version()
            except error.ConnectorError:
                pass
            else:
                self.version = version

    def __getattr__(self, service_name):
        """Instantiate the requested Net-RPC service lazily and cache it
        as an attribute so subsequent accesses skip this method."""
        srv = service.ServiceNetRPC(
            self, service_name, self.server, self.port)
        setattr(self, service_name, srv)
        return srv
class ConnectorJSONRPC(Connector):
"""Connector class using the `JSON-RPC` protocol.
>>> from oerplib import rpc
>>> cnt = rpc.ConnectorJSONRPC('localhost', port=8069)
Open a user session:
>>> cnt.proxy.web.session.authenticate(db='database', login='admin', password='admin')
{u'jsonrpc': u'2.0', u'id': 202516757,
u'result': {u'username': u'admin', u'user_context': {u'lang': u'fr_FR', u'tz': u'Europe/Brussels', u'uid': 1},
u'db': u'test70', u'uid': 1, u'session_id': u'308816f081394a9c803613895b988540'}}
Read data of a partner:
>>> cnt.proxy.web.dataset.call(model='res.partner', method='read', args=[[1]])
{u'jsonrpc': u'2.0', u'id': 454236230,
u'result': [{u'id': 1, u'comment': False, u'ean13': False, u'property_account_position': False, ...}]}
You can send requests this way too:
>>> cnt.proxy['/web/dataset'].call(model='res.partner', method='read', args=[[1]])
{u'jsonrpc': u'2.0', u'id': 328686288,
u'result': [{u'id': 1, u'comment': False, u'ean13': False, u'property_account_position': False, ...}]}
Or like this:
>>> cnt.proxy['web']['dataset'].call(model='res.partner', method='read', args=[[1]])
{u'jsonrpc': u'2.0', u'id': 102320639,
u'result': [{u'id': 1, u'comment': False, u'ean13': False, u'property_account_position': False, ...}]}
"""
def __init__(self, server, port=8069, timeout=120, version=None,
deserialize=True):
super(ConnectorJSONRPC, self).__init__(server, port, timeout, version)
self.deserialize = deserialize
self._proxy = self._get_proxy(ssl=False)
def _get_proxy(self, ssl=False):
"""Returns a :class:`Proxy <oerplib.rpc.jsonrpclib.Proxy>` instance
corresponding to the server version used.
"""
# Detect the server version
if self.version is None:
proxy = jsonrpclib.Proxy(
self.server, self.port, self._timeout,
ssl=ssl, deserialize=self.deserialize)
result = proxy.web.webclient.version_info()['result']
|
svanschalkwyk/datafari | windows/python/Lib/test/test_threadedtempfile.py | Python | apache-2.0 | 2,021 | 0.004948 | """
Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
in each of NUM_THREADS threads, recording the number of successes and
failures. A failure is a bug in tempfile, and may be due to:
+ Trying to create more than one tempfile with the same name.
+ Trying to delete a tempfile that doesn't still exist.
+ Something we've never seen before.
By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50. This is enough to
create about 150 failures per run under Win98SE in 2.0, and runs pretty
quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
provoking | a 2.0 failure under Linux.
"""
NUM_THREADS = 20
FILES_PER_THREAD = 50
import tempfile
from test.test_support import start_threads, run_unittest, import_module
threading = import_module('threading')
import unittest
import StringIO
from traceback import print_exc
startEvent = threading.Event()
class TempFileGreedy(threading.Thread):
error_count = 0
ok_count = 0
def run(self):
self.errors = StringIO.StringIO()
startEvent.wait()
for i in range(FILES_PER_ | THREAD):
try:
f = tempfile.TemporaryFile("w+b")
f.close()
except:
self.error_count += 1
print_exc(file=self.errors)
else:
self.ok_count += 1
class ThreadedTempFileTest(unittest.TestCase):
def test_main(self):
threads = [TempFileGreedy() for i in range(NUM_THREADS)]
with start_threads(threads, startEvent.set):
pass
ok = sum(t.ok_count for t in threads)
errors = [str(t.getName()) + str(t.errors.getvalue())
for t in threads if t.error_count]
msg = "Errors: errors %d ok %d\n%s" % (len(errors), ok,
'\n'.join(errors))
self.assertEqual(errors, [], msg)
self.assertEqual(ok, NUM_THREADS * FILES_PER_THREAD)
def test_main():
run_unittest(ThreadedTempFileTest)
if __name__ == "__main__":
test_main()
|
pmandr/plant_carer | MyGarden/manage.py | Python | gpl-3.0 | 806 | 0 | #!/usr | /bin/env python
import os
import sys
if __name_ | _ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MyGarden.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
earney/asetniop | src/machine/test_registry.py | Python | gpl-2.0 | 1,430 | 0.004895 | # Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for registry.py."""
import unittest
from registry import Registry, machine_registry, NoSuchMachineException
class RegistryClassTestCase(unittest.TestCase):
def test_lookup(self):
registry = Registry()
registry.regis | ter('a', 1)
self.assertEqual(1, registry.get('a'))
def test_unknown_entry(self):
registry = Registry()
with self.assertRaises(NoSuchMachineException):
registry.get('b')
def test_alias(self):
registry = Registry()
registry.register('a', 1)
registry.add_alias('b | ', 'a')
self.assertEqual(registry.resolve_alias('b'), 'a')
self.assertEqual(1, registry.get('b'))
def test_all_names(self):
registry = Registry()
registry.register('a', 1)
registry.register('b', 5)
registry.add_alias('c', 'b')
self.assertEqual(['a', 'b'], sorted(registry.get_all_names()))
class MachineRegistryTestCase(object):
def test_sidewinder(self):
self.assertEqual(machine_registery.get("NKRO Keyboard"),
machine_registry.get('Microsoft Sidewinder X4'))
def test_unknown_machine(self):
with self.assertRaises(NoSuchMachineException):
machine_registry.get('No such machine')
if __name__ == '__main__':
unittest.main()
|
jawilson/home-assistant | homeassistant/components/aws/config_flow.py | Python | apache-2.0 | 507 | 0.001972 | """Config flow for AWS component."""
from homeassistant import config_entries
from .const import DOMAIN
class AWSFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
async def async_step_import(self, user_input):
"""Import a config entry."""
if self._async_current_e | ntries():
return self.async_abort(reason="single_instance_allowed")
| return self.async_create_entry(title="configuration.yaml", data=user_input)
|
shaun-h/pythonista-objc-utils | Audio Recording.py | Python | mit | 1,104 | 0.033514 | # this is based in the audio recording example provided by omz (Ole Zorn)
from objc_util import *
import os
def main():
AVAudioSession = ObjCClass('AVAudioSession')
NSURL = ObjCClass('NSURL')
AVAudioRecorder = ObjCClass('AVAudioRecorder')
shared_session = AVAudioSession.sharedInstance()
category_set = shared_session.setCategory_error_(ns('AVAudioSessionCategoryPlayAndRecord'), None)
settings = {ns('AVFormatIDKey'): ns(1633772320), ns('AVSampleRateKey'):ns(44100.00), ns('AVNumberOfChannels | Key'):ns(2)}
output_path = os.path.abspath('Recording.m4a')
out_url = NSURL.fileURLWithPath_(ns(output_path))
rec | order = AVAudioRecorder.alloc().initWithURL_settings_error_(out_url, settings, None)
started_recording = recorder.record()
if started_recording:
print('Recording started, press the "stop script" button to end recording...')
try:
while True:
pass
except KeyboardInterrupt:
print('Stopping...')
recorder.stop()
recorder.release()
print('Stopped recording.')
import console
console.quicklook(os.path.abspath('Recording.m4a'))
if __name__ == '__main__':
main()
|
davidwboswell/documentation_autoresponse | lib/l10n_utils/gettext.py | Python | mpl-2.0 | 8,132 | 0 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import codecs
import os
import re
from os.path import join
from tokenize import generate_tokens, NAME, NEWLINE, OP, untokenize
from django.conf import settings
from jinja2 import Environment
from dotlang import parse as parse_lang, get_lang_path
REGEX_URL = re.compile(r'.* (\S+/\S+\.[^:]+).*')
def parse_po(path):
msgs = {}
if not os.path.exists(path):
return msgs
with codecs.open(path, 'r', 'utf-8') as lines:
def parse_string(s):
return s.strip('"').replace('\\"', '"')
def extract_content(s):
# strip the first word and quotes
return parse_string(s.split(' ', 1)[1])
msgid = None
msgpath = None
for line in lines:
line = line.strip()
if line.startswith('#'):
matches = REGEX_URL.match(line)
if matches:
msgpath = matches.group(1)
elif line.startswith('msgid'):
msgid = extract_content(line)
elif line.startswith('msgstr') and msgid and msgpath:
if msgpath not in msgs:
msgs[msgpath] = []
msgs[msgpath].append(msgid)
msgid = None
msgpath = None
elif msgid is not None:
msgid += parse_string(line)
return msgs
def po_msgs():
return parse_po(join(settings.ROOT,
'locale/templates/LC_MESSAGES/messages.pot'))
def translated_strings(file_):
path = join(settings.ROOT, 'locale', 'templates', file_)
trans = parse_lang(path).keys()
return trans
def lang_file(name, lang):
return join(settings.ROOT, 'locale', lang, name)
def is_template(path):
(base, ext) = os.path.splitext(path) |
return ext == '.html'
def is_python(path):
(base, ext) = os.path.splitext(path)
return ext == '.py'
def parse_python(path):
"""
Look though a python file and extract the specified `LANG_FILES` constant
value and return it.
`LANG_FILES` must be defined at the module level, and can be a string or
list of strings.
"""
result = []
in_lang = False
in_lang_val = False
with codecs.op | en(path, encoding='utf-8') as src_f:
tokens = generate_tokens(src_f.readline)
for token in tokens:
t_type, t_val, (t_row, t_col) = token[:3]
# find the start of the constant declaration
if t_type == NAME and t_col == 0 and t_val == 'LANG_FILES':
in_lang = True
continue
if in_lang:
# we only want the value, so start recording after the = OP
if t_type == OP and t_val == '=':
in_lang_val = True
continue
# stop when there's a newline. continuation newlines are a
# different type so multiline list literals work fine
if t_type == NEWLINE:
break
if in_lang_val:
result.append((t_type, t_val))
if result:
new_lang_files = eval(untokenize(result))
if isinstance(new_lang_files, basestring):
new_lang_files = [new_lang_files]
return new_lang_files
return []
def parse_template(path):
"""Look through a template for the lang_files tag and extract the
given lang files"""
src = codecs.open(path, encoding='utf-8').read()
tokens = Environment().lex(src)
lang_files = []
def ignore_whitespace(tokens):
token = tokens.next()
if token[1] == 'whitespace':
return ignore_whitespace(tokens)
return token
for token in tokens:
if token[1] == 'block_begin':
block = ignore_whitespace(tokens)
if block[1] == 'name' and block[2] in ('set_lang_files',
'add_lang_files'):
arg = ignore_whitespace(tokens)
# Extract all the arguments
while arg[1] != 'block_end':
lang_files.append(arg[2].strip('"'))
arg = ignore_whitespace(tokens)
lang_files = filter(lambda x: x, lang_files)
if lang_files:
return lang_files
return []
def langfiles_for_path(path):
"""
Find and return any extra lang files specified in templates or python
source files, or the first entry in the DOTLANG_FILES setting if none.
:param path: path to a file containing strings to translate
:return: list of langfile names.
"""
lang_files = None
if is_template(path):
# If the template explicitly specifies lang files, use those
lang_files = parse_template(join(settings.ROOT, path))
# Otherwise, normalize the path name to a lang file
if not lang_files:
lang_files = [get_lang_path(path)]
elif is_python(path):
# If the python file explicitly specifies lang files, use those
lang_files = parse_python(join(settings.ROOT, path))
if not lang_files:
# All other sources use the first main file
lang_files = settings.DOTLANG_FILES[:1]
return lang_files
def pot_to_langfiles():
"""Update the lang files in /locale/templates with extracted
strings."""
all_msgs = po_msgs()
root = 'templates'
# Start off with some global lang files so that strings don't
# get duplicated everywhere
main_msgs = parse_lang(lang_file('main.lang', root))
main_msgs.update(parse_lang(lang_file('base.lang', root)))
main_msgs.update(parse_lang(lang_file('newsletter.lang', root)))
# Walk through the msgs and put them in the appropriate place. The
# complex part about this is that templates and python files can
# specify a list of lang files to pull from, so we need to check
# all of them for the strings and add it to the first lang file
# specified if not found.
for path, msgs in all_msgs.items():
target = None
lang_files = [lang_file('%s.lang' % f, root)
for f in langfiles_for_path(path)]
# Get the current translations
curr = {}
for f in lang_files:
if os.path.exists(f):
curr.update(parse_lang(f))
# Add translations to the first lang file
target = lang_files[0]
if not os.path.exists(target):
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
with codecs.open(target, 'a', 'utf-8') as out:
for msg in msgs:
if msg not in curr and msg not in main_msgs:
out.write(';%s\n%s\n\n\n' % (msg, msg))
def find_lang_files(lang):
for root, dirs, files in os.walk(lang_file(lang, '')):
parts = root.split('locale/%s/' % lang)
if len(parts) > 1:
base = parts[1]
else:
base = ''
for filename in files:
name, ext = os.path.splitext(filename)
if ext == '.lang':
yield os.path.join(base, filename)
def merge_lang_files(langs):
for lang in langs:
print 'Merging into %s...' % lang
for f in find_lang_files('templates'):
# Make sure the directory exists (might be a subdirectory)
d = os.path.dirname(f)
if d:
d = lang_file(d, lang)
if not os.path.exists(d):
os.makedirs(d)
dest = lang_file(f, lang)
src_msgs = parse_lang(lang_file(f, 'templates'))
dest_msgs = parse_lang(dest)
with codecs.open(dest, 'a', 'utf-8') as out:
for msg in src_msgs:
if msg not in dest_msgs:
out.write('\n\n;%s\n%s\n' % (msg, msg))
|
pepsipepsi/nodebox_opengl_python3 | examples/05-path/05-spider.py | Python | bsd-3-clause | 1,901 | 0.023672 | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.shader import render
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.geometry import distance
def spider(string, x=0, y=0, radius=25, **kwargs):
""" A path filter that creates web threading along the characters of the given string.
Its output can be drawn directly to the canvas or used in a render() function.
Adapted from: http://nodebox.net/code/ | index.php/Path_Filters
"""
# **kwargs represents any additional optional parameters.
# For example: spider("hello", 100, 100, font="Helvetica") =>
| # kwargs = {"font": "Helvetica"}
# We pass these on to the textpath() call in the function;
# so the spider() function takes the same parameters as textpath:
# x, y, font, fontsize, fontweight, ...
font(
kwargs.get("font", "Droid Sans"),
kwargs.get("fontsize", 100))
p = textpath(string, x, y, **kwargs)
n = int(p.length)
m = 2.0
radius = max(radius, 0.1 * fontsize())
points = list(p.points(n))
for i in range(n):
pt1 = choice(points)
pt2 = choice(points)
while distance(pt1.x, pt1.y, pt2.x, pt2.y) > radius:
pt2 = choice(points)
line(pt1.x + random(-m, m),
pt1.y + random(-m, m),
pt2.x + random(-m, m),
pt2.y + random(-m, m))
# Render the function's output to an image.
# Rendering the image beforehand is much faster than calling spider() every frame.
stroke(0.1, 0.1, 0, 0.5)
strokewidth(1)
img = render(spider, 500, 150,
string = "SPIDER",
font = "Droid Sans",
fontsize = 100,
bold = True,
x = 25,
y = 25,
radius = 30)
def draw(canvas):
canvas.clear()
translate(0, 200)
image(img)
canvas.size = 500, 500
canvas.run(draw) |
leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py | Python | bsd-3-clause | 3,776 | 0.001589 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the follow | ing disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be | used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.mocktool import MockTool
class MockSheriffBot(object):
name = "mock-sheriff-bot"
watchers = [
"watcher@example.com",
]
def run_webkit_patch(self, args):
return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n"
class SheriffTest(unittest.TestCase):
def test_post_blame_comment_on_bug(self):
def run():
sheriff = Sheriff(MockTool(), MockSheriffBot())
builders = [
Builder("Foo", None),
Builder("Bar", None),
]
commit_info = Mock()
commit_info.bug_id = lambda: None
commit_info.revision = lambda: 4321
# Should do nothing with no bug_id
sheriff.post_blame_comment_on_bug(commit_info, builders, [])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
# Should try to post a comment to the bug, but MockTool.bugs does nothing.
commit_info.bug_id = lambda: 1234
sheriff.post_blame_comment_on_bug(commit_info, builders, [])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"])
sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"])
expected_stderr = u"""MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
--- End comment ---
MOCK bug comment: bug_id=1234, cc=['watcher@example.com']
--- Begin comment ---
http://trac.webkit.org/changeset/4321 might have broken Foo and Bar
The following tests are not passing:
mock-test-1
mock-test-2
--- End comment ---
"""
OutputCapture().assert_outputs(self, run, expected_stderr=expected_stderr)
|
arvinddoraiswamy/mywebappscripts | BurpExtensions/FileUploadFuzz.py | Python | mit | 1,693 | 0.014767 | from burp import IBurpExtender
from burp import IContextMenuFactory
from javax.swing import JMenuItem
import sys
import os
import re
#Adding directory to the path where Python searches for modules
module_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/WebAppsec/BurpExtensions/modules/')
sys.path.insert(0, module_folder)
import webcommon
filePayloadDir= '/home/arvind/Documents/Me/My_Projects/Git/WebAppsec/BurpExtensions/filePayloads'
fileNameVar= 'filename'
class BurpExtender(IBurpExtender, IContextMenuFactory):
def registerExtenderCallbacks(self,callbacks):
self._callbacks= callbacks
self._helpers = callbacks.getHelpers()
callbacks.setExtensionName("Fuzz File Upload")
callbacks.registerContextMenuFactory(self)
def createMenuItems(self, invocation):
menu= []
menu.append(JMenuItem("Test File Upload", None, actionPerformed= lambda x,inv=invocation:self.testFileUpload(inv)))
return menu
def testFileUpload(self, invocation):
fileList= self.getListOfFiles()
invMessage=invocation.getSelectedMessages()
hostname= invMessage[0].getHttpService().getHost()
port= invMessage[0].getHttpService().getPort()
bytes_req= invMessage[0].getRequest()
request= bytes_req.tostring()
for i in fileList:
r1= re.sub(r'('+fileNameVar+r'=").*(")', r'\1'+i+r' | \2', request, re.DOTALL|re.MULTILINE)
# Add regex to substitute fileContent depending on request structure
def getListOfFiles(self):
| fileList= []
for filename in os.listdir(filePayloadDir):
fileList.append(filename)
return fileList
|
stephrdev/brigitte | brigitte/repositories/migrations/0012_auto__add_field_repository_last_commit_date.py | Python | bsd-3-clause | 5,432 | 0.008837 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Repository.last_commit_date'
db.add_column('repositories_repository', 'last_commit_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Repository.last_commit_date'
db.delete_column('repositories_repository', 'last_commit_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 6, 12, 4, 58, 637077)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 6, 12, 4, 58, 636988)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'repositories.repository': {
'Meta': {'unique_together': "(('user', 'slug'),)", 'object_name': 'Repository'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_commit_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '4'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '80', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharF | ield', [], {'max_length': '80'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'repositories.repositoryuser': {
'Meta': {'unique_together': "(('repo', 'user'),)", 'object_n | ame': 'RepositoryUser'},
'can_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repositories.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['repositories']
|
openhomity/homity-hub | runhub.py | Python | apache-2.0 | 1,615 | 0.002477 | """
Launcher for homity-hub
"""
from Hub import app
from Hub.api import hub_config
import cherrypy
from paste.translogger import TransLogger
if hub_config.get('ssl_enable'):
from OpenSSL import SSL
def run_cherrypy():
"""Start CherryPy server."""
#Enable WSGI access logging via Paste
app_logged = TransLogger(app, setup_console_handler=False)
# Mount the WSGI callable object (app) on the root directory
cherrypy.tree.graft(app_logged, '/')
# Set the configuration of the web server
cherrypy_config = {
'engine.autoreload_on': True,
'log.screen': True,
'server.socket_port': 5000,
'server.socket_ | host': '0.0.0.0'
}
if hub_config.get('ssl_enable'):
cherrypy_config['server.ssl_module'] = 'builtin'
cherrypy_config['server.ssl_private_key'] = hub_config.get(
'ssl_private_key')
cherrypy_config['server.ssl_certificate'] = hub_config.get(
'ssl_cert')
cherrypy.config.update(cherrypy_config)
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engi | ne.block()
def run_werkzeug():
""" Werkzeug deprecated in favor of CherryPy. """
if hub_config.get('ssl_enable'):
context = SSL.Context(SSL.SSLv23_METHOD)
context.use_privatekey_file(hub_config.get('ssl_private_key'))
context.use_certificate_file(hub_config.get('ssl_cert'))
app.run(host='0.0.0.0',
ssl_context=context,
debug=False)
else:
app.run(host='0.0.0.0', debug=False)
if __name__ == "__main__":
run_cherrypy()
|
staranjeet/fjord | vendor/packages/requests-2.7.0/requests/packages/chardet/euctwprober.py | Python | bsd-3-clause | 1,674 | 0.001792 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful | ,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# Licen | se along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return 'EUC-TW'
|
junkoda/mockgallib | script/compute_corr_multipole.py | Python | gpl-3.0 | 3,670 | 0.006812 | """
This script computes correlation function multipoles from given files of
galalxy and random catalgues
python3 compute_corr_multipole.py
Args:
n: index of mock
Options:
--igalaxies=1:1
--irandoms=1:1
Input:
mocks/mock_<n>.txt
randoms/random_<n>.txt
File format is:
x y z
in each line
Output:
Standard output
Column 1: r
Column 2: xi0 (monopole)
Column 3: xi2 (quadrupole)
"""
import os
import argparse
import json
import signal
import numpy as np
import h5py
import mockgallib as mock
signal.signal(signal.SIGINT, signal.SIG_DFL) # stop with ctrl-c
#
# Command-line options
#
parser = argparse.ArgumentParser()
parser.add_argument('reg', help='region w1 or w4')
parser.add_argument('--igalaxies', default='1:1',
help='index range of galaxy catalogues')
parser.add_argument('--irandoms', default='1:1',
help='index range of random catalogues')
parser.add_argument('--pair-correction', default=None,
help='pair correction file')
parser.add_argument('--param', default='param.json',
help='parameter json file')
parser.add_argument('--dir', default='.', help='data directory')
parser.add_argument('--rr', default=None, help='precomputed RR filename')
parser.add_argument('-o', default='.', help='output directory')
parser.add_argument('--zmin', type=float, default=0.5, help='minimum redshift')
parser.add_argument('--zmax', type=float, default=1.2, help='minimum redshift')
arg = parser.parse_args()
igalaxies = arg.igalaxies.split(':')
irandoms = arg.irandoms.split(':')
#
# Read parameter file
#
print('# Parameter file: %s' % arg.param)
with open(arg.param, 'r') as f:
param = json.load(f)
omega_m = param['omega_m']
print('# Setting cosmology: omega_m= %.4f' % omega_m)
print('# redshift-range %f %f' % (arg.zmin, arg.zmax))
#
# Initilise
#
mock.set_loglevel(0)
mock.cosmology.set(omega_m)
mock.distance.init(1.2)
def read_catalogues(filebase, irange):
cats = mock.Catalogues()
for i in range(int(irange[0]), int(irange[1]) + 1):
filename = '%s%05d.txt' % (filebase, i)
a = np.loadtxt(filename, delimiter=' ', usecols=[1,2,3,7,8,12])
w = np.loadtxt(filename, delimiter=' ', usecols=[12, 13])
a[:, 5] = 1.0/(w[:, 0]*w[:, 1])
cats.append(a, z_min=arg.zmin, z_max= arg.zmax)
return cats
galaxies = read_catalogues('%s/mocks/%s/mock_%s_' % (arg.dir, arg.reg, arg.reg), igalaxies)
randoms = read_catalogues('%s/rands/%s/rand_%s_' % (arg.dir, arg.reg, arg.reg), irandoms)
corr = mock.CorrelationFunctionMultipole(r_min=0.08912509, r_max=141.25375446,
nbin=32,
mu_nbin=40,
ra_min=0.0, dec_min=0.0,
pair_correction=arg.pair_correction)
if arg.rr:
rr = mock.corr_multipole.Hist2D(r_min=0.08912509, r_max=141.25375446,
r_nbin=32, mu_nbin=40)
rr.load(arg.rr)
print('computing corr multipole with rr: %s' % arg.rr)
corr.compute_corr_multipole_with_rr(galaxies, randoms, rr)
else:
corr.compute_corr_multipole(galaxies, randoms)
i0 = int(igal | axies[0])
i1 = int(igalaxies[1]) + 1
for i in range(i0, i1):
ii= i - i0
r = corr.r_i(ii)
xi0 = corr.xi0_i(ii)
xi2 = corr.xi2_i(ii)
nrow= | len(r)
filename = '%s/corr_multipole_%05d.txt' % (arg.o, i)
with open(filename, 'w') as f:
for irow in range(nrow):
f.write('%e %e %e\n' % (r[irow], xi0[irow], xi2[irow]))
print('%s written' % filename)
|
piMoll/SEILAPLAN | lib/reportlab/lib/fontfinder.py | Python | gpl-2.0 | 13,553 | 0.006862 | #Copyright ReportLab Europe Ltd. 2000-2019
#see license.txt for license details
__version__='3.4.22'
#modification of users/robin/ttflist.py.
__doc__="""This provides some general-purpose tools for finding fonts.
The FontFinder object can search for font files. It aims to build
a catalogue of fonts which our framework can work with. It may be useful
if you are building GUIs or design-time interfaces and want to present users
with a choice of fonts.
There are 3 steps to using it
1. create FontFinder and set options and directories
2. search
3. query
>>> import fontfinder
>>> ff = fontfinder.FontFinder()
>>> ff.addDirectories([dir1, dir2, dir3])
>>> ff.search()
>>> ff.getFamilyNames() #or whichever queries you want...
Because the disk search takes some time to find and | parse hundreds of fonts,
it can use a cache to store a file w | ith all fonts found. The cache file name
For each font found, it creates a structure with
- the short font name
- the long font name
- the principal file (.pfb for type 1 fonts), and the metrics file if appropriate
- the time modified (unix time stamp)
- a type code ('ttf')
- the family name
- bold and italic attributes
One common use is to display families in a dialog for end users;
then select regular, bold and italic variants of the font. To get
the initial list, use getFamilyNames; these will be in alpha order.
>>> ff.getFamilyNames()
['Bitstream Vera Sans', 'Century Schoolbook L', 'Dingbats', 'LettErrorRobot',
'MS Gothic', 'MS Mincho', 'Nimbus Mono L', 'Nimbus Roman No9 L',
'Nimbus Sans L', 'Vera', 'Standard Symbols L',
'URW Bookman L', 'URW Chancery L', 'URW Gothic L', 'URW Palladio L']
One can then obtain a specific font as follows
>>> f = ff.getFont('Bitstream Vera Sans', bold=False, italic=True)
>>> f.fullName
'Bitstream Vera Sans'
>>> f.fileName
'C:\\code\\reportlab\\fonts\\Vera.ttf'
>>>
It can also produce an XML report of fonts found by family, for the benefit
of non-Python applications.
Future plans might include using this to auto-register fonts; and making it
update itself smartly on repeated instantiation.
"""
import sys, os, tempfile
from reportlab.lib.utils import pickle, asNative as _asNative
from xml.sax.saxutils import quoteattr
from reportlab.lib.utils import asBytes
try:
from time import process_time as clock
except ImportError:
from time import clock
try:
from hashlib import md5
except ImportError:
from md5 import md5
def asNative(s):
    """Best-effort conversion of *s* to a native string.

    Tries the default encoding first and falls back to latin-1, which can
    decode any byte sequence, so this never raises for bytes input.
    """
    try:
        return _asNative(s)
    except:
        # Deliberately broad: any decode failure falls back to latin-1.
        return _asNative(s, enc='latin-1')
# Font file extensions recognised when scanning directories.
EXTENSIONS = ['.ttf', '.ttc', '.otf', '.pfb', '.pfa']

# PDF font descriptor flags (see PDF Reference Guide table 5.19).
# Parentheses make the intent explicit: '-' binds tighter than '<<',
# so e.g. the original '1 << 2-1' already meant 1 << (2 - 1).
FF_FIXED = 1 << (1 - 1)
FF_SERIF = 1 << (2 - 1)
FF_SYMBOLIC = 1 << (3 - 1)
FF_SCRIPT = 1 << (4 - 1)
FF_NONSYMBOLIC = 1 << (6 - 1)
FF_ITALIC = 1 << (7 - 1)
FF_ALLCAP = 1 << (17 - 1)
FF_SMALLCAP = 1 << (18 - 1)
FF_FORCEBOLD = 1 << (19 - 1)
class FontDescriptor:
    """A short descriptive record about a single font found on disk.

    ``typeCode`` should be a file extension without the dot, e.g. one of
    'ttf', 'ttc', 'otf', 'pfb', 'pfa'.
    """
    def __init__(self):
        self.name = None
        self.fullName = None
        self.familyName = None
        self.styleName = None
        self.isBold = False          # true if it's somehow bold
        self.isItalic = False        # true if italic or oblique or somehow slanty
        self.isFixedPitch = False
        self.isSymbolic = False      # false for Dingbats, Symbols etc.
        self.typeCode = None         # normally the extension minus the dot
        self.fileName = None         # full path to where we found it
        self.metricsFileName = None  # defined only for type='type1pc' or 'type1mac'
        self.timeModified = 0

    def __repr__(self):
        return "FontDescriptor(%s)" % self.name

    def getTag(self):
        "Return an XML tag representation"
        attrs = []
        # Emit only truthy attributes; timeModified is deliberately omitted.
        for k, v in self.__dict__.items():
            if k not in ['timeModified']:
                if v:
                    attrs.append('%s=%s' % (k, quoteattr(str(v))))
        return '<font ' + ' '.join(attrs) + '/>'
from reportlab.lib.utils import rl_isdir, rl_isfile, rl_listdir, rl_getmtime
class FontFinder:
def __init__(self, dirs=[], useCache=True, validate=False, recur=False, fsEncoding=None, verbose=0):
    """Create a finder; *dirs* are search directories (never mutated here).

    NOTE(review): the mutable default for *dirs* is only iterated, never
    mutated, so it is harmless; the signature is kept for compatibility.
    """
    self.useCache = useCache
    self.validate = validate
    if fsEncoding is None:
        fsEncoding = sys.getfilesystemencoding()
    self._fsEncoding = fsEncoding or 'utf8'
    self._dirs = set()
    self._recur = recur
    self.addDirectories(dirs)
    self._fonts = []
    self._skippedFiles = []  # filenames we did not handle
    self._badFiles = []      # filenames we rejected
    self._fontsByName = {}
    self._fontsByFamily = {}
    self._fontsByFamilyBoldItalic = {}  # indexed by (bold, italic)
    self.verbose = verbose
def addDirectory(self, dirName, recur=None):
    """Add *dirName* (and, when recursing, its subdirectories) to the search set.

    *recur* overrides the instance-level ``self._recur`` when not None.
    """
    # aesthetics - if there are 2 copies of a font, should the first or last
    # be picked up? might need reversing
    if rl_isdir(dirName):
        self._dirs.add(dirName)
        if recur if recur is not None else self._recur:
            for r, D, F in os.walk(dirName):
                for d in D:
                    self._dirs.add(os.path.join(r, d))
def addDirectories(self, dirNames, recur=None):
    """Add every directory in *dirNames* via :meth:`addDirectory`."""
    for dirName in dirNames:
        self.addDirectory(dirName, recur=recur)
def getFamilyNames(self):
    "Returns a list of the distinct font families found"
    if not self._fontsByFamily:
        # Build the family index lazily from the flat font list.
        fonts = self._fonts
        for font in fonts:
            fam = font.familyName
            if fam is None:
                continue
            if fam in self._fontsByFamily:
                self._fontsByFamily[fam].append(font)
            else:
                self._fontsByFamily[fam] = [font]
    fsEncoding = self._fsEncoding
    names = list(asBytes(_, enc=fsEncoding) for _ in self._fontsByFamily.keys())
    names.sort()
    return names
def getFontsInFamily(self, familyName):
    "Return list of all font objects with this family name"
    return self._fontsByFamily.get(familyName, [])
def getFamilyXmlReport(self):
    """Reports on all families found as XML.
    """
    lines = []
    lines.append('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
    lines.append("<font_families>")
    for dirName in self._dirs:
        lines.append("    <directory name=%s/>" % quoteattr(asNative(dirName)))
    for familyName in self.getFamilyNames():
        if familyName:  # skip null case
            lines.append('    <family name=%s>' % quoteattr(asNative(familyName)))
            for font in self.getFontsInFamily(familyName):
                lines.append('        ' + font.getTag())
            lines.append('    </family>')
    lines.append("</font_families>")
    return '\n'.join(lines)
def getFontsWithAttributes(self, **kwds):
    """This is a general lightweight search: return fonts whose attributes
    all equal the given keyword values."""
    selected = []
    for font in self._fonts:
        OK = True
        for k, v in kwds.items():
            if getattr(font, k, None) != v:
                OK = False
        if OK:
            selected.append(font)
    return selected
def getFont(self, familyName, bold=False, italic=False):
    """Try to find a font matching the spec; raise KeyError when absent."""
    for font in self._fonts:
        if font.familyName == familyName:
            if font.isBold == bold:
                if font.isItalic == italic:
                    return font
    raise KeyError("Cannot find font %s with bold=%s, italic=%s" % (familyName, bold, italic))
def _getCacheFileName(self):
"""Base this on the directories...same set of directories
should give same cache"""
fsEncoding = self._fsEncoding
hash = md5(b''.join(asBytes(_,enc=fsEncoding) for _ in sorted(self._dirs))).hexdigest()
from reportlab.lib.utils import get_rl_tempfile
fn = get_rl_tempfile('fonts_%s.dat' % |
# Benchmark OpenMM's MTSIntegrator on the DHFR explicit-solvent test system.
import time

import simtk.openmm.app as app
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems, integrators

testsystem = testsystems.DHFRExplicit(rigid_water=False, constraints=None)
system, topology, positions = testsystem.system, testsystem.topology, testsystem.positions

platform = mm.Platform.getPlatformByName('CUDA')
properties = {'CudaPrecision': "single"}

temperature = 1 * u.kelvin
timestep = 2.0 * u.femtoseconds
steps = 5000

# Single-group alternative kept for reference, with past timing results:
# hmc_integrators.guess_force_groups(system, nonbonded=0, others=0, fft=0)
# groups = [(0, 1)]
# new (65.32461094856262, 13.226255578932173, 76.54083089659821, 76.54083089659821)
# old (64.60578417778015, 13.373415569455394, 77.39245121212612, 77.39245121212612)

hmc_integrators.guess_force_groups(system, nonbonded=0, others=0, fft=0)
groups = [(0, 1), (1, 4)]
# new (69.01602101325989, 12.518832400291547, 72.44694676094645, 289.7877870437858)
# old (68.56146907806396, 12.601830322746602, 72.92725881219098, 291.70903524876394)

integrator = mm.MTSIntegrator(timestep, groups)
simulation = app.Simulation(topology, system, integrator, platform=platform, platformProperties=properties)
simulation.context.setPositions(positions)
simulation.context.setVelocitiesToTemperature(temperature)

# Warm up once so JIT/kernel compilation is excluded from the timing.
integrator.step(1)

t0 = time.time()
integrator.step(steps)
dt = time.time() - t0

outer_per_day = steps / dt * 60 * 60 * 24
outer_per_sec = steps / dt
inner_per_sec = outer_per_sec * groups[-1][1]  # inner substeps per outer step
ns_per_day = (timestep / u.nanoseconds) * outer_per_day

dt, ns_per_day, outer_per_sec, inner_per_sec
|
lebauce/darling | tools/gdb_maloader.py | Python | gpl-3.0 | 2,497 | 0.0004 | # Copyright 2011 Shinichiro Hamaji. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY Shinichiro Hamaji ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WAR | RANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Shinichiro Hamaji OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, B | UT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import gdb
import os
import re
import sys
def bt(demangle=True):
    """Print a backtrace of the selected gdb thread, resolving Mach-O symbols
    via maloader's ``dumpSymbol`` helper.

    When *demangle* is true, output is piped through ``c++filt``.
    """
    # Find the newest frame.
    frame = gdb.selected_frame()
    while True:
        next = frame.newer()
        if not next:
            break
        frame = next

    if demangle:
        pipe = os.popen('c++filt', 'w')
    else:
        pipe = sys.stdout

    i = 0
    while frame:
        s = gdb.execute('p dumpSymbol((void*)0x%x)' % frame.pc(),
                        to_string=True)
        m = re.match(r'.*"(.*)"$', s)
        if m:
            pipe.write("#%-2d %s\n" % (i, m.group(1)))
        else:
            sal = frame.find_sal()
            lineno = ''
            if sal.symtab:
                lineno = 'at %s:%d' % (sal.symtab, sal.line)
            else:
                soname = gdb.solib_name(frame.pc())
                if soname:
                    lineno = 'from %s' % (soname)
            framename = frame.name()
            if not framename:
                framename = '??'
            pipe.write("#%-2d 0x%016x in %s () %s\n" %
                       (i, frame.pc(), framename, lineno))
        frame = frame.older()
        i += 1

    # BUG FIX: only close the c++filt pipe. The original closed `pipe`
    # unconditionally, which closed sys.stdout when demangle=False.
    if pipe is not sys.stdout:
        pipe.close()
|
EntPack/SilentDune-Client | silentdune_client/sdc_firewall.py | Python | lgpl-3.0 | 12,943 | 0.002627 | #
# Authors: Robert Abram <robert.abram@entpack.com>
#
# Copyright (C) 2015-2017 EntPack
# see file 'LICENSE' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import gettext
import logging
import multiprocessing
import operator
import os
import signal
import sys
import time
from multiprocessing import Manager
from silentdune_client import modules
from silentdune_client.utils.log import setup_logging
from silentdune_client.utils.misc import node_info_dump
from silentdune_client.utils.configuration import ClientConfiguration
from silentdune_client.utils.daemon import Daemon
_logger = logging.getLogger('sd-client')
def run():
class SDCDaemon(Daemon):
    """Daemon that loads, starts and supervises the Silent Dune client modules."""

    # Node configuration information
    _args = None    # parsed command line arguments
    _config = None  # ClientConfiguration instance

    stopProcessing = False  # set by the SIGTERM handler to end the main loop
    reload = False          # set by the SIGHUP handler to reload configuration

    t_start = time.time()
    t_mod_check = 0  # seconds-since-start of the last module health check

    def __init__(self, *args, **kwargs):
        # 'args' is our own keyword argument; remove it before delegating
        # to Daemon, which does not know about it.
        self._args = kwargs.pop('args', None)
        super(SDCDaemon, self).__init__(*args, **kwargs)
def startup_modules(self):
    """Discover, configure and start all enabled modules.

    Returns a tuple ``(running_mods, pmanager, mqueue, cprocs, cqueues,
    mlist)`` describing the started modules and their IPC plumbing.
    """
    # Get the path where this file is located.
    app_path = os.path.split(os.path.realpath(__file__))[0]
    # Get our package path and package name.
    base_path, package_name = os.path.split(app_path)

    # Get loadable module list.
    mods = modules.__load_modules__(base_path=base_path)

    active_mods = []   # modules marked as active
    running_mods = []  # modules that are really running

    # Set the configuration in each module.
    for mod in mods:
        mod.set_config(self._config)
        # If the module is enabled, add it to the active_mods list.
        if mod.module_enabled():
            active_mods.append(mod)
        else:
            _logger.debug('Service: module {0} is disabled.'.format(mod.get_name()))

    pmanager = Manager()
    mqueue = pmanager.Queue()

    # Keep the created child processes.
    cprocs = dict()   # module process handlers
    cqueues = dict()  # module Queue objects
    mlist = list()    # module names

    # Sort modules by the priority attribute so we can start them in the proper order.
    sorted_mods = sorted(active_mods, key=operator.attrgetter('priority'))

    for mod in sorted_mods:
        _logger.debug('Service: starting module {0}: ({1})'.format(mod.get_name(), mod.priority))
        if mod.service_startup() is False:
            # BUG FIX: mod.get_name was logged without calling it.
            _logger.critical('Service: module ({0}) failed during startup.'.format(mod.get_name()))
            # sys.exit(1)
            continue

        name = mod.get_name()
        running_mods.append(mod)
        mlist.append(name)
        cprocs[name] = None  # placeholder for the module process

        # Setup thread for modules wanting a processing thread.
        if mod.wants_processing_thread:
            cqueues[name] = multiprocessing.Queue()
            cprocs[name] = multiprocessing.Process(
                target=mod.process_handler, args=(cqueues[name], mqueue, mlist))
            cprocs[name].start()

            # Give the firewall manager time to setup the initial rules.
            if name == 'SilentDuneClientFirewallModule':
                time.sleep(2)

    return running_mods, pmanager, mqueue, cprocs, cqueues, mlist
def check_module_state(self, mods, mqueue, cprocs, cqueues, mlist, force=False):
    """Once a minute (or when *force* is set), verify that each module's
    worker process is alive and attempt to restart dead ones (up to 10
    restarts per module).

    Returns the (possibly updated) ``(mods, mqueue, cprocs, cqueues)``.
    Note: the original docstring claimed a boolean return; the code has
    always returned this tuple.
    """
    # Only run the check when a whole minute boundary has passed, or forced.
    time_t = int((time.time() - self.t_start))
    if (time_t > self.t_mod_check and time_t % 60.0 == 0.0) or force:
        self.t_mod_check = int((time.time() - self.t_start))

        # Check to see that module process threads are still running.
        _logger.debug('Service: checking module threads.')
        for mod in mods:
            name = mod.get_name()
            _logger.debug('{0}: checking module thread...'.format(name))
            if name in cprocs and cprocs[name]:
                if cprocs[name].is_alive():
                    mod.restart_count = 0
                else:
                    # See if we should attempt to restart this module.
                    if mod.restart_count < 10:
                        _logger.critical('service: {0} module has unexpectedly stopped.'.format(name))
                        mod.restart_count += 1
                        _logger.info('service: attempting to restart module {0} (rc:{1})'.format(
                            name, mod.restart_count))
                        if mod.wants_processing_thread:
                            cqueues[name] = multiprocessing.Queue()
                            cprocs[name] = multiprocessing.Process(
                                target=mod.process_handler, args=(cqueues[name], mqueue, mlist))
                            cprocs[name].start()
                    else:
                        if mod.restart_count == 10:
                            _logger.warning(
                                'service: module restart limit exceeded for {0}, giving up.'.format(name))
                            mod.restart_count += 1
                        # Periodically remind the operator the module is dead.
                        if mod.restart_count % 60 == 0:
                            _logger.warning('service: module {0} is dead.'.format(name))

    return mods, mqueue, cprocs, cqueues
def terminate_modules(self, mods, cprocs, cqueues):
    """
    Shutdown modules: ask each live worker process to stop, then drain its
    queue and join the process.
    """
    for mod in mods:
        name = mod.get_name()
        if cprocs[name] and cprocs[name].is_alive():
            _logger.debug('Service: signalling {0} module to stop processing.'.format(name))
            cqueues[name].put(modules.QueueTask(modules.TASK_STOP_PROCESSING))
            cqueues[name].close()
            cqueues[name].join_thread()
            cprocs[name].join()
def run(self):
_logger.debug('Service: setting signal handlers.')
# Set SIGTERM signal Handler
signal.signal(signal.SIGTERM, signal_term_handler)
signal.signal(signal.SIGHUP, signal_hup_handler)
_logger.info('Starting Silent Dune firewall.')
# This loop allows for restarting and reloading the configuration after a SIGHUP signal has been received.
while True:
# Reset loop controllers
self.stopProcessing = False
self.reload = False
|
annapowellsmith/openpresc | openprescribing/dmd2/gen_models/gen_models.py | Python | mit | 1,981 | 0.000505 | import csv
def model_name(table_name):
    """Return the Django model class name for a dm+d table name.

    Well-known abbreviation tables (vtm, vmp, amp, ...) are upper-cased;
    any other snake_case table name is converted to CamelCase.
    """
    if table_name in [
        'vtm',
        'vpi',
        'vmp',
        'vmpp',
        'amp',
        'ampp',
        'gtin',
    ]:
        return table_name.upper()
    else:
        return ''.join(tok.title() for tok in table_name.split('_'))
def quote(s):
    """Wrap *s* in double quotes; *s* must not itself contain one."""
    assert '"' not in s
    return '"' + s + '"'
# Read the dm+d schema description and print Django model definitions.
with open('schema.csv') as f:
    lines = list(csv.DictReader(f))

print('from django.db import models')

table = None  # table currently being emitted; a change starts a new class

for line in lines:
    if line['table'] == 'ccontent':
        continue

    if line['table'] != table:
        table = line['table']
        print()
        print()
        print(f'class {model_name(table)}(models.Model):')
        print('#     class Meta:')
        print('#         verbose_name = "TODO"')
        print()

    if line['type'] == 'retired':
        continue

    # Collect (option, literal-value) pairs for the field definition.
    options = []

    if line['primary_key'] == 'True':
        options.append(('primary_key', 'True'))

    if line['db_column']:
        options.append(('db_column', quote(line['db_column'])))

    if line['type'] in ['ForeignKey', 'OneToOneField']:
        options.append(('to', quote(model_name(line['to']))))
        options.append(('on_delete', 'models.CASCADE'))
        # prevcd/uomcd columns would otherwise clash on reverse accessors.
        if 'prevcd' in line['db_column'] or 'uomcd' in line['db_column']:
            options.append(('related_name', quote('+')))
    elif line['type'] == 'CharField':
        options.append(('max_length', line['max_length']))
    elif line['type'] == 'DecimalField':
        options.append(('max_digits', line['max_digits']))
        options.append(('decimal_places', line['decimal_places']))

    if line['optional'] == 'Y':
        if line['type'] != 'BooleanField' and line['primary_key'] != 'True':
            options.append(('null', 'True'))

    options.append(('help_text', quote(line['descr'])))

    print(f'    {line["field"]} = models.{line["type"]}(')
    for k, v in options:
        print(f'        {k}={v},')
    print('    )')
|
Shaunwei/shua-shua-shua | G_farthest_BST_sum.py | Python | mit | 2,599 | 0.001924 | """
given a BST, find the two nodes that sums up to a value and are the farthest apart.
"""
class Tree:
    """Minimal binary-tree node: a value and two child links."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None

    def __repr__(self):
        return '[ %d ]' % self.val
def get_tree():
    """Build and return a balanced BST containing the values 1..7."""
    def build_tree(vals):
        if not vals:
            return None
        # BUG FIX: '//' — plain '/' yields a float index under Python 3.
        mid = len(vals) // 2
        root = Tree(vals[mid])
        root.left = build_tree(vals[:mid])
        root.right = build_tree(vals[mid + 1:])
        return root
    return build_tree(range(1, 8))
def print_tree(r):
    """Print every node of tree *r* in ascending (in-order) order,
    using an iterative traversal with an explicit stack."""
    stack = []
    while stack or r:
        if r:
            stack.append(r)
            r = r.left
        else:
            r = stack.pop()
            print(r)
            r = r.right
class Solution:
    """Given a BST, find the two distinct nodes that sum to a target value
    and are the farthest apart (by path length)."""

    def find_farthest_nodes(self, tree, sums):
        """Return ``[node1, node2]`` for the farthest-apart pair summing to
        *sums*, or ``[0, 0]`` when no such pair exists."""
        values = self.in_order(tree)
        distance = 0
        nodes = [0, 0]
        for val in values:
            # Stop once val would exceed the target or pair with itself /
            # a smaller partner (those pairs were already examined).
            # BUG FIX: '>=' prevents pairing a node with itself when
            # sums is exactly 2 * val.
            if val > sums or val >= sums - val:
                break
            if self.bs(values, sums - val):
                d, n1, n2 = self.find_distance(tree, val, sums - val)
                if d > distance:
                    distance = d
                    nodes = [n1, n2]
        return nodes

    def in_order(self, root):
        """Return the values of *root* in sorted (in-order) order."""
        values = []
        stack = []
        while root or stack:
            if root:
                stack.append(root)
                root = root.left
            else:
                root = stack.pop()
                values.append(root.val)
                root = root.right
        return values

    def bs(self, values, target):
        """Binary search: True iff *target* occurs in sorted *values*."""
        st, ed = 0, len(values) - 1
        while st + 1 < ed:
            # BUG FIX: '//' — '/' yields a float index under Python 3.
            mid = (st + ed) // 2
            if values[mid] == target:
                return True
            elif values[mid] < target:
                st = mid
            else:
                ed = mid
        return values[st] == target or values[ed] == target

    def find_distance(self, root, v1, v2):
        """Return ``(distance, node1, node2)`` for the values v1 and v2."""
        n1, p1 = self.find_path(root, v1)
        n2, p2 = self.find_path(root, v2)
        i = 0
        # BUG FIX: range, not the Python-2-only xrange.
        for i in range(min(len(p1), len(p2))):
            if p1[i] != p2[i]:
                break
        return len(p1[i:]) + len(p2[i:]), n1, n2

    def find_path(self, root, val):
        """Walk the BST down to *val*; return (node, list of ancestor values)."""
        path = []
        while root.val != val:
            path.append(root.val)
            if root.val > val:
                root = root.left
            else:
                root = root.right
        return root, path
if __name__ == '__main__':
    # print_tree(get_tree())
    print(Solution().find_farthest_nodes(get_tree(), 7))
|
kdmurray91/scikit-bio | skbio/metadata/_repr.py | Python | bsd-3-clause | 5,981 | 0 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import itertools
import numbers
import textwrap
from abc import ABCMeta, abstractmethod
from skbio._base import ElasticLines
class _MetadataReprBuilder(metaclass=ABCMeta):
    """Abstract base class for building a repr for an object containing
    metadata and/or positional metadata.

    Parameters
    ----------
    obj : Type varies depending on subclass
        Object to build repr for.
    width : int
        Maximum width of the repr.
    indent : int
        Number of spaces to use for indented lines.

    """

    def __init__(self, obj, width, indent):
        self._obj = obj
        self._width = width
        self._indent = ' ' * indent

    @abstractmethod
    def _process_header(self):
        """Used by `build` Template Method to build header for the repr"""
        raise NotImplementedError

    @abstractmethod
    def _process_data(self):
        """Used by `build` Template Method to build data lines for the repr"""
        raise NotImplementedError

    def build(self):
        """Template method for building the repr"""
        self._lines = ElasticLines()

        self._process_header()
        self._process_metadata()
        self._process_positional_metadata()
        self._process_stats()
        self._process_data()

        return self._lines.to_str()

    def _process_metadata(self):
        if self._obj.has_metadata():
            self._lines.add_line('Metadata:')
            # Python 3 doesn't allow sorting of mixed types so we can't just
            # use sorted() on the metadata keys. Sort first by type then sort
            # by value within each type.
            for key in self._sorted_keys_grouped_by_type(self._obj.metadata):
                value = self._obj.metadata[key]
                self._lines.add_lines(
                    self._format_metadata_key_value(key, value))

    def _sorted_keys_grouped_by_type(self, dict_):
        """Group keys within a dict by their type and sort within type."""
        type_sorted = sorted(dict_, key=self._type_sort_key)
        type_and_value_sorted = []
        for _, group in itertools.groupby(type_sorted, self._type_sort_key):
            type_and_value_sorted.extend(sorted(group))
        return type_and_value_sorted

    def _type_sort_key(self, key):
        return repr(type(key))

    def _format_metadata_key_value(self, key, value):
        """Format metadata key:value, wrapping across lines if necessary."""
        key_fmt = self._format_key(key)

        supported_type = True
        if isinstance(value, str):
            # extra indent of 1 so that wrapped text lines up:
            #
            #     'foo': 'abc def ghi
            #             jkl mno'
            value_repr = repr(value)
            extra_indent = 1
        elif isinstance(value, bytes):
            # extra indent of 2 so that wrapped text lines up:
            #
            #     'foo': b'abc def ghi
            #              jkl mno'
            value_repr = repr(value)
            extra_indent = 2
        # handles any number, this includes bool
        elif value is None or isinstance(value, numbers.Number):
            value_repr = repr(value)
            extra_indent = 0
        else:
            supported_type = False

        if not supported_type or len(value_repr) > 140:
            value_repr = str(type(value))
            # extra indent of 1 so that wrapped text lines up past the bracket:
            #
            #     'foo': <type
            #             'dict'>
            extra_indent = 1

        return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)

    def _process_positional_metadata(self):
        if self._obj.has_positional_metadata():
            self._lines.add_line('Positional metadata:')
            for key in self._obj.positional_metadata.columns.values.tolist():
                dtype = self._obj.positional_metadata[key].dtype
                self._lines.add_lines(
                    self._format_positional_metadata_column(key, dtype))

    def _format_positional_metadata_column(self, key, dtype):
        key_fmt = self._format_key(key)
        dtype_fmt = '<dtype: %s>' % str(dtype)
        return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)

    def _format_key(self, key):
        """Format metadata key.

        Includes initial indent and trailing colon and space:

            <indent>'foo':<space>

        """
        key_fmt = self._indent + repr(key)
        supported_types = (str, bytes, numbers.Number, type(None))
        if len(key_fmt) > (self._width / 2) or not isinstance(key,
                                                              supported_types):
            key_fmt = self._indent + str(type(key))
        return '%s: ' % key_fmt

    def _wrap_text_with_indent(self, text, initial_text, extra_indent):
        """Wrap text across lines with an initial indentation.

        For example:

            'foo': 'abc def
                    ghi jkl
                    mno pqr'

        <indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
        lines are indented such that they line up with the start of the
        previous line of wrapped text.

        """
        return textwrap.wrap(
            text, width=self._width, expand_tabs=False,
            initial_indent=initial_text,
            subsequent_indent=' ' * (len(initial_text) + extra_indent))

    def _process_stats(self):
        self._lines.add_line('Stats:')
        for label, value in self._obj._repr_stats():
            self._lines.add_line('%s%s: %s' % (self._indent, label, value))
        self._lines.add_separator()
|
eth-cscs/production | easybuild/easyblocks/craytoolchain.py | Python | gpl-3.0 | 3,472 | 0.002592 | ##
# Copyright 2015-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under | the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See t | he
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing Cray toolchains, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Guilherme Peretti Pezzi (CSCS)
@author: Petar Forai (IMP/IMBA)
"""
from easybuild.easyblocks.generic.bundle import Bundle
from easybuild.tools.build_log import EasyBuildError
KNOWN_PRGENVS = ['PrgEnv-cray', 'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-nvidia', 'PrgEnv-pgi']
class CrayToolchain(Bundle):
    """
    Compiler toolchain: generate module file only, nothing to build/install
    """

    def prepare_step(self, *args, **kwargs):
        """Prepare build environment (skip loading of dependency modules)."""
        kwargs['load_tc_deps_modules'] = False
        super(CrayToolchain, self).prepare_step(*args, **kwargs)

    def make_module_dep(self):
        """
        Generate load/swap statements for dependencies in the module file
        """
        prgenv_mod = None

        # collect 'swap' statements for dependencies (except PrgEnv)
        swap_deps = []
        for dep in self.toolchain.dependencies:
            mod_name = dep['full_mod_name']
            # determine versionless module name, e.g. 'fftw/3.3.4.1' => 'fftw'
            dep_name = '/'.join(mod_name.split('/')[:-1])

            if mod_name.startswith('PrgEnv'):
                prgenv_mod = mod_name
            else:
                swap_deps.append(self.module_generator.swap_module(dep_name, mod_name).lstrip())

        self.log.debug("Swap statements for dependencies of %s: %s", self.full_mod_name, swap_deps)

        if prgenv_mod is None:
            raise EasyBuildError("Could not find a PrgEnv-* module listed as dependency: %s",
                                 self.toolchain.dependencies)

        # unload statements for other PrgEnv modules
        prgenv_unloads = ['']
        for prgenv in [prgenv for prgenv in KNOWN_PRGENVS if not prgenv_mod.startswith(prgenv)]:
            is_loaded_guard = self.module_generator.is_loaded(prgenv)
            unload_stmt = self.module_generator.unload_module(prgenv).strip()
            prgenv_unloads.append(self.module_generator.conditional_statement(is_loaded_guard, unload_stmt))

        # load statement for selected PrgEnv module (only when not loaded yet)
        prgenv_load = self.module_generator.load_module(prgenv_mod, recursive_unload=False)

        txt = '\n'.join(prgenv_unloads + [prgenv_load] + swap_deps)
        return txt
|
devilry/trix2 | trix/project/develop/settings/develop.py | Python | bsd-3-clause | 57 | 0 | from .common impor | t * # noqa
LANGUAGE_CODE | = 'nb'
|
pereerro/schooly | school_base/school_groups.py | Python | agpl-3.0 | 1,792 | 0.00558 | # -*- coding: utf-8 -*-
##############################################################################
#
# School App for Odoo
# Copyright (C) 2015
# Pere Ramon Erro Mas <pereerro@tecnoba.com> All Rights Reserved.
#
# This file is a part of School App for Odoo
#
# School App for Odoo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# School App for Odoo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields, orm
fro | m datetime import datetime, date, timedelta
from openerp.tools.translate import _
class groups_participation(osv.osv):
    """Participation of a participant in a school group session."""
    _name = 'groups.participation'
    _inherit = 'groups.participation'

    def name_get(self, cr, uid, ids, context=None):
        """Display name as '<session name> - <participant name>'."""
        res = []
        for item in self.browse(cr, uid, ids):
            # session_id may be empty; fall back to an empty string.
            session_name = item.session_id and item.session_id.name or ''
            res.append((item['id'], '%s - %s' % (session_name, item.participant.name)))
        return res

    _columns = {
        'session_id': fields.many2one('school.session', 'Session', ondelete='cascade', select=1,),
    }

groups_participation()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
KarlGong/ptest | ptest/config.py | Python | apache-2.0 | 12,010 | 0.005246 | import os
import platform
import re
from optparse import OptionParser, OptionGroup
from typing import List
from . import __version__
_properties = {}
_options = {}
def get_option(option: str) -> str:
    """Return the value of a parsed command line option, or None if unset."""
    # dict.get expresses the try/except-KeyError of the original directly.
    return _options.get(option)
def get_property(key: str, default: str = None) -> str:
    """
    Get property value.
    If no property found, default value will be returned.
    """
    # dict.get expresses the try/except-KeyError of the original directly.
    return _properties.get(key, default)
def get_int_property(key: str, default: int = None) -> int:
    """
    Get property value and convert it to int.
    If no property found, default value will be returned.
    A present but non-numeric value raises ValueError (as before).
    """
    try:
        return int(_properties[key])
    except KeyError:
        return default
def get_float_property(key: str, default: float = None) -> float:
    """
    Get property value and convert it to float.
    If no property found, default value will be returned.
    A present but non-numeric value raises ValueError (as before).
    """
    try:
        return float(_properties[key])
    except KeyError:
        return default
def get_boolean_property(key: str, default: bool = None) -> bool:
    """
    Get property value and convert it to boolean.
    If no property found, default value will be returned.
    Only the (case-insensitive) strings 'true'/'false' are accepted;
    anything else raises ValueError.
    """
    try:
        value = _properties[key]
        if value.lower() == "true":
            return True
        elif value.lower() == "false":
            return False
        raise ValueError("could not convert string to boolean: %s" % value)
    except KeyError:
        return default
def get_list_property(key: str, default: List[str] = None, sep: str = ",") -> List[str]:
    """
    Get property value and convert it to list by splitting on *sep*.
    If no property found, default value will be returned.
    """
    try:
        return _properties[key].split(sep)
    except KeyError:
        return default
def load(args):
    """Parse command line *args*: options first, then the property file,
    then -D properties (so -D definitions override file-defined ones)."""
    option_args, property_args = __load_args(args)
    _parse_options(option_args)
    _load_properties_from_file()
    _parse_properties(property_args)
def _load_properties_from_file():
    """Load properties from the file given by --property-file, if any.

    Lines starting with ';' or '#' are treated as comments and skipped.
    """
    property_file = get_option("property_file")
    if property_file is not None:
        property_regex = re.compile(r"^([^;#].*?)=(.*?)$")
        # 'with' guarantees the file is closed even if parsing fails
        # (replaces the original try/finally).
        with open(property_file, encoding="utf-8") as file_object:
            for line in file_object:
                property_match = property_regex.search(line.strip())
                if property_match:
                    _properties[property_match.group(1)] = property_match.group(2)
def __load_args(args):
    """Split *args* into ``(option_args, property_args)``.

    A property definition has the form -D<key>=<value>; every other
    argument is treated as an option argument.
    """
    property_args = []
    option_args = []
    property_regex = re.compile(r"^-D(.*?)=(.*?)$")
    for arg in args:
        if property_regex.search(arg):
            property_args.append(arg)
        else:
            option_args.append(arg)
    return option_args, property_args
def _parse_properties(property_args):
    """Store each -D<key>=<value> argument into the module property map.

    Callers must pre-filter *property_args* (see __load_args) so every
    entry matches the -D<key>=<value> form.
    """
    property_regex = re.compile(r"^-D(.*?)=(.*?)$")
    for arg in property_args:
        property_match = property_regex.search(arg)
        _properties[property_match.group(1)] = property_match.group(2)
def _parse_options(option_args):
parser = OptionParser(usage="ptest [options] [properties]", version="ptest %s for Python %s" % (__version__, platform.python_version()),
description="ptest is a light test framework for Python.")
# path and property
parser.add_option("-w", "--workspace", action="store", dest="workspace", default=".", metavar="dir",
help="Specify the workspace dir (relative to working directory). Default is current working directory.")
parser.add_option("-P", "--python-paths", action="store", dest="python_paths", default=None, metavar="paths",
help="Specify the additional locations (relative to workspace) where to search test libraries from when they are imported. "
"Multiple paths can be given by separating them with a comma.")
parser.add_option("-p", "--property-file", action="store", dest="property_file", default=None, metavar="file",
help="Specify the .ini property file (relative to workspace). "
"The properties in property file will be overwritten by user defined properties in cmd line. "
"Get property via get_property() in module ptest.config.")
# running
parser.add_option("-R", "--run-failed", action="store", dest="run_failed", default=None, metavar="file",
help="Specify the xunit result xml path (relative to workspace) and run the failed/skipped test cases in it.")
parser.add_option("-t", "--targets", action="store", dest="test_targets", default=None, metavar="targets",
help="Specify the path of test targets, separated by comma. Test target can be package/module/class/method. "
"The target path format is: package[.module[.class[.method]]] "
"NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path")
parser.add_option("-f", "--filter", action="store", dest="test_filter", default=None, metavar="class",
help="Specify the path of test filter class, select test cases to run by the specified filter. "
"The test filter class should implement class TestFilter in ptest.testfilter "
"The filter path format is: package.module.class "
"NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path")
parser.add_option("-i", "--include-tags", action="store", dest="include_tags", default=None, metavar="tags",
help="Select test cases to run by tags, separated by comma.")
parser.add_option("-e", "--exclude-tags", action="store", dest="exclude_tags", default=None, metavar="tags",
help="Select test cases not to run by tags, separated by comma. These test cases are not run even if included with --include-tags.")
parser.add_option("-g", "--include-groups", action="store", dest="include_groups", default=None, metavar="groups",
help="Select test cases to run by groups, separated by comma.")
parser.add_option("-n", "--test-executor-number", action="store", dest="test_executor_number", metavar="int",
default=1, help="Specify the number of test executors. Default value is 1.")
# output
parser.add_option("-o", "--output-dir", action="store", dest="output_dir", default="test-output", metavar="dir",
help="Specify the output dir (relative to workspace).")
parser.add_option("-r", "--report-dir", action="store", dest="report_dir", default="html-report", metavar="dir",
help="Specify the html report dir (relative to output dir).")
parser.add_option("-x", "--xunit-xml", action="store", dest="xunit_xml", default="xunit-results.xml",
metavar="file", help="Specify the xunit result xml path (relative to output dir).")
# miscellaneous
parser.add_option("-l", "--listeners", action="store", dest="test_listeners", default=None, metavar="class",
help="Specify the path of test listener classes, separated by comma. "
"The listener class should implement class TestListener in ptest.plistener "
"The listener path format is: package.module.class "
"NOTE: 1. ptest ONLY searches modules under --workspace, --python-paths and sys.path "
"2. The listener class must be thread safe if you set -n(--test-executor-number) greater than 1.")
parser.add_option("-v", "--verbose", action="store_true", |
eunchong/build | third_party/sqlalchemy_migrate_0_7_1/migrate/tests/versioning/test_genmodel.py | Python | bsd-3-clause | 7,057 | 0.008219 | # -*- coding: utf-8 -*-
import os
import sqlalchemy
from sqlalchemy import *
from nose.tools import eq_
from migrate.versioning import genmodel, schemadiff
from migrate.changeset import schema, SQLA_06
from migrate.tests import fixture
class TestSchemaDiff(fixture.DB):
table_name = 'tmp_schemadiff'
level = fixture.DB.CONNECT
def _setup(self, url):
super(TestSchemaDiff, self)._setup(url)
self.meta = MetaData(self.engine, reflect=True)
self.meta.drop_all() # in case junk tables are lying around in the test database
self.meta = MetaData(self.engine, reflect=True) # needed if we just deleted some tables
self.table = Table(self.table_name, self.meta,
Column('id',Integer(), primary_key=True),
Column('name', UnicodeText()),
Column('data', UnicodeText()),
)
def _teardown(self):
if self.table.exists():
self.meta = MetaData(self.engine, reflect=True)
self.meta.drop_all()
super(TestSchemaDiff, self)._teardown()
def _applyLatestModel(self):
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
genmodel.ModelGenerator(diff,self.engine).applyModel()
@fixture.usedb()
def test_functional(self):
def assertDiff(isDiff, tablesMissingInDatabase, tablesMissingInModel, tablesWithDiff):
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
eq_(bool(diff), isDiff)
eq_(
(diff.tables_missing_from_B,
diff.tables_missing_from_A,
diff.tables_different.keys()),
(tablesMissingInDatabase,
tablesMissingInModel,
tablesWithDiff)
)
# Model is defined but database is empty.
assertDiff(True, [self.table_name], [], [])
# Check Python upgrade and downgrade of database from updated model.
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
decls, upgradeCommands, downgradeCommands = genmodel.ModelGenerator(diff,self.engine).toUpgradeDowngradePython()
self.assertEqualsIgnoreWhitespace(decls, '''
from migrate.changeset import schema
meta = MetaData()
tmp_schemadiff = Table('tmp_schemadiff', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', UnicodeText(length=None)),
Column('data', UnicodeText(length=None)),
)
''')
self.assertEqualsIgnoreWhitespace(upgradeCommands,
'''meta.bind = migrate_engine
tmp_schemadiff.create()''')
self.assertEqualsIgnoreWhitespace(downgradeCommands,
'''meta.bind = migrate_engine
tmp_schemadiff.drop()''')
# Create table in database, now model should match database.
self._applyLatestModel()
assertDiff(False, [], [], [])
# Check Python code gen from database.
diff = schemadiff.getDiffOfModelAgainstDatabase(MetaData(), self.engine, excludeTables=['migrate_version'])
src = genmodel.ModelGenerator(diff,self.engine).toPython()
exec src in locals()
c1 = Table('tmp_schemadiff', self.meta, autoload=True).c
c2 = tmp_schemadiff.c
self.compare_columns_equal(c1, c2, ['type'])
# TODO: get rid of ignoring type
if not self.engine.name == 'oracle':
# Add data, later we'll make sure it's still present.
result = self.engine.execute(self.table.insert(), id=1, name=u'mydata')
if SQLA_06:
dataId = result.inserted_primary_key[0]
else:
dataId = result.last_inserted_ids()[0]
# Modify table in model (by removing it and adding it back to model) -- drop column data and add column data2.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',Integer(),nullable=True),
)
assertDiff(True, [], [], [self.table_name])
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
assertDiff(False, [], [], [])
if not self.engine.name == 'oracle':
# Make sure data is still present.
result = self.engine.execute(self.table.select(self.table.c.id==dataId))
rows = result.fetchall()
eq_(len(rows), 1)
eq_(rows[0].name, 'mydata')
# Add data, later we'll make sure it's still present.
result = self.engine.execute(self.table.insert(), id=2, name=u'mydata2', data2=123)
if SQLA_06:
dataId2 = result.inserted_primary_key[0]
else:
dataId2 = result.last_inserted_ids()[0]
# Change column type in model.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',String(255),nullable=True),
)
# XXX test type diff
return
assertDiff(True, [], [], [self.table_name])
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
as | sertDiff(False, [], [], [])
if not self.engine.name == 'oracle':
# Make sure data is still present.
result = self.engine.execute(self.table.select(self. | table.c.id==dataId2))
rows = result.fetchall()
self.assertEquals(len(rows), 1)
self.assertEquals(rows[0].name, 'mydata2')
self.assertEquals(rows[0].data2, '123')
# Delete data, since we're about to make a required column.
# Not even using sqlalchemy.PassiveDefault helps because we're doing explicit column select.
self.engine.execute(self.table.delete(), id=dataId)
if not self.engine.name == 'firebird':
# Change column nullable in model.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',String(255),nullable=False),
)
assertDiff(True, [], [], [self.table_name]) # TODO test nullable diff
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
assertDiff(False, [], [], [])
# Remove table from model.
self.meta.remove(self.table)
assertDiff(True, [], [self.table_name], [])
|
aalto-speech/flatcat | flatcat/__init__.py | Python | bsd-2-clause | 1,193 | 0.006706 | #!/usr/bin/env python
"""
Morfessor 2.0 FlatCat - Python implementation of
the FlatCat variant of the Morfessor method
"""
import logging
# Public API of the flatcat package (re-exported at the bottom of the file).
__all__ = ['MorfessorException', 'ArgumentException', 'FlatcatIO',
           'FlatcatModel', 'flatcat_main', 'get_flatcat_argparser',
           'MorphUsageProperties', 'HeuristicPostprocessor']

__version__ = '1.0.8'
__author__ = 'Stig-Arne Gronroos'
__author_email__ = "morfessor@cis.hut.fi"

# Module-wide toggle read by the progress-reporting utilities.
show_progress_bar = True

_logger = logging.getLogger(__name__)
def get_version(numeric=False):
    """Return the package version, optionally without the "FlatCat " prefix."""
    return __version__ if numeric else 'FlatCat {}'.format(__version__)
# The public api imports need to be at the end of the file,
# so that the package global names are available to the modules
# when they are imported.
from .flatcat import FlatcatModel, AbstractSegmenter
from .flatcat import FlatcatAnnotatedCorpusEncoding
from .categorizationscheme import MorphUsageProperties, HeuristicPostprocessor
from .categorizationscheme import WORD_BOUNDARY, CategorizedMorph
from .cmd import flatcat_main, get_flatcat_argparser
from .exception import MorfessorException, ArgumentException
from .io import FlatcatIO
from .utils import _progress
|
chromium/chromium | third_party/blink/tools/blinkpy/w3c/wpt_github_unittest.py | Python | bsd-3-clause | 15,485 | 0.000452 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.w3c.chromium_commit_mock import MockChromiumCommit
from blinkpy.w3c.common import EXPORT_PR_LABEL
from blinkpy.w3c.wpt_github import MAX_PR_HISTORY_WINDOW, GitHubError, MergeError, PullRequest, WPTGitHub
class WPTGitHubTest(unittest.TestCase):
def generate_pr_item(self, pr_number, state='closed'):
return {
'title': 'Foobar',
'number': pr_number,
'body': 'description',
'state': state,
'labels': [{
'name': EXPORT_PR_LABEL
}]
}
def setUp(self):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad')
def test_init(self):
self.assertEqual(self.wpt_github.user, 'rutabaga')
self.assertEqual(self.wpt_github.token, 'decafbad')
def test_constructor_throws_on_pr_history_window_too_large(self):
with self.assertRaises(ValueError):
self.wpt_github = WPTGitHub(
MockHost(),
user='rutabaga',
token='decafbad',
pr_history_window=MAX_PR_HISTORY_WINDOW + 1)
def test_auth_token(self):
self.assertEqual(
self.wpt_github.auth_token(),
base64.encodestring(
'rutabaga:decafbad'.encode('utf-8')).strip().decode('utf-8'))
def test_extract_link_next(self):
link_header = (
'<https://api.github.com/user/repos?page=1&per_page=100>; rel="first", '
'<https://api.github.com/user/repos?page=2&per_page=100>; rel="prev", '
'<https://api.github.com/user/repos?page=4&per_page=100>; rel="next", '
'<https://api.github.com/user/repos?page=50&per_page=100>; rel="last"'
)
self.assertEqual(
self.wpt_github.extract_link_next(link_header),
'/user/repos?page=4&per_page=100')
def test_extract_link_next_not_found(self):
self.assertIsNone(self.wpt_github.extract_link_next(''))
def test_recent_failing_chromium_exports_single_page(s | elf):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad', pr_history_window=1)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'headers': {
'Link': ''
},
'body':
| json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(1)]
})
},
]
self.assertEqual(
len(self.wpt_github.recent_failing_chromium_exports()), 1)
def test_recent_failing_chromium_exports_all_pages(self):
self.wpt_github = WPTGitHub(MockHost(),
user='rutabaga',
token='decafbad',
pr_history_window=1)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'headers': {
'Link':
'<https://api.github.com/resources?page=2>; rel="next"'
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(1)]
})
},
{
'status_code':
200,
'headers': {
'Link': ''
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(2)]
})
},
]
self.assertEqual(
len(self.wpt_github.recent_failing_chromium_exports()), 2)
def test_recent_failing_chromium_exports_throws_github_error(self):
self.wpt_github.host.web.responses = [
{
'status_code': 204
},
]
with self.assertRaises(GitHubError):
self.wpt_github.recent_failing_chromium_exports()
def test_all_pull_requests_single_page(self):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad', pr_history_window=1)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'headers': {
'Link': ''
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(1)]
})
},
]
self.assertEqual(len(self.wpt_github.all_pull_requests()), 1)
def test_all_pull_requests_all_pages(self):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad', pr_history_window=2)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'headers': {
'Link':
'<https://api.github.com/resources?page=2>; rel="next"'
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(1)]
})
},
{
'status_code':
200,
'headers': {
'Link': ''
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(2)]
})
},
]
self.assertEqual(len(self.wpt_github.all_pull_requests()), 2)
def test_all_pull_requests_reaches_pr_history_window(self):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad', pr_history_window=2)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'headers': {
'Link':
'<https://api.github.com/resources?page=2>; rel="next"'
},
'body':
json.dumps({
'incomplete_results': False,
'items': [self.generate_pr_item(1)]
})
},
{
'status_code':
200,
'headers': {
'Link': ''
},
'body':
json.dumps({
'incomplete_results':
False,
'items':
[self.generate_pr_item(2),
self.generate_pr_item(3)]
})
},
]
self.assertEqual(len(self.wpt_github.all_pull_requests()), 2)
def test_all_pull_requests_throws_github_error_on_non_200(self):
self.wpt_github.host.web.responses = [
{
'status_code': 204
},
]
with self.assertRaises(GitHubError):
self.wpt_github.all_pull_requests()
def test_all_pull_requests_throws_github_error_when_incomplete(self):
self.wpt_github = WPTGitHub(
MockHost(), user='rutabaga', token='decafbad', pr_history_window=1)
self.wpt_github.host.web.responses = [
{
'status_code':
200,
'body':
json.dumps({
'incomplete_results': True,
'items': [self.generate_pr_item(1)]
})
},
]
with self.assertRaises(GitHubError):
self.wpt_github.all_pull_requests()
def test_all_pull_requests_throws_github_error_when_too_few_prs(self):
s |
karstenw/nodebox-pyobjc | libs/pathmatics/pathmatics.py | Python | mit | 2,790 | 0.007527 | from math import sqrt, pow
def linepoint(t, x0, y0, x1, y1):
    """Return the (x, y) coordinates of the point at parameter t on a line.

    The t parameter is a number between 0.0 and 1.0; (x0, y0) is the start
    of the line and (x1, y1) its end. Linear interpolation between the two
    endpoints.
    """
    return (x0 + t * (x1 - x0), y0 + t * (y1 - y0))
def linelength(x0, y0, x1, y1):
    """Return the Euclidean length of the segment (x0, y0)-(x1, y1).

    Simplified: squaring already discards the sign, so the abs() calls in
    the original were redundant, and ``d * d`` avoids the pow() calls.
    """
    dx = x1 - x0
    dy = y1 - y0
    return sqrt(dx * dx + dy * dy)
def curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3, handles=False):
    """Returns coordinates for point at t on the spline.

    Calculates the coordinates of x and y for a point
    at t on the cubic bezier spline, and its control points,
    based on the de Casteljau interpolation algorithm.

    The t parameter is a number between 0.0 and 1.0,
    x0 and y0 define the starting point of the spline,
    x1 and y1 its control point,
    x3 and y3 the ending point of the spline,
    x2 and y2 its control point.

    If the handles parameter is set,
    returns not only the point at t,
    but the modified control points of p0 and p3
    should this point split the path as well.
    """
    # First level of de Casteljau: midpoints (at parameter t) of the three
    # edges of the control polygon.
    mint = 1 - t
    x01 = x0 * mint + x1 * t
    y01 = y0 * mint + y1 * t
    x12 = x1 * mint + x2 * t
    y12 = y1 * mint + y2 * t
    x23 = x2 * mint + x3 * t
    y23 = y2 * mint + y3 * t
    # Second level: these become the control points adjacent to the split.
    out_c1x = x01 * mint + x12 * t
    out_c1y = y01 * mint + y12 * t
    out_c2x = x12 * mint + x23 * t
    out_c2y = y12 * mint + y23 * t
    # Third level: the on-curve point at t.
    out_x = out_c1x * mint + out_c2x * t
    out_y = out_c1y * mint + out_c2y * t
    if not handles:
        return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y)
    else:
        # x01/y01 and x23/y23 are the outer control points for the two
        # sub-curves created when splitting the path at t.
        return (out_x, out_y, out_c1x, out_c1y, out_c2x, out_c2y, x01, y01, x23, y23)
def curvelength(x0, y0, x1, y1, x2, y2, x3, y3, n=20):
    """Returns the length of the spline.

    Integrates the estimated length of the cubic bezier spline defined by
    x0, y0, ... x3, y3 by summing the lengths of straight segments between
    points sampled at t = 1/n, 2/n, ..., 1.0.

    The number of segments is defined by n (n=10 would add the lengths of
    lines between 0.0 and 0.1, between 0.1 and 0.2, and so on). The default
    n=20 is fine for most cases, usually resulting in a deviation of less
    than 0.01.
    """
    total = 0
    prev_x = x0
    prev_y = y0
    for step in range(n):
        t = 1.0 * (step + 1) / n
        sample = curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)
        cur_x, cur_y = sample[0], sample[1]
        total += sqrt(pow(abs(prev_x - cur_x), 2) + pow(abs(prev_y - cur_y), 2))
        prev_x = cur_x
        prev_y = cur_y
    return total
|
gragas/simpletcp | examples/echoreverse/client.py | Python | apache-2.0 | 664 | 0 | from simpletcp.clientsocket import ClientSocket
# Single-use client: the socket is good for exactly one request/response.
s1 = ClientSocket("localhost", 5000)
response = s1.send("Hello, World!")

# Reusable client (single_use=False): several sends over one connection,
# closed explicitly when done.
s2 = ClientSocket("localhost", 5000, single_use=False)
r1 = s2.send("Hello for the first time...")
r2 = s2.send("...and hello for the last!")
s2.close()

# Display the correspondence (responses arrive as bytes and must be decoded).
print("s1 sent\t\tHello, World!")
print("s1 received\t\t{}".format(response.decode("UTF-8")))
print("-------------------------------------------------")
print("s2 sent\t\tHello for the first time....")
print("s2 received\t\t{}".format(r1.decode("UTF-8")))
print("s2 sent\t\t...and hello for the last!.")
print("s2 received\t\t{}".format(r2.decode("UTF-8")))
wpoely86/easybuild-framework | easybuild/tools/repository/svnrepo.py | Python | gpl-2.0 | 6,109 | 0.002783 | # #
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Repository tools
Svn repository
:author: Stijn De Weirdt (Ghent University)
:author: Dries Verdegem (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Pieter De Baets (Ghent University)
:author: Jens Timmerman (Ghent University)
:author: Toon Willems (Ghent University)
:author: Ward Poelmans (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
"""
import getpass
import os
import socket
import tempfile
import time
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import rmtree2
from easybuild.tools.repository.filerepo import FileRepository
from easybuild.tools.utilities import only_if_module_is_available
_log = fancylogger.getLogger('svnrepo', fname=False)

# optional Python packages, these might be missing
# failing imports are just ignored
# PySVN
# HAVE_PYSVN gates SvnRepository.USABLE below: when pysvn is absent the
# repository backend is advertised as unusable instead of crashing on import.
try:
    import pysvn  # @UnusedImport
    from pysvn import ClientError  # IGNORE:E0611 pysvn fails to recognize ClientError is available
    HAVE_PYSVN = True
except ImportError:
    _log.debug("Failed to import pysvn module")
    HAVE_PYSVN = False
class SvnRepository(FileRepository):
"""
Class for svn repositories
"""
DESCRIPTION = ("An SVN repository. The 1st argument contains the "
"subversion repository location, this can be a directory or an URL. "
"The 2nd argument is a path inside the repository where to save the files.")
USABLE = HAVE_PYSVN
@only_if_module_is_available('pysvn', url='http://pysvn.tigris.org/')
def __init__(self, *args):
"""
Set self.client to None. Real logic is in setup_repo and create_working_copy
"""
self.client = None
FileRepository.__init__(self, *args)
def setup_repo(self):
"""
Set up SVN repository.
"""
self.repo = os.path.join(self.repo, self.subdir)
# try to connect to the repository
self.log.debug("Try to connect to repository %s" % self.repo)
try:
self.client = pysvn.Client()
self.client.exception_style = 0
except ClientError:
raise EasyBuildError("Svn Client initialization failed.")
try:
if not self.client.is_url(self.repo):
raise EasyBuildError("Provided repository %s is not a valid svn url", self.repo)
except ClientError:
raise EasyBuildError("Can't connect to svn repository %s", self.repo)
def create_working_copy(self):
"""
Create SVN working copy.
"""
self.wc = tempfile.mkdtemp(prefix='svn-wc-')
# check if tmppath exists
# this will trigger an error if it does not exist
try:
self.client.info2(self.repo, recurse=False)
except ClientError:
raise EasyBuildError("Getting info from %s failed.", self.wc)
try | :
res = self.client.update(self.wc)
self.log.debug("Updated to revision %s in %s" % (res, self.wc))
except C | lientError:
raise EasyBuildError("Update in wc %s went wrong", self.wc)
if len(res) == 0:
raise EasyBuildError("Update returned empy list (working copy: %s)", self.wc)
if res[0].number == -1:
# revision number of update is -1
# means nothing has been checked out
try:
res = self.client.checkout(self.repo, self.wc)
self.log.debug("Checked out revision %s in %s" % (res.number, self.wc))
except ClientError, err:
raise EasyBuildError("Checkout of path / in working copy %s went wrong: %s", self.wc, err)
def add_easyconfig(self, cfg, name, version, stats, append):
"""
Add easyconfig to SVN repository.
"""
dest = FileRepository.add_easyconfig(self, cfg, name, version, stats, append)
self.log.debug("destination = %s" % dest)
if dest:
self.log.debug("destination status: %s" % self.client.status(dest))
if self.client and not self.client.status(dest)[0].is_versioned:
# add it to version control
self.log.debug("Going to add %s (working copy: %s, cwd %s)" % (dest, self.wc, os.getcwd()))
self.client.add(dest)
def commit(self, msg=None):
"""
Commit working copy to SVN repository
"""
tup = (socket.gethostname(), time.strftime("%Y-%m-%d_%H-%M-%S"), getpass.getuser(), msg)
completemsg = "EasyBuild-commit from %s (time: %s, user: %s) \n%s" % tup
try:
self.client.checkin(self.wc, completemsg, recurse=True)
except ClientError, err:
raise EasyBuildError("Commit from working copy %s (msg: %s) failed: %s", self.wc, msg, err)
def cleanup(self):
"""
Clean up SVN working copy.
"""
try:
rmtree2(self.wc)
except OSError, err:
raise EasyBuildError("Can't remove working copy %s: %s", self.wc, err)
|
MISP/MISP | tools/misp_retention.py | Python | agpl-3.0 | 3,874 | 0.003872 | #!/usr/bin/env python3
#
# This script requires the MISP retention taxonomy is installed and enabled
# See https://github.com/MISP/misp-taxonomies/tree/master/retention/retention
from pymisp import ExpandedPyMISP, MISPEvent
from datetime import datetime
from dateutil.relativedelta import relativedelta
import re
from keys import misp_url, misp_key
# pip install pymisp python-dateutil
class misphelper(object):
    """Helper class around a MISP object.

    Finds events tagged with retention:<N><unit> tags, and once an event's
    date is older than the retention period, strips the IDS flag from its
    ip-src/ip-dst attributes and tags the event retention:expired.
    """

    taxonomyId = None
    expiredTag = "retention:expired"

    def __init__(self):
        self.misp = ExpandedPyMISP(url=misp_url,
                                   key=misp_key,
                                   ssl=True)
        self.taxonomyId = self.searchTaxonomy()

    def searchTaxonomy(self):
        """Return the id of the enabled 'retention' taxonomy, or raise."""
        res = self.misp.taxonomies()
        for tax in res:
            if (tax["Taxonomy"]["namespace"] == "retention" and tax["Taxonomy"]["enabled"]):
                return tax["Taxonomy"]["id"]
        raise Exception("Could not find the 'retention' Taxonomy in MISP. Please enable this first!")

    def processEvent(self, event):
        """Expire one event: clear to_ids on ip attributes, tag it, republish."""
        mevent = MISPEvent()
        mevent.from_dict(Event=event)
        changed = False
        # Plain event attributes.
        for attr in mevent.attributes:
            if (attr["type"] == "ip-dst" or attr["type"] == "ip-src") and attr["to_ids"]:
                print("Removing IDS flag in event '{}' on attr '{}'".format(mevent.id, attr["value"]))
                changed = True
                attr["to_ids"] = False
                self.misp.update_attribute(attr)
        # Attributes nested inside MISP objects.
        for obj in mevent.objects:
            for attr in obj.Attribute:
                if (attr["type"] == "ip-dst" or attr["type"] == "ip-src") and attr["to_ids"]:
                    print("Removing IDS flag in event '{}' on attr '{}'".format(mevent.id, attr["value"]))
                    changed = True
                    attr["to_ids"] = False
                    self.misp.update_attribute(attr)
        # Tag as expired even when no attribute changed, so the event is not
        # picked up again by subsequent runs.
        self.misp.tag(mevent, self.expiredTag, True)
        if changed:
            self.misp.update_event(mevent.id, mevent)
            self.misp.publish(mevent)

    def findEventsAfterRetention(self, events, retention):
        """Expire every event in *events* older than (amount, unit) *retention*."""
        for event in events:
            ts = datetime.strptime(event["Event"]["date"], "%Y-%m-%d")
            now = datetime.utcnow()
            if retention[1] == "d":
                delta = relativedelta(days=int(retention[0]))
            elif retention[1] == "w":
                delta = relativedelta(weeks=int(retention[0]))
            elif retention[1] == "m":
                delta = relativedelta(months=int(retention[0]))
            elif retention[1] == "y":
                delta = relativedelta(years=int(retention[0]))
            if ts < (now - delta):
                self.processEvent(event["Event"])

    def queryRetentionTags(self):
        """Walk all tags of the retention taxonomy and expire matching events."""
        res = self.misp.get_taxonomy(self.taxonomyId)
        for tag in res['entries']:
            # BUG FIX: the original character class "[d,w,m,y]" also matched a
            # literal comma, which would later leave `delta` unbound in
            # findEventsAfterRetention; "[dwmy]" routes such tags to the
            # explicit error below instead.
            m = re.match(r"^retention:([0-9]+)([dwmy])$", tag["tag"])
            if m:
                tagSearch = self.misp.build_complex_query(and_parameters=[tag["tag"]], not_parameters=[self.expiredTag])
                events = self.misp.search(published=True, tags=tagSearch)
                self.findEventsAfterRetention(events, (m.group(1), m.group(2)))
            else:
                # set expiredTag to hidden if it was accidentally enabled by "enable all"
                if tag["tag"] == self.expiredTag:
                    if tag["existing_tag"]["Tag"]["hide_tag"] is False:
                        tag["existing_tag"]["Tag"]["hide_tag"] = True
                        self.misp.update_tag(tag["existing_tag"]["Tag"])
                else:
                    raise Exception("Could not parse retention time/unit from tag: '{}'.".format(tag["tag"]))
if __name__ == "__main__":
    # Connect to MISP (reads misp_url/misp_key from keys.py) and run one
    # retention sweep over all retention-tagged events.
    misp = misphelper()
    misp.queryRetentionTags()
|
apeng2012/apeng-kicad | tools/markcomp/markcomp.py | Python | mit | 4,277 | 0.002806 | # coding: utf-8
import argparse
from pyparsing import OneOrMore, nestedExpr
from progressbar import ProgressBar, Percentage, Bar
from time import time
from collections import defaultdict
import platform
# Progress-bar state shared with the parser callback in KiCAD_PCB._parsePCB:
# last percentage value shown, and whether the PyPy hint was already printed.
last_val = -1
checked = False
# Component value to mark on the board (resistors/capacitors/inductors only).
mark_val = "0.1u"
class KiCAD_PCB:
def __init__(self, filename):
self.ast = self._parsePCB(filename)
mark_comps = self.generate_mark_components_coordinate(self.ast)
self.mark_pcb(mark_comps, filename)
def mark_pcb(self, mark_comps, filename):
coordinates, drawing_num = mark_comps
with open(filename, 'r') as f:
data = f.read()
cnt = len(coordinates)
old_drawings = "(drawings %d)" % (drawing_num)
now_drawings = "(drawings %d)" % (drawing_num + cnt + 1)
data = data.replace(old_drawings, now_drawings)
data = data.replace("\n)", "\n")
for x,y in coordinates:
data += " (gr_circle(center {x} {y}) (end {xx} {y}) (layer F.Fab) (width 0.20066))\n".format(
x=x, y=y, xx=x+1)
data += \
"""
(gr_text %s->%d (at 30 30) (layer F.Fab)
(effects (font (size 1.5 1.5) (thickness 0.3)))
)
)
""" %(mark_val, cnt)
writefilename = filename.split(".")[0] + mark_val.replace(".", "_") +".kicad_pcb"
with open(writefilename, 'w') as wf:
wf.write(data)
def generate_mark_components_coordinate(self, ast):
coordinate = []
for i in ast[0]:
token = i[0]
if token == 'general': # find drawings
attr = self.pick(i[1:], 'drawings')
drawings = int(attr['drawings'][0])
elif token == 'module': # a module!
footprint = i[1]
lst = i[2:]
attr = self.pick(lst, 'at', 'fp_text value', 'fp_text reference')
x, y = float(attr['at'][0]), float(attr['at'][1])
reference = attr['fp_text reference'][0]
value = attr['fp_text value'][0]
package = None
if reference[0].lower() in ['r', 'l', 'c']:
# is resistor, capacitor or inductor
if value == mark_val:
coordinate.append((float(x), float(y)))
return coordinate, drawings
def pick(self, lst, *attribute_names):
attr_pool = defaultdict(list)
for i in attribute_names:
values = i.split()
attr_pool[values[0]].append(values)
obj = {}
for item in lst:
if item[0] in attr_pool:
token_len = len(attr_pool[item[0]][0])
if token_len == 1: # simple case, direct match
obj[item[0]] = item[1:]
else: # complex, try matching tail tokens
for tokens in attr_pool[item[0]]:
if item[:len(tokens)] == tokens: # match
obj[' '.join(tokens)] = item[len(tokens):]
break
return obj
def _parsePCB(self, | filename):
with open(filename) as f:
data = f.read()
start_time = time()
total_len = len(data)
bar = ProgressBar(widgets=['Parsing...', Percentage(), ' ', Bar('=', '|')], maxval=100).start()
def cb(locn, tokens):
global last_val, checked
val = locn * 100 / total_len
if last_val != val:
cur_time = time()
if not checked and cur_time - start_time > 3: # takes too long, check if pypy e | nabled
if not platform.python_implementation().startswith('PyPy'):
print "Parsing too slow? Consider using PyPy to accelerate the progress."
checked = True
bar.update(locn * 100 / total_len)
last_val = val
ast = OneOrMore(nestedExpr().setParseAction(cb)).parseString(data, parseAll=True)
bar.finish()
return ast
def init_argparse():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filename', help='KiCAD pcb filename')
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = init_argparse()
    # Constructing KiCAD_PCB parses the board and writes the marked copy as a
    # side effect; the instance itself is not used afterwards. (The original
    # misleadingly bound it to a variable named `filename`.)
    KiCAD_PCB(args.filename)
|
SeungGiJeong/SK_FastIR | health/windows8_1StateMachine.py | Python | gpl-3.0 | 2,827 | 0.003184 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from statemachine import _Statemachine
class Windows8_1StateMachine(_Statemachine):
    """Windows 8.1 state machine.

    Thin specialization of _Statemachine: every collector and CSV writer
    simply delegates to the base class. (Method names keep the historical
    "proccess" spelling because external callers dispatch on them.)
    """
    def __init__(self, params):
        _Statemachine.__init__(self, params)
    def _list_share(self):
        return super(Windows8_1StateMachine, self)._list_share()
    def _list_running(self):
        return super(Windows8_1StateMachine, self)._list_running()
    def _list_drives(self):
        return super(Windows8_1StateMachine, self)._list_drives()
    def _list_network_drives(self):
        return super(Windows8_1StateMachine, self)._list_network_drives()
    def _list_sessions(self):
        return super(Windows8_1StateMachine, self)._list_sessions()
    def _list_scheduled_jobs(self):
        return super(Windows8_1StateMachine, self)._list_scheduled_jobs()
    def _list_network_adapters(self):
        return super(Windows8_1StateMachine, self)._list_network_adapters()
    def _list_arp_table(self):
        return super(Windows8_1StateMachine, self)._list_arp_table()
    def _list_route_table(self):
        return super(Windows8_1StateMachine, self)._list_route_table()
    def _list_sockets_network(self):
        return super(Windows8_1StateMachine, self)._list_sockets_network()
    def _list_sockets_services(self):
        # NOTE(review): delegates to _list_services despite its name — looks
        # intentional (mirrors sibling state machines) but worth confirming.
        return super(Windows8_1StateMachine, self)._list_services()
    def _list_kb(self):
        return super(Windows8_1StateMachine, self)._list_kb()
    def csv_list_drives(self):
        super(Windows8_1StateMachine, self)._csv_list_drives(self._list_drives())
    def csv_list_network_drives(self):
        super(Windows8_1StateMachine, self)._csv_list_network_drives(self._list_network_drives())
    def csv_list_share(self):
        super(Windows8_1StateMachine, self)._csv_list_share(self._list_share())
    def csv_list_running_proccess(self):
        super(Windows8_1StateMachine, self)._csv_list_running_process(self._list_running())
    def csv_hash_running_proccess(self):
        # BUG FIX: previously called super(Windows10StateMachine, self), a
        # name that is undefined in this module and raised NameError.
        super(Windows8_1StateMachine, self)._csv_hash_running_process(self._list_running())
    def csv_list_sessions(self):
        super(Windows8_1StateMachine, self)._csv_list_sessions(self._list_sessions())
    def csv_list_arp_table(self):
        super(Windows8_1StateMachine, self)._csv_list_arp_table(self._list_arp_table())
    def csv_list_route_table(self):
        super(Windows8_1StateMachine, self)._csv_list_route_table(self._list_route_table())
    def csv_list_sockets_networks(self):
        super(Windows8_1StateMachine, self)._csv_list_sockets_network(self._list_sockets_network())
    def csv_list_services(self):
        super(Windows8_1StateMachine, self)._csv_list_services(self._list_services())
    def csv_list_kb(self):
        super(Windows8_1StateMachine, self)._csv_list_kb(self._list_kb())
|
dongsenfo/pymatgen | pymatgen/electronic_structure/tests/test_bandstructure.py | Python | mit | 23,174 | 0.003884 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import json
from io import open
import warnings
from pymatgen.electronic_structure.bandstructure import Kpoint
from pymatgen import Lattice
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine, get_reconstructed_band_structure, \
LobsterBandStructureSymmLine
from pymatgen.util.testing import PymatgenTest
from monty.serialization import loadfn
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class KpointTest(unittest.TestCase):
def setUp(self):
self.lattice = Lattice.cubic(10.0)
self.kpoint = Kpoint([0.1, 0.4, -0.5], self.lattice, label="X")
def test_properties(self):
self.assertEqual(self.kpoint.frac_coords[0], 0.1)
self.assertEqual(self.kpoint.frac_coords[1], 0.4)
self.assertEqual(self.kpoint.frac_coords[2], -0.5)
self.assertEqual(self.kpoint.a, 0.1)
self.assertEqual(self.kpoint.b, 0.4)
self.assertEqual(self.kpoint.c, -0.5)
self.assertEqual(self.lattice, Lattice.cubic(10.0))
self.assertEqual(self.kpoint.cart_coords[0], 1.0)
self.assertEqual(self.kpoint.cart_coords[1], 4.0)
self.assertEqual(self.kpoint.cart_coords[2], -5.0)
self.assertEqual(self.kpoint.label, "X")
class Ban | dStructureSymmLine_test(PymatgenTest):
def setUp(self):
self.bs = loadfn(os.path.join(test_dir, " | Cu2O_361_bandstructure.json"))
self.bs2 = loadfn(os.path.join(test_dir, "CaO_2605_bandstructure.json"))
self.bs_spin = loadfn(os.path.join(test_dir, "NiO_19009_bandstructure.json"))
self.bs_cbm0 = loadfn(os.path.join(test_dir, "InN_22205_bandstructure.json"))
self.bs_cu = loadfn(os.path.join(test_dir, "Cu_30_bandstructure.json"))
self.bs_diff_spins = loadfn(os.path.join(test_dir, "VBr2_971787_bandstructure.json"))
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_basic(self):
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][10][12][0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.assertArrayAlmostEqual(self.bs.projections[Spin.up][25][0][
Orbital.dyz.value],
[0.0, 0.0, 0.0011, 0.0219, 0.0219, 0.069])
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][25][10]['O'], 0.0328)
self.assertAlmostEqual(
self.bs.get_projection_on_elements()[Spin.up][22][25]['Cu'], 0.8327)
proj = self.bs.get_projections_on_elements_and_orbitals({'Cu': ['s',
'd']})
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['s'], 0.0027)
self.assertAlmostEqual(
proj[Spin.up][25][0]['Cu']['d'], 0.8495999999999999)
self.assertEqual(self.bs2.nb_bands, 16)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertAlmostEqual(self.bs2.bands[Spin.up][5][10], 0.5608)
self.assertEqual(self.bs2.branches[5]['name'], "L-U")
self.assertEqual(self.bs2.branches[5]['start_index'], 80)
self.assertEqual(self.bs2.branches[5]['end_index'], 95)
self.assertAlmostEqual(self.bs2.distance[70], 4.2335127528765737)
self.assertEqual(self.bs_spin.nb_bands, 27)
self.assertAlmostEqual(self.bs_spin.bands[Spin.up][5][10], 0.262)
self.assertAlmostEqual(self.bs_spin.bands[Spin.down][5][10],
1.6156)
def test_properties(self):
self.one_kpoint = self.bs2.kpoints[31]
self.assertEqual(self.one_kpoint.frac_coords[0], 0.5)
self.assertEqual(self.one_kpoint.frac_coords[1], 0.25)
self.assertEqual(self.one_kpoint.frac_coords[2], 0.75)
self.assertAlmostEqual(self.one_kpoint.cart_coords[0], 0.64918757)
self.assertAlmostEqual(self.one_kpoint.cart_coords[1], 1.29837513)
self.assertAlmostEqual(self.one_kpoint.cart_coords[2], 0.0)
self.assertEqual(self.one_kpoint.label, "W")
self.assertAlmostEqual(self.bs2.efermi, 2.6211967, "wrong fermi energy")
def test_get_branch(self):
self.assertAlmostEqual(self.bs2.get_branch(110)[0]['name'], "U-W")
def test_get_direct_band_gap_dict(self):
direct_dict = self.bs_diff_spins.get_direct_band_gap_dict()
self.assertEqual(direct_dict[Spin.down]['value'], 4.5365)
for bs in [self.bs2, self.bs_spin]:
dg_dict = bs.get_direct_band_gap_dict()
for spin, v in bs.bands.items():
kpt = dg_dict[spin]['kpoint_index']
vb, cb = dg_dict[spin]['band_indices']
gap = v[cb][kpt] - v[vb][kpt]
self.assertEqual(gap, dg_dict[spin]['value'])
self.assertRaises(ValueError, self.bs_cu.get_direct_band_gap_dict)
def test_get_direct_band_gap(self):
self.assertAlmostEqual(self.bs2.get_direct_band_gap(),
4.0125999999999999)
self.assertTrue(self.bs_diff_spins.get_direct_band_gap() > 0)
self.assertEqual(self.bs_cu.get_direct_band_gap(), 0)
def test_is_metal(self):
self.assertFalse(self.bs2.is_metal(), "wrong metal assignment")
self.assertFalse(self.bs_spin.is_metal(), "wrong metal assignment")
self.assertTrue(self.bs_cu.is_metal(), "wrong metal assignment")
def test_get_cbm(self):
cbm = self.bs2.get_cbm()
self.assertAlmostEqual(cbm['energy'], 5.8709, "wrong CBM energy")
self.assertEqual(cbm['band_index'][Spin.up][0], 8, "wrong CBM band index")
self.assertEqual(cbm['kpoint_index'][0], 15, "wrong CBM kpoint index")
self.assertEqual(cbm['kpoint'].frac_coords[0], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].frac_coords[2], 0.5, "wrong CBM kpoint frac coords")
self.assertEqual(cbm['kpoint'].label, "X", "wrong CBM kpoint label")
cbm_spin = self.bs_spin.get_cbm()
self.assertAlmostEqual(cbm_spin['energy'], 8.0458, "wrong CBM energy")
self.assertEqual(cbm_spin['band_index'][Spin.up][0], 12, "wrong CBM band index")
self.assertEqual(len(cbm_spin['band_index'][Spin.down]), 0, "wrong CBM band index")
self.assertEqual(cbm_spin['kpoint_index'][0], 0, "wrong CBM kpoint index")
self.assertEqual(cbm_spin['kpoint'].frac_coords[0], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[1], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].frac_coords[2], 0.0, "wrong CBM kpoint frac coords")
self.assertEqual(cbm_spin['kpoint'].label, "\\Gamma", "wrong CBM kpoint label")
def test_get_vbm(self):
vbm = self.bs2.get_vbm()
self.assertAlmostEqual(vbm['energy'], 2.2361, "wrong VBM energy")
self.assertEqual(len(vbm['band_index'][Spin.up]), 3, "wrong VBM number of bands")
self.assertEqual(vbm['band_index'][Spin.up][0], 5, "wrong VBM band index")
self.assertEqual(vbm['kpoint_index'][0], 0, "wrong VBM kpoint index")
self.assertEqual(vbm['kpoint'].frac_coords[0], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[1], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].frac_coords[2], 0.0, "wrong VBM kpoint frac coords")
self.assertEqual(vbm['kpoint'].label, "\\Gamma", "wrong VBM kpoint label")
vbm_spin = self.bs_spin.get_vbm()
self.assertAlmostEqual(vbm_spin['energy'], 5.731, "wrong VBM energy")
self.assertEqual(len(vbm_spin['band_index'][Spin.up]), 2, "wrong VBM number of bands")
self.assertEqual(len(vbm_spin['band_index'][Spin.down]), 0, "wrong VBM |
andrewpaulreeves/soapy | soapy/pyqtgraph/canvas/TransformGuiTemplate_pyqt5.py | Python | gpl-3.0 | 2,586 | 0.001547 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './pyqtgraph/canvas/TransformGuiTemplate.ui'
#
# Created: Wed Mar 26 15:09:28 2014
# by: PyQt5 UI code generator 5.0.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(224, 117)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.translateLabel = QtWidgets.QLabel(Form)
self.translateLabel.setObjectName("translateLabel")
self.verticalLayout.addWidget(self.translateLabel)
self.rotateLabel = QtWidgets.QLabel(Form)
self.rotateLabel.setObjectName("rotateLabel")
self.verticalLayout.addWidget(self.rotateLabel)
self.scaleLabel = QtWidgets.QLabel(Form)
self.scaleLabel.setObjectName("scaleLabel")
self.verticalLayout.addWidget(self.scaleLabel)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.mirrorImageBtn = QtWidgets.QPushButton(Form)
self.mirrorImageBtn.setToolTip("")
self.mirrorImageBtn.setObjectName("mirrorImageBtn")
self.horizontalLayout.addWidget(self.mirrorImageBtn)
self.reflectImageBtn = QtWidgets.QPushButton(Form)
self.reflectImageBtn.setObjectName("reflectImageBtn")
self.horizontalLayout.addWidge | t(self.reflectImageBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.translateLabel.setText(_translate("Form", "Translate:"))
self.rotateLabel.setText(_translate("Form", "Rotate:"))
self.scaleLabel.setText(_translate("Form", "Scale:" | ))
self.mirrorImageBtn.setText(_translate("Form", "Mirror"))
self.reflectImageBtn.setText(_translate("Form", "Reflect"))
|
shaneoc/atom | atom/http/headers.py | Python | mit | 7,250 | 0.010069 | from urlparse import urlparse, parse_qs
from datetime import datetime
from atom.logger import get_logger
from atom.http.exceptions import HTTPSyntaxError
log = get_logger(__name__)
status_codes = {
100: 'Continue',
200: 'OK',
302: 'Found',
400: 'Bad Request',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
411: 'Length Required',
500: 'Internal Server Error',
}
days = ['Mon','Tue','Wed','Thu','Fri','Sat','Sun']
months = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
class HTTPHeaders(object):
def __init__(self, type_):
assert type_ in ('request', 'response')
self.type = type_
self._headers = []
self._chunked = None
self._content_length = None
@classmethod
def parse(cls, type_, lines):
self = cls(type_)
first_line = lines[0].split(None, 2)
if len(first_line) < 3:
raise HTTPSyntaxError('Invalid first line: "{}"'.format(lines[0]))
if type_ == 'request':
self.method, self.uri, self.http_version = first_line
else:
self.http_version, self.code, self.message = first_line
try:
self.code = int(self.code)
except ValueError:
raise HTTPSyntaxError('Invalid first line: "{}"'.format(lines[0]))
# TODO make this work with HTTP/1.0 and >HTTP/1.1 too
if self.http_version != 'HTTP/1.1':
raise HTTPSyntaxError('Unknown HTTP version: "{}"'.format(self.http_version))
cur_header = None
for line in lines[1:]:
if line[0] in ' \t':
if cur_header == None:
raise HTTPSyntaxError('Invalid header: "{}"'.format(line))
cur_header += '\r\n' + line
else:
if cur_header != None:
self._add_raw(cur_header)
cur_header = line
if cur_header != None:
self._add_raw(cur_header)
self.check_syntax()
return self
def _add_raw(self, header):
parts = header.split(':',1)
if len(parts) != 2:
raise HTTPSyntaxError('Invalid header: "{}"'.format(header))
self.add(parts[0], parts[1] | )
@classmethod
def response(cls, code, message = None):
self = cls('response')
self.http_version = 'HTTP/1.1'
self.code = int(code)
self.message = message or status_codes[code]
return self
@classmethod
def request(cls, method, uri):
self = cls('request')
self.method = method
self.uri = uri
self.http_version = 'HTTP/1.1'
return self
@property
| def raw(self):
if self.type == 'request':
ret = ['{} {} {}'.format(self.method, self.uri, self.http_version)]
else:
ret = ['{} {} {}'.format(self.http_version, self.code, self.message)]
ret.extend('{}:{}'.format(h[1], h[2]) for h in self._headers)
return '\r\n'.join(ret) + '\r\n\r\n'
def add(self, name, value):
self._headers.append([name.lower().strip(), name, ' ' + value])
self._updated()
def remove(self, name):
self._headers = [h for h in self._headers if h[0] != name.lower()]
self._updated()
def set(self, name, value):
self.remove(name)
self.add(name, value)
def get(self, name):
return [h[2].strip() for h in self._headers if h[0] == name.lower()]
def get_single(self, name):
vals = self.get(name)
if len(vals) > 1:
raise HTTPSyntaxError('Header "{}" present multiple times'.format(name))
return vals[0] if len(vals) != 0 else None
def check_syntax(self):
self.get_chunked()
self.get_content_length()
return True
def _updated(self):
self._chunked = None
self._content_length = None
def get_chunked(self):
if self._chunked == None:
te_headers = [h[2] for h in self._headers if h[0] == 'transfer-encoding']
encodings = [value.lower().strip() for header in te_headers for value in header.split(';')]
self._chunked = False
if len(encodings) > 0:
self._chunked = (encodings[-1] == 'chunked')
if any(e == 'chunked' for e in encodings[:-1]):
raise HTTPSyntaxError('Invalid Transfer-Encoding')
return self._chunked
def get_content_length(self):
if self._content_length == None:
if any(h[0] == 'transfer-encoding' for h in self._headers):
return None
cl_headers = [h[2] for h in self._headers if h[0] == 'content-length']
if len(cl_headers) == 1:
try:
self._content_length = int(cl_headers[0].strip())
except ValueError:
raise HTTPSyntaxError('Invalid Content-Length')
elif len(cl_headers) > 1:
raise HTTPSyntaxError('Too many Content-Length headers')
return self._content_length
@property
def path(self):
return urlparse(self.uri).path
@property
def args(self):
return parse_qs(urlparse(self.uri).query)
def set_cookie(self, name, value, expires, secure, httponly, path = '/'):
assert self.type == 'response'
cookie_str = '{}={}'.format(name, value)
if expires == False:
expires = datetime.utcfromtimestamp(2**31-1)
if expires:
t = expires.utctimetuple()
cookie_str += '; Expires={}, {:02} {} {:04} {:02}:{:02}:{:02} GMT'.format(
days[t.tm_wday], t.tm_mday, months[t.tm_mon-1], t.tm_year, t.tm_hour, t.tm_min, t.tm_sec)
if path:
cookie_str += '; Path=' + path
if secure:
cookie_str += '; Secure'
if httponly:
cookie_str += '; HttpOnly'
self.delete_cookie(name)
self.add('Set-Cookie', cookie_str)
log.debug('Set-Cookie: {}', cookie_str)
def get_cookie(self, name):
assert self.type == 'request'
cookie_values = []
for h in self._headers:
if h[0] == 'cookie':
cookies = [c.split('=') for c in h[2].split(';')]
cookie_values.extend(c[1].strip() for c in cookies if c[0].strip().lower() == name.lower())
return cookie_values
def delete_cookie(self, name):
if self.type == 'request':
for h in self._headers:
if h[0] == 'cookie':
cookies = [c.split('=') for c in h[2].split(';')]
cookies = [c for c in cookies if c[0].strip().lower() != name.lower()]
h[2] = ';'.join('='.join(c) for c in cookies)
self._headers = [h for h in self._headers if not (h[0] == 'cookie' and len(h[2]) == 0)]
else:
self._headers = [h for h in self._headers if not (h[0] == 'set-cookie' and h[2].split('=',1)[0].strip().lower() == name.lower())]
|
poldracklab/mriqcwebapi | dockereve-master/eve-app/settings.py | Python | apache-2.0 | 15,268 | 0.000131 | import os
from copy import deepcopy
bids_schema = {
# BIDS identification bits
'modality': {
'type': 'string',
'required': True
},
'subject_id': {
'type': 'string',
'required': True
},
'session_id': {'type': 'string'},
'run_id': {'type': 'string'},
'acq_id': {'type': 'string'},
'task_id': {'type': 'string'},
'run_id': {'type': 'string'},
# BIDS metadata
'AccelNumReferenceLines': {'type': 'integer'},
'AccelerationFactorPE': {'type': 'integer'},
'AcquisitionMatrix': {'type': 'string'},
'CogAtlasID': {'type': 'string'},
'CogPOID': {'type': 'string'},
'CoilCombinationMethod': {'type': 'string'},
'ContrastBolusIngredient': {'type': 'string'},
'ConversionSoftware': {'type': 'string'},
'ConversionSoftwareVersion': {'type': 'string'},
'DelayTime': {'type': 'float'},
'DeviceSerialNumber': {'type': 'string'},
'EchoTime': {'type': 'float'},
'EchoTrainLength': {'type': 'integer'},
'EffectiveEchoSpacing': {'type': 'float'},
'FlipAngle': {'type': 'integer'},
'GradientSetType': {'type': 'string'},
'HardcopyDeviceSoftwareVersion': {'type': 'string'},
'ImagingFrequency': {'type': 'integer'},
'InPlanePhaseEncodingDirection': {'type': 'string'},
'InstitutionAddress': {'type': 'string'},
'InstitutionName': {'type': 'string'},
'Instructions': {'type': 'string'},
'InversionTime': {'type': 'float'},
'MRAcquisitionType': {'type': 'string'},
'MRTransmitCoilSequence': {'type': 'string'},
'MagneticFieldStrength': {'type': 'float'},
'Manufacturer': {'type': 'string'},
'ManufacturersModelName': {'type': 'string'},
'MatrixCoilMode': {'type': 'string'},
'MultibandAccelerationFactor': {'type': 'float'},
'NumberOfAverages': {'type': 'integer'},
'NumberOfPhaseEncodingSteps': {'type': 'integer'},
'NumberOfVolumesDiscardedByScanner': {'type': 'float'},
'NumberOfVolumesDiscardedByUser': {'type': 'float'},
'NumberShots': {'type': 'integer'},
'ParallelAcquisitionTechnique': {'type': 'string'},
'ParallelReductionFactorInPlane': {'type': 'float'},
'PartialFourier': {'type': 'boolean'},
'PartialFourierDirection': {'type': 'string'},
'PatientPosition': {'type': 'string'},
'PercentPhaseFieldOfView': {'type': 'integer'},
'PercentSampling': {'type': 'integer'},
'PhaseEncodingDirection': {'type': 'string'},
'PixelBandwidth': {'type': 'integer'},
'ProtocolName': {'type': 'string'},
'PulseSequenceDetails': {'type': 'string'},
'PulseSequenceType': {'type': 'string'},
'ReceiveCoilName': {'type': 'string'},
'RepetitionTime': {'type': 'float'},
'ScanOptions': {'type': 'string'},
'ScanningSequence': {'type': 'string'},
'SequenceName': {'type': 'string'},
'SequenceVariant': {'type': 'string'},
'SliceEncodingDirection': {'type': 'string'},
'SoftwareVersions': {'type': 'string'},
'TaskDescription': {'type': 'string'},
'TotalReadoutTime': {'type': 'float'},
'TotalScanTimeSec': {'type': 'integer'},
'TransmitCoilName': {'type': 'string'},
'VariableFlipAngleFlag': {'type': 'string'},
}
prov_schema = {
'version': {
'type': 'string',
'required': True
},
'md5sum': {
'type': 'string',
'required': True
},
'software': {
'type': 'string',
'required': True
},
'settings': {
'type': 'dict',
'schema': {
'fd_thres': {'type': 'float'},
'hmc_fsl': {'type': 'boolean'},
'testing': {'type': 'boolean'}
},
},
'mriqc_pred': {'type': 'integer'},
'email': {'type': 'string'},
}
bold_iqms_schema = {
'aor': {
'type': 'float',
'required': True
},
'aqi': {
'type': 'float',
'required': True
},
'dummy_trs': {'type': 'integer'},
'dvars_nstd': {
'type': 'float',
'required': True
},
'dvars_std': {
'type': 'float',
'required': True
},
'dvars_vstd': {
'type': 'float',
'required': True
},
'efc': {
'type': 'float',
'required': True
},
'fber': {
'type': 'float',
'required': True
},
'fd_mean': {
'type': 'float',
'required': True
},
'fd_num': {
'type': 'float',
'required': True
},
'fd_perc': {
'type': 'float',
'required': True
},
'fwhm_avg': {
'type': 'float',
'required': True
},
'fwhm_x': {
'type': 'float',
'required': True
},
'fwhm_y': {
'type': 'float',
'required': True
},
'fwhm_z': {
'type': 'float',
'required': True
},
'gcor': {
'type': 'float',
'required': True
},
'gsr_x': {
'type': 'float',
'required': True
},
'gsr_y': {
'type': 'float',
'required': True
},
'size_t': {
'type': 'float',
'required': True
},
'size_x': {
'type': 'float',
'required': True
},
'size_y': {
'type': 'float',
'required': True
},
'size_z': {
'type': 'float',
'required': True
},
'snr': {
'type': 'float',
'required': True
},
'spacing_tr': {
'type': 'float',
'required': True
},
'spacing_x': {
'type': 'float',
'required': True
},
'spacing_y': {
'type': 'float',
'required': True
},
'spacing_z': {
'type': 'float',
'required': True
},
'summary_bg_k': {
'type': 'float',
'required': True
},
'summary_bg_mean': {
'type': 'float',
'required': True
},
'summary_bg_median': {
'type': 'float',
'required': True
},
'summary_bg_mad': {
'type': 'float',
'required': True
},
'summary_bg_p05': {
'type': 'float',
'required': True
},
'summary_bg_p95': {
'type': 'float',
'required': True
},
'summary_bg_stdv': {
'type': 'float',
'required': True
},
'summary_bg_n': {
'type': 'float',
're | quired': True
},
'summary_fg_k': {
'type': 'float',
'required': True
},
'summary_fg_mean': {
'type': 'float',
'required': True
},
'summary_ | fg_median': {
'type': 'float',
'required': True
},
'summary_fg_mad': {
'type': 'float',
'required': True
},
'summary_fg_p05': {
'type': 'float',
'required': True
},
'summary_fg_p95': {
'type': 'float',
'required': True
},
'summary_fg_stdv': {
'type': 'float',
'required': True
},
'summary_fg_n': {
'type': 'float',
'required': True
},
'tsnr': {
'type': 'float',
'required': True
},
}
struct_iqms_schema = {
'cjv': {
'type': 'float',
'required': True
},
'cnr': {
'type': 'float',
'required': True
},
'efc': {
'type': 'float',
'required': True
},
'fber': {
'type': 'float',
'required': True
},
'fwhm_avg': {
'type': 'float',
'required': True
},
'fwhm_x': {
'type': 'float',
'required': True
},
'fwhm_y': {
'type': 'float',
'required': True
},
'fwhm_z': {
'type': 'float',
'required': True
},
'icvs_csf': {
'type': 'float',
'required': True
},
'icvs_gm': {
'type': 'float',
'required': True
},
'icvs_wm': {
'type': 'float',
'required': True
},
'inu_med': {
'type': 'float',
'required': True
},
'inu_range': {
'type': 'float',
'required': True
},
'qi_1': {
'type': 'float',
'required': True
},
'qi_2': {
'type': 'float',
'required': True
},
|
jessicalucci/NovaOrc | nova/api/openstack/compute/contrib/used_limits.py | Python | apache-2.0 | 2,963 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common import log as logging
from nova import quota
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
XMLNS = "http://docs.openstack.org/compute/ext/used_limits/api/v1.1"
ALIAS = "os-used-limits"
authorize = extensions.soft_extension_authorizer('compute', 'used_limits')
class UsedLimitsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('limits', selector='limits')
root.set('{%s}usedLimits' % XMLNS, '%s:usedLimits' % ALIAS)
return xmlutil.SlaveTemplate(root, 1, nsmap={ALIAS: XMLNS})
class UsedLimitsController(wsgi.Controller):
@staticmethod
def _reserv | ed(req):
try:
return int(req.GET['reserved'])
except (ValueError, KeyError):
return False
@wsgi.extends
def index(self, req, resp_obj):
resp_obj.attach(xml=UsedLimitsTemplate())
context = req.environ['nova.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=True)
quota_map = {
'totalRAMUsed': 'ram',
'totalCoresUsed': 'co | res',
'totalInstancesUsed': 'instances',
'totalFloatingIpsUsed': 'floating_ips',
'totalSecurityGroupsUsed': 'security_groups',
}
used_limits = {}
for display_name, quota in quota_map.iteritems():
if quota in quotas:
reserved = (quotas[quota]['reserved']
if self._reserved(req) else 0)
used_limits[display_name] = quotas[quota]['in_use'] + reserved
resp_obj.obj['limits']['absolute'].update(used_limits)
class Used_limits(extensions.ExtensionDescriptor):
"""Provide data on limited resources that are being used."""
name = "UsedLimits"
alias = ALIAS
namespace = XMLNS
updated = "2012-07-13T00:00:00+00:00"
def get_controller_extensions(self):
controller = UsedLimitsController()
limits_ext = extensions.ControllerExtension(self, 'limits',
controller=controller)
return [limits_ext]
|
Ivoz/pip | setup.py | Python | mit | 2,816 | 0.00071 | import codecs
import os
import re
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
sys.exit(pytest.main(self.test_args))
def read(*parts):
# intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.rst')
tests_require = ['pytest', 'virtualenv>=1.10', 'scripttest>=1.3', 'mock']
setup(
name="pip",
version=find_version("pip", "__init__.py"),
description="The PyPA recommended tool for installing Python packages.",
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy"
],
keywords='easy_install distutils setuptools egg virtualenv | ',
author='The pip developers',
author_email='python-virtualenv@groups.google.com',
url='http://www.pip-installer.org',
license='MIT',
packages=find_packages(exclude=["contrib", "docs", "tests*", "tasks"]),
package_data={
'pip._vendor.requests': ['*.pem'],
'pip._vendor.distlib._backport': ['sysconfig.cfg'],
'pip._vendor.distlib': ['t32.exe', 't64.exe', 'w32.exe' | , 'w64.exe'],
},
entry_points={
"console_scripts": [
"pip=pip:main",
"pip%s=pip:main" % sys.version[:1],
"pip%s=pip:main" % sys.version[:3],
],
},
tests_require=tests_require,
zip_safe=False,
extras_require={
'testing': tests_require,
},
cmdclass={'test': PyTest},
)
|
pokymobo/redomat | libredo/data/result_httpd.py | Python | mpl-2.0 | 4,908 | 0.00326 | #!/usr/bin/env python3
# coding:utf-8
from __future__ import print_function
'PackagesHTTPD - stream folder content as .tar over http'
__author__ = 'Mathias Gumz <mgumz@tpip.net>'
__license__ = 'MPL2'
__version__ = ''
import sys
import os, os.path
import zipfile, tarfile
from StringIO import StringIO
import cgi
try:
from http | .server import SimpleHTTPRequestHandler, HTTPServer
except Impor | tError: # assume py2
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class PackagesHTTPD(SimpleHTTPRequestHandler):
'''
httpd-server to stream the contents of a given folder as
/packages.tar if /packages.tar is accessed. otherwise
it acts just like SimpleHTTPRequestHandler
'''
def do_GET(self):
'''
/packages.tar - serve the contents of the folder referenced in
self.server.packages as a streamd .tar file
/packages/* - serve the files of the folder referenced in
self.server.packages (chrooting into it)
/* - serve the files of the folder referenced in
self.server.chroot
'''
if self.path == '/packages.tar':
self._serve_folder_as_tar(self.server.packages)
return
SimpleHTTPRequestHandler.do_GET(self)
def list_directory(self, path):
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
if path == self.server.chroot:
list.append("packages/")
list.append("packages.tar")
list.sort(lambda a, b: cmp(a.lower(), b.lower()))
f = StringIO()
f.write("<title>Directory listing for %s</title>\n" % self.path)
f.write("<h2>Directory listing for %s</h2>\n" % self.path)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name = cgi.escape(name)
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
f.write("</ul>\n<hr>\n")
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
return f
def translate_path(self, path):
'''
translates 'path' (the path-part of an uri) to a file-system based
path.
we assume self.server.folder to be the standard chroot-folder. if
the user tries to access /packages, the self.server.packages folder
is used as the chroot
'''
chroot = self.server.chroot
if path.find('/packages/') == 0:
chroot = self.server.packages
_, path = path.split('/packages/', 1)
if not os.path.isabs(chroot):
chroot = os.path.abspath(chroot)
result = SimpleHTTPRequestHandler.translate_path(self, path)
_, result = result.split(os.getcwd(), 1)
if len(result) > 0 and result[0] == '/':
result = result[1:]
result = os.path.join(chroot, result)
return result
def _serve_folder_as_tar(self, folder):
tfile = tarfile.open(name='packages.tar', mode='w|', fileobj=self.wfile)
self.send_response(200)
self.send_header('Content-type', 'application/x-tar')
self.end_headers()
tfile.add(folder, arcname='packages')
tfile.close()
def _serve_zip_entry(self, name):
try:
entry = self.server.zipfile.open(name, 'r')
except KeyError:
self.send_response(404)
self.end_headers()
return
@staticmethod
def _create_zipfile(zname, zdir):
zfile = zipfile.ZipFile(zname, 'w', zipfile.ZIP_STORED, True)
for root, dirs, files in os.walk(zdir):
for f in files:
fname = os.path.join(root, f)
zfile.write(fname)
zfile.close()
if __name__ == '__main__':
def main():
if len(sys.argv) < 4:
print('usage: %s <port> <chroot> <packages_chroot>' % __file__)
return
port, chroot, packages_chroot = int(sys.argv[1]), sys.argv[2], sys.argv[3]
server_class = HTTPServer
httpd = server_class(('', port), PackagesHTTPD)
httpd.chroot = chroot
httpd.packages = packages_chroot
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
main()
|
gutooliveira/progScript | tekton/backend/apps/emprestado_app/facade.py | Python | mit | 2,614 | 0.004208 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaegraph.business_base import NodeSearch, DeleteNode
from emprestado_app.commands import ListEmprestadoCommand, SaveEmprestadoCommand, UpdateEmprestadoCommand, \
EmprestadoPublicForm, EmprestadoDetailForm, EmprestadoShortForm, SalvarEmprestimo, DeletarEmprestimo, GetEmprestimo
def save_emprestado_cmd(**emprestado_properties):
"""
Command to save Emprestado entity
:param emprestado_properties: a dict of properties to save on model
:return: a Command that save Emprestado, validating and localizing properties received as strings
"""
return SaveEmprestadoCommand(**emprestado_properties)
def update_emprestado_cmd(emprestado_id, **emprestado_properties):
"""
Command to upd | ate Emprestado entity with id equals 'emprestado_id'
:param emprestado_properties: a dict of properties to update model
:return: a Command that update Emprestado, validating and localizing properties received as strings
"""
return UpdateEmprestadoCommand(emprestado_id, **emprestado_properties)
def list_emprestados_cmd():
"""
Command t | o list Emprestado entities ordered by their creation dates
:return: a Command proceed the db operations when executed
"""
return ListEmprestadoCommand()
def emprestado_detail_form(**kwargs):
"""
Function to get Emprestado's detail form.
:param kwargs: form properties
:return: Form
"""
return EmprestadoDetailForm(**kwargs)
def emprestado_short_form(**kwargs):
"""
Function to get Emprestado's short form. just a subset of emprestado's properties
:param kwargs: form properties
:return: Form
"""
return EmprestadoShortForm(**kwargs)
def emprestado_public_form(**kwargs):
"""
Function to get Emprestado'spublic form. just a subset of emprestado's properties
:param kwargs: form properties
:return: Form
"""
return EmprestadoPublicForm(**kwargs)
def get_emprestado_cmd(emprestado_id):
"""
Find emprestado by her id
:param emprestado_id: the emprestado id
:return: Command
"""
return NodeSearch(emprestado_id)
def delete_emprestado_cmd(emprestado_id):
"""
Construct a command to delete a Emprestado
:param emprestado_id: emprestado's id
:return: Command
"""
return DeleteNode(emprestado_id)
def salvar_emprestimo(amigo,**pertences):
return SalvarEmprestimo(amigo,**pertences)
def delete_arco(amigo):
return DeletarEmprestimo(amigo)
def get_emprestimo(pertences):
return GetEmprestimo(pertences)
|
ema/conpaas | conpaas-services/src/conpaas/services/webservers/manager/autoscaling/monitoring.py | Python | bsd-3-clause | 23,600 | 0.019068 |
"""
Monitoring Controller collects all the informantion extracted using Ganglia monitoring system.
@author: fernandez
"""
import sys
from subprocess import Popen, PIPE
import memcache
import socket
from time import time
from os import path, listdir
from conpaas.services.webservers.manager.autoscaling.performance import ServicePerformance, ServiceNodePerf, StatUtils
from conpaas.services.webservers.manager import client
DEFAULT_NUM_CPU = 1.0
DEFAULT_RAM_MEMORY = '1034524.0'
class Monitoring_Controller:
def __init__(self, logger, cost_controller, config_parser, config_file_path, manager_host, manager_port, process_state, ganglia_rrd_dir):
self.cost_controller = cost_controller
self.config_parser = config_parser
self.manager_host = manager_host
self.manager_port = manager_port
self.logger = logger
self.process_state = process_state
self.ganglia_rrd_dir = ganglia_rrd_dir
self.last_collect_time = time()
self.stat_utils = StatUtils()
try:
self.config_parser.read(config_file_path)
| except:
print >>sys.stderr, 'Failed to read configuration file'
sys.exit(1)
#initialize a memcache client
memcache_addr = config_parser.get('manager', 'MEMCACHE_ADDR')
if memcache_addr == '':
pr | int >>sys.stderr, 'Failed to find memcache address in the config file'
sys.exit(1)
self.memcache = memcache.Client([memcache_addr])
self.perf_info = ServicePerformance()
self._performance_info_set(self.perf_info)
self.monitoring_metrics_web = ['web_request_rate', 'web_response_time', 'cpu_user', 'boottime']
self.monitoring_metrics_backend = ['php_request_rate', 'php_response_time', 'cpu_user', 'cpu_system', 'cpu_num', 'mem_total', 'boottime']
self.monitoring_metrics_proxy = ['web_request_rate_lb', 'web_response_time_lb', \
'php_request_rate_lb', 'php_response_time_lb', 'cpu_user', 'boottime']
def _performance_info_get(self):
return self.memcache.get('performance_info')
def _performance_info_set(self, perf_info):
self.memcache.set('performance_info', perf_info)
def nodes_info_update(self, killed_backends):
#conpaas_init_ssl_ctx(self.certs_dir, 'manager')
print('MANAGER %s' % self.manager_host)
print('PORT %s' % self.manager_port)
nodes = client.list_nodes(self.manager_host, self.manager_port)
self.logger.debug('Got update info from manager')
perf_info = self._performance_info_get()
perf_info.reset_role_info()
self.logger.debug('Updating nodes...')
for node_id in nodes['proxy']:
node = perf_info.serviceNodes.get(node_id)
if node != None:
node.registered_with_manager = True
node.isRunningProxy = True
else:
perf_info.serviceNodes[node_id] = ServiceNodePerf(node_id, '', True, False, False, self.process_state)
for node_id in nodes['web']:
node = perf_info.serviceNodes.get(node_id)
if node != None:
node.registered_with_manager = True
node.isRunningWeb = True
else:
perf_info.serviceNodes[node_id] = ServiceNodePerf(node_id, '', False, True, False, self.process_state)
for node_id in nodes['backend']:
node = perf_info.serviceNodes.get(node_id)
if node != None:
node.registered_with_manager = True
node.isRunningBackend = True
else:
perf_info.serviceNodes[node_id] = ServiceNodePerf(node_id, '', False, False, True, self.process_state)
self.logger.info('Filtering backend nodes killed_backends : '+str(killed_backends)+' '+str(perf_info.serviceNodes))
for id, node in perf_info.serviceNodes.items():
if node.ip == '':
response = client.get_node_info(self.manager_host, self.manager_port, id)
node.ip = response['serviceNode']['ip']
if node.registered_with_manager == False:
del perf_info.serviceNodes[id]
####FIXME TO FILTER REMOVE OF BACKENDS #####
if id in killed_backends:
self.logger.info('Filtered backend with id: '+str(id))
try:
del perf_info.serviceNodes[id]
except:
self.logger.warning('Backend already removed or not containing in serviceNodes: '+str(id))
###########################################
self.logger.info('Filtered backend nodes killed_backends : '+str(killed_backends)+' '+str(perf_info.serviceNodes))
self._performance_info_set(perf_info)
self.logger.info('Updating nodes information from ConPaaS manager...')
self.logger.info('Updated service nodes: %s' % str(perf_info.serviceNodes))
def collect_monitoring_metric(self, node_ip, metric_name):
timestamps = []
param_values = []
# Added this for EC2, where the RRD directory names in Ganglia are hosts and not IPs:
ganglia_dir_name = ''
if node_ip.find('amazonaws') > 0: # this is an IP address
ganglia_dir_name = node_ip
else: # this is a DNS name
for ganglia_host in listdir(self.ganglia_rrd_dir):
#self.logger.error('collect from ganglia host: ' + str(ganglia_host))
if ganglia_host.find('Summary') > 0:
continue
try:
hostname, array, array_ip = socket.gethostbyaddr(node_ip)
except:
self.logger.warning('Found private ip when trying to get the hostname for ip: '+str(node_ip))
ganglia_dir_name = node_ip
break
#self.logger.error('gethostbyaddr: ' + hostname)
if ganglia_host == hostname:
ganglia_dir_name = ganglia_host
break
rrd_file_name = self.ganglia_rrd_dir + ganglia_dir_name + '/' + metric_name + '.rrd'
self.logger.error('rrd_file_name: ' + str(rrd_file_name))
# logger.info('Searching in RRD file:' + rrd_file_name)
if (not path.isfile(rrd_file_name)):
self.logger.error('RRD file not found: ' + rrd_file_name)
return []
#logger.info('Getting monitoring info for node %s, parameter %s ...' % (node_ip, metric_name))
# logger.info('last collect time: ' + str(int(self.last_collect_time)))
collect_from = self.last_collect_time - (time() - self.last_collect_time)
#collect_from = self.last_collect_time
proc = Popen(['rrdtool', 'fetch', '-s', str(int(collect_from)), '-r', '15', \
str(rrd_file_name), 'AVERAGE'], stdout=PIPE, stderr=PIPE, close_fds=True)
stdout_req, stderr_req = proc.communicate()
lines = stdout_req.splitlines()
for line in lines:
#logger.debug(line)
tokens = line.split()
if (line.find('sum') >=0 or len(tokens) < 2):
continue;
timestamps.append(int(tokens[0].replace(':', '')))
if (tokens[1].find('nan') < 0):
param_values.append(float(tokens[1]))
else:
param_values.append(-1)
## Cleaning the memory allocated by subprocess.Popen()
try:
proc.terminate()
except OSError:
# logger.critical("Cannot kill the subprocess.popen rrdtool")
# can't kill a dead proc
pass
#logger.debug('timestamps: ' + str(timestamps))
#logger.debug('param values: ' + str(param_values))
return [timestamps, param_values]
def init_collect_monitoring_data(self):
self.perf_info = self._performance_info_get()
def collect_monitoring_data(self):
web_monitoring_data = {}
backend_monitoring_data = {}
proxy_monitoring_data = {}
for web_node in self.perf_info.getWebServiceNodes():
self.logger.info('Getting web monitoring info for %s ...' % web_node.ip)
#if web_node.ip not in web_monitoring_data:
web_monitoring_data[web_node.ip] = {}
cpu_num = DEFAULT_NUM_CPU
mem_total = DEFAULT_RAM_MEMORY
self.logger.info('Getting web monitoring info 1')
for it in range(len(self.monitoring_metrics_web)):
self.logger.info('Getting web monitoring info 2')
ret = self.collect_monitoring_metric(web_node.ip, self.monitoring_metrics_web[it])
|
jinglundong/GuessGame | service/indicator.py | Python | mit | 827 | 0.004837 | from collections import defaultdict
class Indicator():
def indicate( | self, guess, answer):
if(len(guess) != len(answer)):
raise InvalidInput("Length of answer and guess are different.")
aligned = 0;
not_aligned = 0;
chars_guess = defaultdict(int)
chars_answer = defaultdict(int)
for i in range(len(guess)):
char_guess = guess[i:i+1]
| char_answer = answer[i:i+1]
if char_guess == char_answer:
aligned += 1
else:
chars_guess[char_guess] += 1
chars_answer[char_answer] += 1
for key, value in chars_guess.iteritems():
if chars_answer.get(key):
not_aligned += min(value, chars_answer.get(key))
return aligned, not_aligned
|
rdio/sentry | src/sentry/migrations/0094_auto__add_eventmapping__add_unique_eventmapping_project_event_id.py | Python | bsd-3-clause | 29,441 | 0.008356 | # -*- coding: utf-8 -*-
import datetime
from south.db | import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EventMapping'
db.create_table('sentry_eventmapping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'])),
('group', self | .gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Group'])),
('event_id', self.gf('django.db.models.fields.CharField')(max_length=32)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['EventMapping'])
# Adding unique constraint on 'EventMapping', fields ['project', 'event_id']
db.create_unique('sentry_eventmapping', ['project_id', 'event_id'])
def backwards(self, orm):
# Removing unique constraint on 'EventMapping', fields ['project', 'event_id']
db.delete_unique('sentry_eventmapping', ['project_id', 'event_id'])
# Deleting model 'EventMapping'
db.delete_table('sentry_eventmapping')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], |
dashpay/dash | test/functional/feature_bip68_sequence.py | Python | mit | 17,835 | 0.003869 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
class BIP68Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 byt | es, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks a | nd not should_ |
andrewleech/script.module.pycurl | lib/pycurl/pycurl-7.19.5.1/tests/reset_test.py | Python | lgpl-2.1 | 2,691 | 0.003716 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
try:
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
from . import appmanager
from . import util
setup_module, teardown_module = appmanager.setup(('app', 8380))
class ResetTest(unittest.TestCase):
def test_reset(self):
c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://localhost:8380/success')
c.reset()
try:
c.perform()
self.fail('Perform worked when it should not have')
except pycurl.error:
exc = sys.exc_info()[1]
code = exc.args[0]
self.assertEqual(pycurl.E_URL_MALFORMAT, code)
# check that Curl object is usable
c.setopt(pycurl.URL, 'http://localhost:8380/success')
sio = util.BytesIO()
c.setopt(pycurl.WRITEFUNCTION, sio.write)
c.perform()
self.assertEqual('success', sio.getvalue().decode())
# XXX this test was broken when it was test_reset.py
def skip_reset_with_multi(self):
outf = util.BytesIO()
cm = pycurl.CurlMulti()
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEFUNCTION, outf.write)
eh.setopt(pycurl.URL, 'http://localhost:8380/success')
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
| break
count, good, bad = cm.info_read()
for h | , en, em in bad:
print("Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em))
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print("Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode))
raise RuntimeError
else:
print("Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL)))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
|
zake7749/Chatbot | Chatbot/QuestionAnswering/responsesEvaluate.py | Python | gpl-3.0 | 5,244 | 0.005353 | import logging
import os
import math
from collections import defaultdict
from gensim import corpora
# 引入斷詞與停用詞的配置
from .Matcher.matcher import Matcher
class Evaluator(Matcher):
"""
讀入一串推文串列,計算出當中可靠度最高的推文
"""
def __init__(self,segLib="Taiba"):
#FIXME 若「線上版本」受記憶體容量限制,需考慮更換為 jieba!
super().__init__(segLib)
self.responses = []
self.segResponses = []
self.totalWords = 0
self.path = os.path.dirname(__file__)
self.debugLog = open(self.path + "/data/EvaluateLog.txt",'w',encoding="utf-8")
self.filteredWords = set() # 必須濾除的回應
self.counterDictionary = defaultdict(int) # 用於統計詞頻
self.tokenDictionary = None # 用於分配詞 id,與建置詞袋
# 中文停用詞與特殊符號加載
self.loadStopWords(path=self.path + "/data/stopwords/chinese_sw.txt")
self.loadStop | Words(path=self.path + "/data/stopwords/specialMarks.txt")
self.loadFilterdWord(path=self.path + "/data/stopwords/ptt_words.txt")
def cleanFormerResult(self):
"""
清空之前回應留下的紀錄
"""
self.responses = []
self.seg | Responses = []
self.totalWords = 0
def getBestResponse(self, responses, topk, debugMode=False):
"""
從 self.responses 中挑選出可靠度前 K 高的回應回傳
Return : List of (reply,grade)
"""
self.cleanFormerResult()
self.buildResponses(responses)
self.segmentResponse()
self.buildCounterDictionary()
candiateList = self.evaluateByGrade(topk, debug=debugMode)
return candiateList
def loadFilterdWord(self,path):
with open(path, 'r', encoding='utf-8') as sw:
for word in sw:
self.filteredWords.add(word.strip('\n'))
def buildResponses(self, responses):
"""
將 json 格式中目前用不上的 user,vote 去除,只留下 Content
"""
self.responses = []
for response in responses:
clean = True
r = response["Content"]
for word in self.filteredWords:
if word in r:
clean = False
if clean:
self.responses.append(response["Content"])
def segmentResponse(self):
"""
對 self.responses 中所有的回應斷詞並去除中文停用詞,儲存於 self.segResponses
"""
self.segResponses = []
for response in self.responses:
keywordResponse = [keyword for keyword in self.wordSegmentation(response)
if keyword not in self.stopwords
and keyword != ' ']
self.totalWords += len(keywordResponse)
self.segResponses.append(keywordResponse)
#logging.info("已完成回應斷詞")
def buildCounterDictionary(self):
"""
統計 self.segResponses 中每個詞出現的次數
"""
for reply in self.segResponses:
for word in reply:
self.counterDictionary[word] += 1
#logging.info("計數字典建置完成")
def buildTokenDictionary(self):
"""
為 self.segResponses 中的詞配置一個 id
"""
self.tokenDictionary = corpora.Dictionary(self.segResponses)
logging.info("詞袋字典建置完成,%s" % str(self.tokenDictionary))
def evaluateByGrade(self,topk,debug=False):
"""
依照每個詞出現的在該文件出現的情形,給予每個回覆一個分數
若該回覆包含越多高詞頻的詞,其得分越高
Args:
- 若 debug 為 True,列出每筆評論的評分與斷詞情形
Return: (BestResponse,Grade)
- BestResponse: 得分最高的回覆
- Grade: 該回覆獲得的分數
"""
bestResponse = ""
candiates = []
avgWords = self.totalWords/len(self.segResponses)
for i in range(0, len(self.segResponses)):
wordCount = len(self.segResponses[i])
sourceCount = len(self.responses[i])
meanful = 0
if wordCount == 0 or sourceCount > 24:
continue
cur_grade = 0.
for word in self.segResponses[i]:
wordWeight = self.counterDictionary[word]
if wordWeight > 1:
meanful += math.log(wordWeight,10)
cur_grade += wordWeight
cur_grade = cur_grade * meanful / (math.log(len(self.segResponses[i])+1,avgWords) + 1)
candiates.append([self.responses[i],cur_grade])
if debug:
result = self.responses[i] + '\t' + str(self.segResponses[i]) + '\t' + str(cur_grade)
self.debugLog.write(result+'\n')
print(result)
candiates = sorted(candiates,key=lambda candiate:candiate[1],reverse=True)
return candiates[:topk]
class ClusteringEvaluator(Evaluator):
"""
基於聚類評比推文可靠度
"""
pass
|
IgniparousTempest/py-minutiae-viewer | pyminutiaeviewer/gui_editor.py | Python | mit | 5,203 | 0.002306 | import math
from pathlib import Path
from tkinter import W, N, E, StringVar, PhotoImage
from tkinter.ttk import Button, Label, LabelFrame
from overrides import overrides
from pyminutiaeviewer.gui_common import NotebookTabBase
from pyminutiaeviewer.minutia import Minutia, MinutiaType
class MinutiaeEditorFrame(NotebookTabBase):
    """Notebook tab for hand-editing minutiae on a fingerprint image.

    Pressing the left mouse button starts placing a minutia, dragging sets
    its angle, releasing commits it; a right click deletes the nearest
    existing minutia.  The canonical minutiae list lives on ``self.root``
    (the owning window), not on this frame.
    """
    # TODO: I'd like to remove the <minutiae> parameter
    def __init__(self, parent, load_fingerprint_func, load_minutiae_func, save_minutiae_file):
        super(self.__class__, self).__init__(parent, load_fingerprint_func)
        self.root = parent
        # Backing variable for the "Minutiae: N" label shown in InfoFrame.
        self.minutiae_count = StringVar()
        self._update_minutiae_count()
        # ((x, y), MinutiaType) of the minutia being placed, or None when idle.
        self.current_minutiae = None
        self.load_minutiae_btn = Button(self, text="Load Minutiae", command=load_minutiae_func)
        self.load_minutiae_btn.grid(row=1, column=0, sticky=N + W + E)
        self.export_minutiae_btn = Button(self, text="Export Minutiae", command=save_minutiae_file)
        self.export_minutiae_btn.grid(row=2, column=0, sticky=N + W + E)
        self.info_frame = InfoFrame(self, "Info", self.minutiae_count)
        self.info_frame.grid(row=3, column=0, padx=4, sticky=N + W + E)

    @overrides
    def load_fingerprint_image(self, image):
        # A new image may come with a fresh minutiae list; refresh the label.
        self._update_minutiae_count()

    @overrides
    def load_minutiae_file(self):
        self._update_minutiae_count()

    def _update_minutiae_count(self):
        # The count is read from the canonical list held by the root window.
        self.minutiae_count.set("Minutiae: {}".format(self.root.number_of_minutiae()))

    @overrides
    def on_canvas_mouse_left_click(self, event):
        """
        Begin placing a minutia at the mouse click (committed on release).

        NOTE(review): the original docstring and the InfoFrame legend label
        plain LMB as "Bifurcation", yet the code stores
        MinutiaType.RIDGE_ENDING here (and BIFURCATION for Ctrl+LMB below)
        -- confirm which mapping is intended.
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        self.current_minutiae = ((x, y), MinutiaType.RIDGE_ENDING)

    @overrides
    def on_canvas_ctrl_mouse_left_click(self, event):
        """
        Begin placing a minutia (stored as BIFURCATION) at the mouse click.
        See the NOTE in on_canvas_mouse_left_click about the type mapping.
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        self.current_minutiae = ((x, y), MinutiaType.BIFURCATION)

    @overrides
    def on_canvas_mouse_right_click(self, event):
        """
        Removes the minutia closest to the mouse click (within distance 10).
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        # Click coordinates are in canvas space; minutiae are stored in
        # image space, so scale before comparing.
        scale_factor = self.root.canvas_image_scale_factor()
        x, y = x * scale_factor, y * scale_factor
        # Collect candidates by Manhattan distance.
        possible_minutiae = []
        for i in range(self.root.number_of_minutiae()):
            m = self.root.minutiae[i]
            dist = abs(m.x - x) + abs(m.y - y)
            if dist < 10:
                possible_minutiae.append((dist, i))
        # Sort ascending, in-place.
        possible_minutiae.sort(key=lambda tup: tup[0])
        if len(possible_minutiae) == 0:
            return
        else:
            # Delete only the nearest match.
            del self.root.minutiae[possible_minutiae[0][1]]
        self.root.draw_minutiae()
        self._update_minutiae_count()

    @overrides
    def on_canvas_mouse_left_drag(self, event):
        """
        Sets the angle of the minutiae being placed (preview only).
        """
        x, y = event.x, event.y
        ((sx, sy), minutiae_type) = self.current_minutiae
        # Angle of the drag vector, offset by 90 degrees.
        angle = math.degrees(math.atan2(y - sy, x - sx)) + 90
        minutia = Minutia(round(sx), round(sy), angle, minutiae_type, 1.0)
        self.root.draw_single_minutia(minutia)

    @overrides
    def on_canvas_mouse_left_release(self, event):
        """
        Commits the minutia currently being placed to the canonical list.
        """
        x, y = event.x, event.y
        scale_factor = self.root.canvas_image_scale_factor()
        ((px, py), minutiae_type) = self.current_minutiae
        # Angle from the (unscaled) drag vector; position scaled to image space.
        angle = math.degrees(math.atan2(y - py, x - px)) + 90
        self.root.minutiae.append(Minutia(round(px * scale_factor), round(py * scale_factor), angle, minutiae_type, 1.0))
        self.current_minutiae = None
        self.root.draw_minutiae()
        self._update_minutiae_count()
class InfoFrame(LabelFrame):
    """Legend panel: live minutiae count plus the mouse-button bindings."""

    def __init__(self, parent, title, minutiae_count):
        super(self.__class__, self).__init__(parent, text=title)
        # All legend images live next to this module.
        images_dir = Path(__file__).resolve().parent / 'images'

        self.current_number_minutiae_label = Label(self, textvariable=minutiae_count)
        self.current_number_minutiae_label.grid(row=0, column=0, sticky=N + W + E)

        self.bifurcation_label = Label(self, text="Bifurcation (LMB):")
        self.bifurcation_label.grid(row=1, column=0, sticky=W)
        self.bifurcation_image = PhotoImage(file=images_dir / 'bifurcation.png')
        self.bifurcation_image_label = Label(self, image=self.bifurcation_image)
        self.bifurcation_image_label.grid(row=2, column=0, sticky=W)

        self.ridge_ending_label = Label(self, text="Ridge Ending (CTRL + LMB):")
        self.ridge_ending_label.grid(row=3, column=0, sticky=W)
        self.ridge_ending_image = PhotoImage(file=images_dir / 'ridge_ending.png')
        self.ridge_ending_image_label = Label(self, image=self.ridge_ending_image)
        self.ridge_ending_image_label.grid(row=4, column=0, sticky=W)
|
popeye123/studip-sync | studip_sync/parsers.py | Python | unlicense | 8,325 | 0.002282 | import cgi
import json
import re
import urllib.parse
from bs4 import BeautifulSoup
class ParserError(Exception):
    """Raised when an expected element cannot be extracted from Stud.IP HTML."""
    pass
def extract_files_flat_last_edit(html):
    """Return the Unix timestamp of the most recently edited file on a
    course's flat file-listing page, or 0 if the page lists no files.

    Two page layouts are supported and attempted in order: a JSON blob in
    the ``data-files`` attribute of the files form (newer Stud.IP), and a
    plain HTML table (older Stud.IP).

    :param html: HTML source of the files page.
    :raises ParserError: if neither extraction strategy succeeds.
    """
    def extract_json(s):
        # Newer layout: per-file timestamps live in the form's data-files JSON.
        form = s.find('form', id="files_table_form")
        if not form:
            raise ParserError("last_edit: files_table_form not found")
        if "data-files" not in form.attrs:
            raise ParserError("last_edit: Missing data-files attribute in form")
        form_data_files = json.loads(form.attrs["data-files"])
        file_timestamps = []
        for file_data in form_data_files:
            if "chdate" not in file_data:
                raise ParserError("last_edit: No chdate: " + str(file_data.keys()))
            file_timestamps.append(file_data["chdate"])
        # An empty course has no "last edit" -> 0.
        if len(file_timestamps) > 0:
            return max(file_timestamps)
        else:
            return 0

    def extract_html_table(s):
        # Older layout: the timestamp is the sort value of the 7th column.
        for form in s.find_all('form'):
            if 'action' not in form.attrs:
                continue
            tds = form.find('table').find('tbody').find_all('tr')[0].find_all('td')
            if len(tds) == 8:
                td = tds[6]
                if 'data-sort-value' not in td.attrs:
                    raise ParserError("last_edit: Couldn't find td object with data-sort-value")
                try:
                    return int(td.attrs['data-sort-value'])
                except (TypeError, ValueError):
                    # Fix: was a bare "except:", which also swallowed
                    # KeyboardInterrupt/SystemExit; narrowed to what int()
                    # can actually raise.
                    raise ParserError("last_edit: Couldn't convert data-sort-value to int")
            elif len(tds) == 1 and "Keine Dateien vorhanden." in str(tds[0]):
                return 0  # No files, so no information when was the last time a file was edited
            else:
                raise ParserError("last_edit: row doesn't have expected length of cells")
        raise ParserError("last_edit: Found no valid form")

    soup = BeautifulSoup(html, 'lxml')
    func_attempts = [extract_json, extract_html_table]
    for func_attempt in func_attempts:
        try:
            return func_attempt(soup)
        except ParserError:
            continue
    # Debug statement to identify parser errors
    print("----------- DEBUG -----------")
    print(html)
    raise ParserError("last_edit: all attempts to extract last edit failed")
def extract_files_index_data(html):
    """Return (files, folders) as parsed from the data-files / data-folders
    JSON attributes of the files form.

    :raises ParserError: if either attribute is missing from the form.
    """
    form = BeautifulSoup(html, 'lxml').find('form', id="files_table_form")
    for attribute in ("data-files", "data-folders"):
        if attribute not in form.attrs:
            raise ParserError("index_data: Missing {} attribute in form".format(attribute))
    return json.loads(form["data-files"]), json.loads(form["data-folders"])
def extract_parent_folder_id(html):
    """Return the value of the single ``parent_folder_id`` input on the page.

    :raises ParserError: unless exactly one such element exists.
    """
    matches = BeautifulSoup(html, 'lxml').find_all(attrs={"name": "parent_folder_id"})
    if len(matches) != 1:
        raise ParserError("Could not find parent folder ID")
    return matches[-1].attrs.get("value", "")
def extract_csrf_token(html):
    """Return the value of the last ``security_token`` input on the page.

    :raises ParserError: if no token input is present.
    """
    soup = BeautifulSoup(html, 'lxml')
    tokens = soup.find_all("input", attrs={"name": "security_token"})
    if not tokens:
        raise ParserError("Could not find CSRF token")
    return tokens[-1].attrs.get("value", "")
def extract_courses(html, only_recent_semester):
    """Yield one dict per course found on the "my courses" overview page.

    Each dict contains ``course_id``, ``save_as`` (a filesystem-safe course
    name), ``semester`` (the table caption) and ``semester_id`` (counted so
    the oldest semester table gets 1).

    :param html: HTML source of the seminar overview page.
    :param only_recent_semester: if True, only the first (newest) semester
        table is scanned.
    """
    soup = BeautifulSoup(html, 'lxml')
    tables = soup.find("div", id="my_seminars").find_all("table")
    # Hoisted out of the loop: the pattern is loop-invariant, the original
    # recompiled it for every semester table.
    matcher = re.compile(
        r"https://.*seminar_main.php\?auswahl=[0-9a-f]*$")
    for i, table in enumerate(tables):
        if only_recent_semester and i > 0:
            # Only the first table is wanted; was `continue`, which kept
            # scanning every remaining table for nothing.
            break
        semester_id = len(tables) - i
        semester = table.find("caption").string.strip()
        for link in table.find_all("a", href=matcher):
            href = link.attrs.get("href", "")
            query = urllib.parse.urlsplit(href).query
            course_id = urllib.parse.parse_qs(query).get("auswahl", [""])[0]
            # Collapse runs of whitespace and make the name filesystem-safe.
            save_as = re.sub(r"\s\s+", " ", link.get_text(strip=True))
            save_as = save_as.replace("/", "--")
            yield {
                "course_id": course_id,
                "save_as": save_as,
                "semester": semester,
                "semester_id": semester_id
            }
def extract_media_list(html):
    """Return a list of ``{hash, media_url, type}`` dicts, one per media
    table on the page.  ``type`` is "player" when an overlay link exists,
    otherwise "direct_download".

    :raises ParserError: if a table lacks its id or info link.
    """
    soup = BeautifulSoup(html, 'lxml')
    media_files = []
    for table in soup.find_all("table", class_="media-table"):
        if "id" not in table.attrs:
            raise ParserError("media_list: 'id' is missing from table")
        media_hash = table["id"]

        overlay_anchor = table.select_one("div.overlay-curtain > a")
        info_anchor = table.select_one("div.media-table-infos > div > a")
        if not info_anchor:
            raise ParserError("media_list: a_element_media_table_infos is missing")
        if "href" not in info_anchor.attrs:
            raise ParserError("media_list: 'href' is missing from a_element_media_table_infos")
        media_url = info_anchor["href"]

        if not media_hash or not media_url:
            raise ParserError("media_list: hash or url is empty")
        media_files.append({
            "hash": media_hash,
            "media_url": media_url,
            "type": "direct_download" if overlay_anchor is None else "player"
        })
    return media_files
def extract_media_best_download_link(html):
    """Return the best download URL found on a media page.

    Four page layouts are probed in order of preference: an explicit
    download table, an embedded player iframe, an HTML5 <video> element,
    and finally a regex scan of the raw HTML for a mediacast .mp4 link.

    :param html: HTML source of the media page.
    :raises ParserError: if every strategy fails.
    """
    def extract_table(_, s):
        # Layout 1: a download table; the first real option is the best.
        download_options = s.select("table#dllist tr td")
        if not download_options or len(download_options) <= 1:
            raise ParserError("media_download_link: No download options found")
        # Always select the first result as the best result
        # (skip first "Download" td, so instead of 0 select 1)
        download_td = download_options[1]
        download_a = download_td.find("a")
        if "href" not in download_a.attrs:
            raise ParserError("media_download_link: href is missing from download_a")
        return download_a["href"]

    def extract_iframe(_, s):
        # Layout 2: framed player; the media URL is the iframe source.
        iframe = s.find("iframe", id="framed_player")
        if not iframe:
            raise ParserError("media_download_link: No iframe found")
        if "src" not in iframe.attrs:
            raise ParserError("media_download_link: src is missing from iframe")
        return iframe.attrs["src"]

    def extract_video(_, s):
        # Layout 3: HTML5 player element.
        video = s.find("video", id="mediaplayer_html5_api")
        if not video:
            raise ParserError("media_download_link: No video item found")
        if "src" not in video.attrs:
            raise ParserError("media_download_link: src is missing from video item")
        return video.attrs["src"]

    def extract_video_regex(h, _):
        # Layout 4 (last resort): scan the raw HTML, take the last match.
        matcher = re.compile(r"/plugins.php/mediacastplugin/media/check/.+\.mp4")
        links = matcher.findall(h)
        if len(links) < 1:
            raise ParserError("media_download_link: links < 1")
        return links[len(links) - 1]

    soup = BeautifulSoup(html, 'lxml')
    # Each helper receives (raw_html, soup); tried in decreasing preference.
    func_attempts = [extract_table, extract_iframe, extract_video, extract_video_regex]
    for func_attempt in func_attempts:
        try:
            return func_attempt(html, soup)
        except ParserError:
            continue
    # Debug statement to identify parser errors
    print("----------- DEBUG -----------")
    print(html)
    raise ParserError("media_download_link: all attempts to extract url failed")
def extract_filename_from_headers(headers):
if "Content-Disposition" not in headers:
raise ParserError(
"media_filename_headers: \"Content-Disposition\" is missing")
content_disposition = headers["Content-Disposition"]
header_value, header_params = cgi.parse_header(content_disposition)
if "filename" not in header_params:
raise ParserError("media_filename_headers: \"filename\" is missing")
if hea |
dsandersAzure/python_cowbull_server | Game/GameMode.py | Python | apache-2.0 | 9,034 | 0.00476 | from python_cowbull_server import error_handler
from flask_helpers.check_kwargs import check_kwargs
class GameMode(object):
"""
A representation of a game mode (complexity, number of digits, guesses allowed, etc.). The
mode contains the following information:
* mode <str> A text name for the mode.
* priority <int> An integer of the priority in terms of returning a list.
* digits <int> An integer representing the number of digits used in this mode.
* digit_type <int> An integer representing the type of digit used, e.g. Hex or Digit
* guesses_allowed <int> An integer representing the number of guesses that can be made
* instruction_text <str> A free form string for instructions on the mode
* help_text <str> A free form string offering help text for the mode
"""
# Simplify method as per http://sonarqube:9000/project/issues?id=cowbull_server&issues=AWiRMJ-OaAhZ-jY-ujHk&open=AWiRMJ-OaAhZ-jY-ujHk
def __init__(
self,
**kwargs
):
"""
Constructor to create a new mode.
:param mode: <str> A text name for the mode.
:param priority: <int> priority of modes (in terms of returning a list)
:param digits: <int> number of digits used in this mode.
:param digit_type: <int> type of digit, e.g. DigitWord.HEXDIGIT or DigitWord.DIGIT
:param guesses_allowed: <int> Number of guesses permitted.
:param instruction_text: <str> Instruction text (dependent upon caller to show)
:param help_text: <str> Help text (dependent upon caller to show)
"""
check_kwargs(
parameter_list=[
"mode",
"priority",
"digits",
"digit_type",
"guesses_allowed",
"instruction_text",
"help_text"
],
caller="GameMode__init__",
**kwargs
)
mode=kwargs.get("mode", None)
priority=kwargs.get("priority", None)
digits=kwargs.get("digits", None)
digit_type=kwargs.get("digit_type", None)
guesses_allowed=kwargs.get("guesses_allowed", None)
instruction_text=kwargs.get("instruction_text", None)
help_text=kwargs.get("help_text", None)
# execute_load error handler
# self.handler = ErrorHandler(module="GameMode", method="__init__")
self.handler = error_handler
self.handler.module = "GameMode"
self.handler.method = "__init__"
# Initialize variables
self.handler.log(message="Initializing variables")
self._mode = None
self._priority = None
self._digits = None
self._digit_type = None
self._guesses_allowed = None
self._instruction_text = None
self._help_text = None
# NOTICE: Properties are used to set 'private' fields (e.g. _mode) to handle
# data validation in one place. When adding a new parameter to __init__ ensure
# that the property is created (following the existing code) and set the
# property not the 'internal' variable.
#
self.handler.log(message="Creating mode {}".format(mode))
self.mode = mode
self.priority = priority
self.digits = digits
self.digit_type = digit_type
self.guesses_allowed = guesses_allowed
self.instruction_text = instruction_text
self.help_text = help_text
self.handler.log(message="Mode {} created: {}".format(mode, self.dump()))
#
# Overrides
#
def __str__(self):
"""
Override of __str__ method.
:return: <str> representation of the GameMode
"""
return str(self.dump())
def __repr__(self):
"""
Override of __repr__ method.
:return: <str> representation of object showing mode name
"""
return "<GameObject: mode: {}>".format(self._mode)
#
# Properties
#
@property
def mode(self):
"""
The name of the mode.
:return: <str>
"""
return self._mode
@mode.setter
def mode(self, value):
self._mode = self._property_setter(
keyword="mode", required=True, datatype=str, value=value
)
@property
def priority(self):
"""
The priority of the mode when collected in a list. For example: priority 10 is less than 20,
so 10 will come before 20 in a list of GameMode objects.
This is useful because other modules might return a sorted list of GameMode objects to their
callers and priority provides a simple means to sort and sequence a collection of GameMode
objects.
:return: <int>
"""
return self._priority
@priority.setter
def priority(self, value):
self._priority = self._property_setter(
keyword="priority", required=True, datatype=int, value=value
)
@property
def digits(self):
"""
The number of digits used by the DigitWord used in this mode; e.g. a value of 3 would
indicate there are three digits (e.g. 1, 2, and 3), while a value of 5 would indicate
five values (e.g. 0, 1, 2, 3, 4).
:return: <int>
"""
return self._digits
@digits.setter
def digits(self, value):
self._digits = self._property_setter(
keyword="digits", required=False, default=4, datatype=int, value=value
)
@property
def digit_type(self):
"""
The digit_type is a flag used to specify the type of digit to be used; for example, a
digit (DigitWord.DIGIT) enables a single digit between 0 and 9, while a hex digit
(DigitWord.HEXDIGIT) enables a single digit between 0 and F.
:return | : <int>
"""
return self._digit_type
@digit_type.setter
def digit_type(sel | f, value):
self._digit_type = self._property_setter(
keyword="digit_type", required=False, default=0, datatype=int, value=value
)
@property
def guesses_allowed(self):
"""
The number of guesses the mode is allowed; for example an easy mode might allow
20 guesses while a hard mode only allowed 7.
:return: <int>
"""
return self._guesses_allowed
@guesses_allowed.setter
def guesses_allowed(self, value):
self._guesses_allowed = self._property_setter(
keyword="guesses_allowed", required=False, default=10, datatype=int, value=value
)
@property
def instruction_text(self):
"""
Instructions on how to use the mode (if present).
:return: <str>
"""
return self._instruction_text
@instruction_text.setter
def instruction_text(self, value):
self._instruction_text = self._property_setter(
keyword="instruction_text", required=False, datatype=str, value=value
)
@property
def help_text(self):
"""
Help text intended to guide the user on how to use and interact with the game
mode.
:return: <str>
"""
return self._help_text
@help_text.setter
def help_text(self, value):
self._help_text = self._property_setter(
keyword="help_text", required=False, datatype=str, value=value
)
#
# 'public' methods
#
def dump(self):
"""
Dump (convert to a dict) the GameMode object
:return: <dict>
"""
return {
"mode": self._mode,
"priority": self._priority,
"digits": self._digits,
"digit_type": self._digit_type,
"guesses_allowed": self._guesses_allowed,
"instruction_text": self._instruction_text,
"help_text": self._help_text
}
#
# 'private' methods
#
def _property_setter(
self,
keyword=None,
required=None,
default=None,
datatype=None,
value=None,
): |
plotly/plotly.py | packages/python/plotly/plotly/validators/image/hoverlabel/font/_colorsrc.py | Python | mit | 424 | 0.002358 | impor | t _plotly_utils.basevalidators
class Co | lorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="image.hoverlabel.font", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
mrtommyb/AstroFuckOffs | bot/tests/test_bot.py | Python | mit | 1,600 | 0.005009 | import os
import json
import pytest
from .. import bot, PACKAGEDIR
# Canned Twitter API payloads used as fixtures by the handler tests below.
EXAMPLE_TWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'example-tweet.json'), 'r'))
EXAMPLE_RETWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'retweeted-status.json'), 'r'))
EXAMPLE_NARCISSISTIC = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'narcissistic-tweet.json'), 'r'))
# Scratch sqlite database file the handler writes to during tests.
TESTDB = 'test_goldstar.db'
def test_recipients():
    """A regular mention tweet yields exactly one recipient."""
    recipients = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True).get_recipients()
    assert len(recipients) == 1
    first = recipients[0]
    assert first['screen_name'] == 'exoplaneteer'
def test_responses():
    """Handling a tweet produces a single, tweet-length response."""
    responses = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True).handle()
    assert len(responses) == 1  # only 1 star handed out
    reply = responses[0]
    assert len(reply) < 140  # max tweet length
    assert reply == '@exoplaneteer Congratulations, you just earned a 🌟 from @GeertHub! Your total is 1. https://twitter.com/GeertHub/status/745616020581265408'
def test_retweet():
    """A retweet should not result in a star!"""
    with pytest.raises(bot.InvalidTweetException):
        # Fix: dropped the unused `handler =` binding -- the constructor
        # raises before the assignment could ever happen, so the name was
        # dead and misleading (flake8 F841).
        bot.TweetHandler(EXAMPLE_RETWEET, dbfile=TESTDB, dry_run=True)
def test_narcisstic():
    """Don't allow people to give stars to themselves!"""
    handler = bot.TweetHandler(EXAMPLE_NARCISSISTIC, dbfile=TESTDB, dry_run=True)
    responses = handler.handle()
    # Exactly one refusal reply, no star awarded.
    assert responses == ["@exoplaneteer I'm sorry, Dan. I'm afraid I can't do that."]
|
ept/windmill | windmill/server/https.py | Python | apache-2.0 | 17,211 | 0.003428 | # Copyright (c) 2009 Canonical Ltd.
# Copyright (c) 2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Domen Kozar <domen@dev.si>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributor: Anthony Lenton <anthony.lenton@canonical.com>
"""
This module provides an SSL-enabled HTTP server, WindmillChooserApplication
and WindmillProxyApplication that are drop-in replacements for the standard
non-ssl-enabled ones.
"""
import time
import socket
import select
import urllib
import SocketServer
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from StringIO import StringIO
from proxy import WindmillProxyApplication
from httplib import HTTPConnection, HTTPException
import traceback
import sys
import windmill
if not sys.version.startswith('2.4'):
from urlparse import urlparse, urlunparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse, urlunparse
import logging
logger = logging.getLogger(__name__)
# Compatibility shim (Python 2): prefer the stdlib `ssl` module (2.6+);
# on 2.5 fall back to pyOpenSSL-based equivalents when windmill was built
# with SSL support.
try:
    import ssl # python 2.6
    _ssl_wrap_socket = ssl.wrap_socket
    _socket_create_connection = socket.create_connection
except ImportError:
    # python 2.5
    if windmill.has_ssl:
        from OpenSSL import SSL
        from httplib import FakeSocket

        class BetterFakeSocket(FakeSocket):
            """ A FakeSocket that implements sendall and
            handles exceptions better
            """
            class _closedsocket:
                # Stand-in for a closed fd: falsy, and any other attribute
                # access raises as if the descriptor were gone.
                def __nonzero__(self):
                    return False
                def __getattr__(self, name):
                    if name == '__nonzero__':
                        return self.__nonzero__
                    raise HTTPException(9, 'Bad file descriptor')

            def __init__(self, sock, ssl):
                FakeSocket.__init__(self, sock, ssl)
                # Once an SSL syscall fails, `ok` latches False and further
                # I/O on this wrapper becomes a no-op.
                self.ok = True

            def sendall(self, data):
                if self.ok:
                    try:
                        self._ssl.sendall(data)
                    except SSL.SysCallError, err:
                        self.ok = False
                        print err

            def recv(self, len = 1024, flags = 0):
                # NOTE(review): if the read raises, `result` is unbound and
                # the return statement would raise NameError; when not ok,
                # the method implicitly returns None.
                if self.ok:
                    try:
                        result = self._ssl.read(len)
                    except SSL.SysCallError, err:
                        self.ok = False
                        print err
                    return result

        def _ssl_verify_peer(conection, certificate, errnum, depth, ok):
            # Accept any peer certificate (no verification).
            return True

        def _ssl_wrap_socket(sock, certfile=None,
                             server_side=False, ca_certs=None,
                             do_handshake_on_connect=True,
                             suppress_ragged_eofs=True):
            # pyOpenSSL replacement for ssl.wrap_socket.
            ctx = SSL.Context(SSL.SSLv23_METHOD)
            if certfile is not None:
                ctx.use_privatekey_file(certfile)
                # Best-effort: the file may hold only the key.
                try:
                    ctx.use_certificate_file(certfile)
                except: pass
                ctx.load_verify_locations(certfile)
            ctx.set_verify(SSL.VERIFY_NONE, _ssl_verify_peer)
            ssl_sock = SSL.Connection(ctx, sock)
            if server_side:
                ssl_sock.set_accept_state()
            else:
                ssl_sock.set_connect_state()
            return BetterFakeSocket(sock, ssl_sock)

        def _socket_create_connection(address, timeout=None):
            # Replacement for socket.create_connection; temporarily swaps
            # the global default timeout around the connect call.
            if timeout is None:
                timeout = socket.getdefaulttimeout()
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            oldtimeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            sock.connect(address)
            socket.setdefaulttimeout(oldtimeout)
            return sock
class WindmillHTTPRequestHandler(SocketServer.ThreadingMixIn, BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.headers_set = []
self.headers_sent = []
self.header_buffer = ''
BaseHTTPRequestHandler.__init__(self, request, client_address,
server)
def _sock_connect_to(self, netloc, soc):
"""Parse netloc string and establish connection on socket."""
host_port = netloc.split(':', 1)
if len(host_port) == 1:
host_port.append(80)
# establish connection or else write 404 and fail the function
try:
soc.connect(host_port)
except socket.error, msg:
| self.send_error(404, msg)
| return False
return True
def do_CONNECT(self):
""" Handle CONNECT commands. Just set up SSL and restart. """
request = None
try:
try:
self.log_request(200)
self.wfile.write(self.protocol_version +
' 200 Connection established\r\n')
self.wfile.write('Proxy-agent: %s\r\n' % self.version_string())
self.wfile.write('\r\n')
if not windmill.has_ssl:
return
request = self.connection
connstream = _ssl_wrap_socket(self.connection,
server_side=True,
certfile=self.server.cert_creator[self.path].certfile)
self.request = connstream
# And here we go again!
# setup...
self.base_path = 'https://' + self.path
if self.base_path.endswith(':443'):
self.base_path = self.base_path[:-4]
self.connection = self.request
self.rfile = socket._fileobject(self.connection, 'rb', self.rbufsize)
self.wfile = socket._fileobject(self.connection, 'wb', self.wbufsize)
# handle...
try:
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
#self.connection.close()
except socket.error, err:
logger.debug("%s while serving (%s) %s" % (err,
self.command, self.path))
finally:
if request is not None:
request.close()
def handle_ALL(self):
namespaces = self.server.namespaces
proxy = self.server.proxy
found = None
path = self.path.split('?', 1)[0]
for key in namespaces:
if path.find('/'+key+'/') is not -1:
found = key
environ = self.get_environ()
result = namespaces[found](environ, self.start_response)
break
else:
found = None
environ = self.get_environ()
result = proxy(environ, self.start_response)
# == Old blocking code ==
# out = list(result)
# # send data back to browser
# try:
# self.write(''.join(out))
# except socket.error, err:
# logger.debug("%s while serving (%s) %s" % (err,
# self.command, self.path))
# == New non-blocking code ==
try:
for out in result:
self.write(out)
except socket.error, err:
logger.debug("%s while serving (%s) %s" % (err,self.command, self.path))
self.wfile.flush()
self.connection.close()
do_GET = handle_ALL
do_POST = handle_ALL
def start_response(self, status, headers, exc_info=None):
if exc_info:
try:
if self.headers_sent:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif self.headers_set:
raise AssertionError("Headers al |
solintegra/addons | sale/wizard/sale_line_invoice.py | Python | agpl-3.0 | 6,126 | 0.002775 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import workflow
class sale_order_line_make_invoice(osv.osv_memory):
    """Wizard that creates customer invoices from selected sale order lines."""
    _name = "sale.order.line.make.invoice"
    _description = "Sale OrderLine Make_invoice"

    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        """Build the account.invoice values dict for *order*.

        :param order: browse record of the sale.order being invoiced
        :param lines: list of account.invoice.line ids to attach
        :return: dict of values suitable for account.invoice.create()
        """
        a = order.partner_id.property_account_receivable.id
        if order.partner_id and order.partner_id.property_payment_term.id:
            pay_term = order.partner_id.property_payment_term.id
        else:
            pay_term = False
        return {
            'name': order.client_order_ref or '',
            'origin': order.name,
            'type': 'out_invoice',
            'reference': "P%dSO%d" % (order.partner_id.id, order.id),
            'account_id': a,
            'partner_id': order.partner_invoice_id.id,
            'invoice_line': [(6, 0, lines)],  # (6,0,ids): replace all lines
            'currency_id' : order.pricelist_id.currency_id.id,
            'comment': order.note,
            'payment_term': pay_term,
            'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
            'user_id': order.user_id and order.user_id.id or False,
            'company_id': order.company_id and order.company_id.id or False,
            'date_invoice': fields.date.today(),
            'section_id': order.section_id.id,
        }

    def make_invoices(self, cr, uid, ids, context=None):
        """
        To make invoices.

        Groups the selected (context['active_ids']) invoiceable sale order
        lines by their sale order and creates one invoice per order.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None: context = {}
        res = False
        invoices = {}

        #TODO: merge with sale.py/make_invoice
        def make_invoice(order, lines):
            """
            Create one invoice for *order* over invoice-line ids *lines*.
            """
            inv = self._prepare_invoice(cr, uid, order, lines)
            inv_id = self.pool.get('account.invoice').create(cr, uid, inv)
            return inv_id

        sales_order_line_obj = self.pool.get('sale.order.line')
        sales_order_obj = self.pool.get('sale.order')
        # Group invoiceable selected lines by their parent sale order.
        for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
                if not line.order_id in invoices:
                    invoices[line.order_id] = []
                line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])
                for lid in line_id:
                    invoices[line.order_id].append(lid)

        for order, il in invoices.items():
            res = make_invoice(order, il)
            # Link the new invoice to its sale order (m2m relation table).
            cr.execute('INSERT INTO sale_order_invoice_rel \
                    (order_id,invoice_id) values (%s,%s)', (order.id, res))
            sales_order_obj.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
            flag = True
            sales_order_obj.message_post(cr, uid, [order.id], body=_("Invoice created"), context=context)
            data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)
            # The order counts as fully invoiced only if every non-cancelled
            # line is invoiced.
            for line in data_sale.order_line:
                if not line.invoiced and line.state != 'cancel':
                    flag = False
                    break
            if flag:
                # NOTE(review): `line` is the leaked loop variable from the
                # loop above; presumably any line's order_id equals `order`
                # here -- verify intent.
                line.order_id.write({'state': 'progress'})
                workflow.trg_validate(uid, 'sale.order', order.id, 'all_lines', cr)
        if not invoices:
            raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\n1.The state of this sales order line is either "draft" or "cancel"!\n2.The Sales Order Line is Invoiced!'))
        if context.get('open_invoices', False):
            return self.open_invoices(cr, uid, ids, res, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
        """ open a view on one of the given invoice_ids """
        ir_model_data = self.pool.get('ir.model.data')
        form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
        form_id = form_res and form_res[1] or False
        tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
        tree_id = tree_res and tree_res[1] or False

        return {
            'name': _('Invoice'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'account.invoice',
            'res_id': invoice_ids,
            'view_id': False,
            'views': [(form_id, 'form'), (tree_id, 'tree')],
            'context': {'type': 'out_invoice'},
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
OCA/social | mass_mailing_subscription_email/tests/test_unsubscribe_from_list.py | Python | agpl-3.0 | 1,590 | 0.001259 | # Copyright 2022 Camptocamp SA (https://www.camptocamp.com).
# @author Iván Todorovich <ivan.todorovich@camptocamp.com>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo.tests import HttpCase, tagged
from odoo.addons.mail.tests.common import MockEmail
@tagged("-at_install", "post_install")
class WebsiteSaleHttpCase(HttpCase, MockEmail):
    """HTTP test: the unsubscribe link embedded in the subscription e-mail
    must opt the contact out of the mailing list."""

    def setUp(self):
        super().setUp()
        # Disable mail tracking side effects for the whole environment.
        self.env = self.env(context=dict(self.env.context, tracking_disable=True))
        self.mailing_list = self.env.ref("mass_mailing.mailing_list_data")
        self.mailing_contact = self.env["mailing.contact"].create(
            {
                "name": "John Doe",
                "email": "john.doe@example.com",
            }
        )

    def test_subscription_email_unsubscribe_from_list(self):
        # Create subscription
        with self.mock_mail_gateway():
            subs = self.env["mailing.contact.subscription"].create(
                {
                    "contact_id": self.mailing_contact.id,
                    "list_id": self.mailing_list.id,
                }
            )
        # Pull the unsubscribe link out of the rendered e-mail body.
        body = self._new_mails._send_prepare_values()["body"]
        root = etree.fromstring(body, etree.HTMLParser())
        anchor = root.xpath("//a[@href]")[0]
        unsubscribe_url = anchor.attrib["href"]
        # Visit the link relative to the test server, then verify opt-out.
        web_base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
        self.url_open(unsubscribe_url.replace(web_base_url, ""))
        subs.invalidate_cache()
        self.assertEqual(subs.opt_out, True)
|
m00dawg/holland | plugins/holland.backup.mysql_lvm/holland/backup/mysql_lvm/plugin/mysqldump/__init__.py | Python | bsd-3-clause | 38 | 0 | from pl | ugin impo | rt MysqlDumpLVMBackup
|
fossoult/odoo | openerp/addons/base/ir/ir_sequence.py | Python | agpl-3.0 | 14,812 | 0.005198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# | GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
import openerp
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
_name | = 'ir.sequence.type'
_order = 'name'
_columns = {
'name': openerp.osv.fields.char('Name', required=True),
'code': openerp.osv.fields.char('Code', size=32, required=True),
}
_sql_constraints = [
('code_unique', 'unique(code)', '`code` must be unique.'),
]
def _code_get(self, cr, uid, context=None):
cr.execute('select code, name from ir_sequence_type')
return cr.fetchall()
class ir_sequence(openerp.osv.osv.osv):
""" Sequence model.
The sequence model allows to define and use so-called sequence objects.
Such objects are used to generate unique identifiers in a transaction-safe
way.
"""
_name = 'ir.sequence'
_order = 'name'
def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None):
'''Return number from ir_sequence row when no_gap implementation,
and number from postgres sequence when standard implementation.'''
res = dict.fromkeys(ids)
for element in self.browse(cr, user, ids, context=context):
if element.implementation != 'standard':
res[element.id] = element.number_next
else:
# get number from postgres sequence. Cannot use
# currval, because that might give an error when
# not having used nextval before.
statement = (
"SELECT last_value, increment_by, is_called"
" FROM ir_sequence_%03d"
% element.id)
cr.execute(statement)
(last_value, increment_by, is_called) = cr.fetchone()
if is_called:
res[element.id] = last_value + increment_by
else:
res[element.id] = last_value
return res
def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None):
return self.write(cr, uid, id, {'number_next': value or 0}, context=context)
_columns = {
'name': openerp.osv.fields.char('Name', size=64, required=True),
'code': openerp.osv.fields.selection(_code_get, 'Sequence Type', size=64),
'implementation': openerp.osv.fields.selection( # TODO update the view
[('standard', 'Standard'), ('no_gap', 'No gap')],
'Implementation', required=True,
help="Two sequence object implementations are offered: Standard "
"and 'No gap'. The later is slower than the former but forbids any"
" gap in the sequence (while they are possible in the former)."),
'active': openerp.osv.fields.boolean('Active'),
'prefix': openerp.osv.fields.char('Prefix', help="Prefix value of the record for the sequence"),
'suffix': openerp.osv.fields.char('Suffix', help="Suffix value of the record for the sequence"),
'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"),
'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'),
'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="Odoo will automatically adds some '0' on the left of the 'Next Number' to get the required padding size."),
'company_id': openerp.osv.fields.many2one('res.company', 'Company'),
}
_defaults = {
'implementation': 'standard',
'active': True,
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
'number_increment': 1,
'number_next': 1,
'number_next_actual': 1,
'padding' : 0,
}
def init(self, cr):
return # Don't do the following index yet.
# CONSTRAINT/UNIQUE INDEX on (code, company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
# only support field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate (code, NULL) ir_sequences.
cr.execute("""
SELECT indexname FROM pg_indexes WHERE indexname =
'ir_sequence_unique_code_company_id_idx'""")
if not cr.fetchone():
cr.execute("""
CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
ON ir_sequence (code, (COALESCE(company_id,-1)))""")
def _create_sequence(self, cr, id, number_increment, number_next):
""" Create a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
sql = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id
cr.execute(sql, (number_increment, number_next))
def _drop_sequence(self, cr, ids):
""" Drop the PostreSQL sequence if it exists.
There is no access rights check.
"""
ids = ids if isinstance(ids, (list, tuple)) else [ids]
assert all(isinstance(i, (int, long)) for i in ids), \
"Only ids in (int, long) allowed."
names = ','.join('ir_sequence_%03d' % i for i in ids)
# RESTRICT is the default; it prevents dropping the sequence if an
# object depends on it.
cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
def _alter_sequence(self, cr, id, number_increment, number_next=None):
""" Alter a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
seq_name = 'ir_sequence_%03d' % (id,)
cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
if not cr.fetchone():
# sequence is not created yet, we're inside create() so ignore it, will be set later
return
statement = "ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment)
if number_next is not None:
statement += " RESTART WITH %d" % (number_next, )
cr.execute(statement)
def create(self, cr, uid, values, context=None):
""" Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
"""
values = self._add_missing_default_values(cr, uid, values, |
yashdsaraf/scancode-toolkit | tests/extractcode/test_patch.py | Python | apache-2.0 | 72,541 | 0.007637 | #
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import json
import os
from unittest.case import expectedFailure
from commoncode.testcase import FileBasedTesting
from commoncode.text import as_unicode
from extractcode import patch
class TestIsPatch(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_is_not_patch(self):
test_dir = self.get_test_loc('patch/not_patches', copy=True)
for r, _, files in os.walk(test_dir):
for f in files:
test_file = os.path.join(r, f)
assert not patch.is_patch(test_file)
def test_is_patch(self):
test_dir = self.get_test_loc('patch/patches', copy=True)
for r, _, files in os.walk(test_dir):
for f in files:
if not f.endswith('expected'):
test_file = os.path.join(r, f)
assert patch.is_patch(test_file)
def check_patch(test_file, expected_file, regen=False):
result = [list(pi) for pi in patch.patch_info(test_file)]
result = [[as_unicode(s), as_unicode(t), map(as_unicode, lines)]
for s, t, lines in result]
if regen:
with codecs.open(expected_file, 'wb', encoding='utf-8') as regened:
json.dump(result, regened, indent=2)
with codecs.open(expected_file, 'rb', encoding='utf-8') as expect:
expected = json.load(expect)
assert expected == result
class TestPatchInfoFailing(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
# FIXME: these tests need love and eventually a bug report upstream
@expectedFailure
def test_patch_info_patch_patches_misc_webkit_opensource_patches_sync_xhr_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/webkit/opensource/patches/sync_xhr.patch')
# fails with Exception Unable to parse patch file
list(patch.patch_info(test_file))
@expectedFailure
def test_patch_info_patch_patches_problematic_opensso_patch(self):
test_file = self.get_test_loc(u'patch/patches/problematic/OpenSSO.patch')
# fails with Exception Unable to parse patch file
list(patch.patch_info(test_file))
class TestPatchInfo(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_patch_info_patch_patches_dnsmasq_2_63_1_diff(self):
test_file = self.get_test_loc(u'patch/patches/dnsmasq_2.63-1.diff')
expected_file | = self.get_test_loc('patch/patches/dnsmasq_2.63-1.diff.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_dropbear_2012_55_1_diff(self):
test_file = self.get_test_loc(u'patch/patches/d | ropbear_2012.55-1.diff')
expected_file = self.get_test_loc('patch/patches/dropbear_2012.55-1.diff.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_electricfence_2_0_5_longjmp_patch(self):
test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.0.5-longjmp.patch')
expected_file = self.get_test_loc('patch/patches/ElectricFence-2.0.5-longjmp.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_electricfence_2_1_vaarg_patch(self):
test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.1-vaarg.patch')
expected_file = self.get_test_loc('patch/patches/ElectricFence-2.1-vaarg.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_electricfence_2_2_2_madvise_patch(self):
test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.2.2-madvise.patch')
expected_file = self.get_test_loc('patch/patches/ElectricFence-2.2.2-madvise.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_electricfence_2_2_2_pthread_patch(self):
test_file = self.get_test_loc(u'patch/patches/ElectricFence-2.2.2-pthread.patch')
expected_file = self.get_test_loc('patch/patches/ElectricFence-2.2.2-pthread.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_libmediainfo_0_7_43_diff(self):
test_file = self.get_test_loc(u'patch/patches/libmediainfo-0.7.43.diff')
expected_file = self.get_test_loc('patch/patches/libmediainfo-0.7.43.diff.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_avahi_0_6_25_patches_configure_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/avahi-0.6.25/patches/configure.patch')
expected_file = self.get_test_loc('patch/patches/misc/avahi-0.6.25/patches/configure.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_avahi_0_6_25_patches_main_c_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/avahi-0.6.25/patches/main.c.patch')
expected_file = self.get_test_loc('patch/patches/misc/avahi-0.6.25/patches/main.c.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_busybox_patches_fix_subarch_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/fix-subarch.patch')
expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/fix-subarch.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_busybox_patches_gtrick_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/gtrick.patch')
expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/gtrick.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_busybox_patches_workaround_old_uclibc_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/busybox/patches/workaround_old_uclibc.patch')
expected_file = self.get_test_loc('patch/patches/misc/busybox/patches/workaround_old_uclibc.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_curl_patches_ekioh_cookie_fix_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/curl/patches/ekioh_cookie_fix.patch')
expected_file = self.get_test_loc('patch/patches/misc/curl/patches/ekioh_cookie_fix.patch.expected')
check_patch(test_file, expected_file)
def test_patch_info_patch_patches_misc_e2fsprogs_1_37_uuidlibs_blkidlibs_only_target_makefile_in_patch(self):
test_file = self.get_test_loc(u'patch/patches/misc/e2fsprogs-1.37/uuidlibs_blkidlibs_only_target_Makefile.in.patch')
expected_fi |
lo-windigo/fragdev | fragdev/settings.py | Python | agpl-3.0 | 5,073 | 0.000986 | # This file is part of the FragDev Website.
#
# the FragDev Website is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# the FragDev Website is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the FragDev Website. If not, see <http://www.gnu.org/licenses/>.
import os, stat
# Default DEBUG value to false - overridden in local_settings.py
DEBUG = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
ALLOWED_HOSTS = ['127.0.0.1']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
##################
# Local settings #
##################
# Import the local settings file (borrowed from Mezzanine)
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
import sys
import imp
module_name = "local_settings"
module = imp.new_module(module_name)
module.__file__ = f
sys.modules[module_name] = module
exec(open(f, "rb").read())
# URL prefix for static files.
STATIC_URL = '/static/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
MEDIA_URL = '/media/'
# Default filesystem permissions for uploaded files
FILE_UPLOAD_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
# List of callables that know how to import templates from various sources.
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib. | sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'dj | ango.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fragdev.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fragdev.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
WEBROOT + '/fragdev/fragdev/templates'
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'fragdev',
'images',
'projects',
'wiblog',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
stonebig/bokeh | scripts/issues.py | Python | bsd-3-clause | 12,422 | 0.002898 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import dateutil.parser
import dateutil.tz
import gzip
import json
import logging
import os
import pickle
import sys
from collections import OrderedDict
from functools import partial
from itertools import count, groupby
from six.moves.urllib.request import urlopen, Request
logging.basicConfig(level=logging.INFO)
API_PARAMS = {
'base_url': 'https://api.github.com/repos',
'owner': 'bokeh',
'repo': 'bokeh',
}
IGNORE_ISSUE_TYPE = {
'type: discussion',
'type: tracker',
}
LOG_SECTION = OrderedDict([ # issue type label -> log section heading
('type: bug', 'bugfixes'),
('type: feature', 'features'),
('type: task', 'tasks'),
])
ISSUES_SORT_KEY = lambda issue: (issue_section_order(issue), int(issue['number']))
ISSUES_BY_SECTION = lambda issue: issue_section(issue)
#######################################
# Object Storage
#######################################
def save_object(filename, obj):
"""Compresses and pickles given object to the given filename."""
logging.info('saving {}...'.format(filename))
try:
with gzip.GzipFile(filename, 'wb') as f:
f.write(pickle.dumps(obj, 1))
except Exception as e:
logging.error('save failure: {}'.format(e))
raise
def load_object(filename):
"""Unpickles and decompresses the given filename and returns the created object."""
logging.info('loading {}...'.format(filename))
try:
with gzip.GzipFile(filename, 'rb') as f:
buf = ''
while True:
data = f.read()
if data == '':
break
buf += data
return pickle.loads(buf)
except Exception as e:
logging.error('load failure: {}'.format(e))
raise
#######################################
# Issues
#######################################
def issue_section_order(issue):
"""Returns the section order for the given issue."""
try:
return LOG_SECTION.values().index(issue_section(issue))
except:
return -1
def issue_completed(issue):
"""Returns True iff this issue is has been resolved as completed."""
labels = issue.get('labels', [])
return any(label['name'] == 'reso: completed' for label in labels)
def issue_section(issue):
"""Returns the se | ction heading for the issue, or None if this issue should be ignored."""
labels = issue.get('labels', [])
for label in labels:
if not label['name'].startswith('type: '):
continue
if label['name'] in LOG_SECTION:
return LOG_SECTION[label['name']]
elif label['name'] in IGNORE_ISSUE_TYPE:
return None
else:
| logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
return None
def issue_tags(issue):
"""Returns list of tags for this issue."""
labels = issue.get('labels', [])
return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
def closed_issue(issue, after=None):
"""Returns True iff this issue was closed after given date. If after not given, only checks if issue is closed."""
if issue['state'] == 'closed':
if after is None or parse_timestamp(issue['closed_at']) > after:
return True
return False
def relevent_issue(issue, after):
"""Returns True iff this issue is something we should show in the changelog."""
return (closed_issue(issue, after) and
issue_completed(issue) and
issue_section(issue))
def relevant_issues(issues, after):
"""Yields relevant closed issues (closed after a given datetime) given a list of issues."""
logging.info('finding relevant issues after {}...'.format(after))
seen = set()
for issue in issues:
if relevent_issue(issue, after) and issue['title'] not in seen:
seen.add(issue['title'])
yield issue
def closed_issues(issues, after):
"""Yields closed issues (closed after a given datetime) given a list of issues."""
logging.info('finding closed issues after {}...'.format(after))
seen = set()
for issue in issues:
if closed_issue(issue, after) and issue['title'] not in seen:
seen.add(issue['title'])
yield issue
def all_issues(issues):
"""Yields unique set of issues given a list of issues."""
logging.info('finding issues...')
seen = set()
for issue in issues:
if issue['title'] not in seen:
seen.add(issue['title'])
yield issue
#######################################
# GitHub API
#######################################
def get_labels_url():
"""Returns github API URL for querying labels."""
return '{base_url}/{owner}/{repo}/labels'.format(**API_PARAMS)
def get_issues_url(page, after):
"""Returns github API URL for querying tags."""
template = '{base_url}/{owner}/{repo}/issues?state=closed&per_page=100&page={page}&since={after}'
return template.format(page=page, after=after.isoformat(), **API_PARAMS)
def get_tags_url():
"""Returns github API URL for querying tags."""
return '{base_url}/{owner}/{repo}/tags'.format(**API_PARAMS)
def parse_timestamp(timestamp):
"""Parse ISO8601 timestamps given by github API."""
dt = dateutil.parser.parse(timestamp)
return dt.astimezone(dateutil.tz.tzutc())
def read_url(url):
"""Reads given URL as JSON and returns data as loaded python object."""
logging.debug('reading {url} ...'.format(url=url))
token = os.environ.get("BOKEH_GITHUB_API_TOKEN")
headers = {}
if token:
headers['Authorization'] = 'token %s' % token
request = Request(url, headers=headers)
response = urlopen(request).read()
return json.loads(response.decode("UTF-8"))
def query_tags():
"""Hits the github API for repository tags and returns the data."""
return read_url(get_tags_url())
def query_issues(page, after):
"""Hits the github API for a single page of closed issues and returns the data."""
return read_url(get_issues_url(page, after))
def query_all_issues(after):
"""Hits the github API for all closed issues after the given date, returns the data."""
page = count(1)
data = []
while True:
page_data = query_issues(next(page), after)
if not page_data:
break
data.extend(page_data)
return data
def dateof(tag_name, tags):
"""Given a list of tags, returns the datetime of the tag with the given name; Otherwise None."""
for tag in tags:
if tag['name'] == tag_name:
commit = read_url(tag['commit']['url'])
return parse_timestamp(commit['commit']['committer']['date'])
return None
def get_data(query_func, load_data=False, save_data=False):
"""Gets data from query_func, optionally saving that data to a file; or loads data from a file."""
if hasattr(query_func, '__name__'):
func_name = query_func.__name__
elif hasattr(query_func, 'func'):
func_name = query_func.func.__name__
pickle_file = '{}.pickle'.format(func_name)
if load_data:
data = load_object(pickle_file)
else:
data = query_func()
if save_data:
save_object(pickle_file, data)
return data
#######################################
# Validation
#######################################
def check_issue(issue, after):
have_warnings = False
labels = issue.get('labels', [])
if 'pull_request' in issue:
if not any(label['name'].startswith('status: ') for label in labels):
logging.warning('pull request without status label: {}'.format(issue_line(issue)))
have_warnings = True
else:
if not any(label['name'].startswith('type: ') for label in labels):
if not any(label['name']=="reso: duplicate" for label in labels):
logging.warning('issue with no type label: {}'.format(issue_line((issue))))
have_warnings = True
if closed_issue(issue, after):
|
yarikoptic/NiPy-OLD | examples/neurospin/need_data/block_matching.py | Python | bsd-3-clause | 1,238 | 0.016963 | #!/usr/bin/env python
from nipy.neurospin.register.iconic_matcher import IconicMatcher
from nipy.io.imageformats import load as load_image
from os.path import join
import numpy as np
"""
Example of running affine matching on the 'sulcal2000' database
"""
##rootpath = 'D:\\data\\sulcal2000'
rootpath = '/neurospin/lnao/Panabase/roche/sulcal2000'
print('Scanning data directory...')
# Get data
print('Fetching image data...')
I = load_image(join(rootpath,'nobias_anubis'+'.nii'))
J = load_image(join(rootpath,'ammon_TO_anubis'+'.nii'))
# Setup registration algorithm
matcher = IconicMatcher(I.get_data(), J.get_data(),
I.g | et_affine(), J.get_affine()) ## I: source, J: target
# Params
size = 5
nsimu = 1
depth = 10
import pylab as pl
# Simulations
for i in range(nsimu):
# Select random block
x0 = np.random.randint(I.array.shape[0]-size)
y0 = np.random.randint(I.array.shape[1]-size)
z0 = np.random.randint(I.array.shape[2]-size)
matcher.set(corner=[x0,y0,z0], size=[size,size,size])
# Explore neighborhood
tx = I.voxsize[0] * (np.aran | ge(2*depth + 1)-depth)
s, p = matcher.explore(tx=tx)
# Display
pl.plot(p[:,0],(1-s)**(-.5*size**3))
pl.show()
|
bnaul/scikit-learn | sklearn/ensemble/tests/test_stacking.py | Python | bsd-3-clause | 19,101 | 0 | """Test the stacking classifier and regressor."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
import pytest
import numpy as np
import scipy.sparse as sparse
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.datasets import load_iris
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import make_regression
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import scale
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import StackingRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_iris, y_iris = load_iris(return_X_y=True)
@pytest.mark.parametrize(
"cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.ma | rk.parametrize(
"final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, y_test = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', LogisticReg | ression()), ('svc', LinearSVC())]
clf = StackingClassifier(
estimators=estimators, final_estimator=final_estimator, cv=cv,
passthrough=passthrough
)
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
assert clf.score(X_test, y_test) > 0.8
X_trans = clf.transform(X_test)
expected_column_count = 10 if passthrough else 6
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
clf.set_params(lr='drop')
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
if final_estimator is None:
# LogisticRegression has decision_function method
clf.decision_function(X_test)
X_trans = clf.transform(X_test)
expected_column_count_drop = 7 if passthrough else 3
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
def test_stacking_classifier_drop_column_binary_classification():
# check that a column is dropped in binary classification
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, _ = train_test_split(
scale(X), y, stratify=y, random_state=42
)
# both classifiers implement 'predict_proba' and will both drop one column
estimators = [('lr', LogisticRegression()),
('rf', RandomForestClassifier(random_state=42))]
clf = StackingClassifier(estimators=estimators, cv=3)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
# LinearSVC does not implement 'predict_proba' and will not drop one column
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
clf.set_params(estimators=estimators)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
def test_stacking_classifier_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', 'drop'), ('svc', LinearSVC(random_state=0))]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=[('svc', LinearSVC(random_state=0))],
final_estimator=rf, cv=5
)
clf_drop = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5
)
clf.fit(X_train, y_train)
clf_drop.fit(X_train, y_train)
assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
def test_stacking_regressor_drop_estimator():
    """A 'drop' entry in the estimator list must behave exactly as if that
    estimator had never been listed."""
    # Prescale the data to avoid convergence warnings without a pipeline,
    # so the later allclose comparisons see identical preprocessing.
    train_X, test_X, train_y, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    final = RandomForestRegressor(n_estimators=10, random_state=42)
    baseline = StackingRegressor(
        estimators=[('svr', LinearSVR(random_state=0))],
        final_estimator=final, cv=5
    )
    with_drop = StackingRegressor(
        estimators=[('lr', 'drop'), ('svr', LinearSVR(random_state=0))],
        final_estimator=final, cv=5
    )
    baseline.fit(train_X, train_y)
    with_drop.fit(train_X, train_y)
    assert_allclose(baseline.predict(test_X), with_drop.predict(test_X))
    assert_allclose(baseline.transform(test_X), with_drop.transform(test_X))
@pytest.mark.parametrize(
    "cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
    "final_estimator, predict_params",
    [(None, {}),
     (RandomForestRegressor(random_state=42), {}),
     (DummyRegressor(), {'return_std': True})]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params,
                                     passthrough):
    """Smoke-test StackingRegressor on diabetes across cv schemes, final
    estimators (including one taking extra predict kwargs) and passthrough."""
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
    reg = StackingRegressor(
        estimators=estimators, final_estimator=final_estimator, cv=cv,
        passthrough=passthrough
    )
    reg.fit(X_train, y_train)
    result = reg.predict(X_test, **predict_params)
    # DummyRegressor(return_std=True) returns a (prediction, std) pair.
    expected_result_length = 2 if predict_params else 1
    if predict_params:
        assert len(result) == expected_result_length
    # transform yields one column per estimator, plus the 10 original
    # diabetes features when passthrough is enabled.
    X_trans = reg.transform(X_test)
    expected_column_count = 12 if passthrough else 2
    assert X_trans.shape[1] == expected_column_count
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])
    # Dropping 'lr' removes exactly one stacked column.
    reg.set_params(lr='drop')
    reg.fit(X_train, y_train)
    reg.predict(X_test)
    X_trans = reg.transform(X_test)
    expected_column_count_drop = 11 if passthrough else 1
    assert X_trans.shape[1] == expected_column_count_drop
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])
@pytest.mark.parametrize('fmt', ['csc', 'csr', 'coo'])
def test_stacking_regressor_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_diabetes)).asformat(fmt),
y_diabetes, random_state=42
)
estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
clf = StackingRegressor(
estimat |
vzhong/sent2rel | tests/test_adaptors.py | Python | mit | 10,128 | 0.005924 | __author__ = 'victor'
import unittest
from data.adaptors import *
import csv
import os
mydir = os.path.dirname(os.path.abspath(__file__))
class TestAdaptor(object):
def test_words(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.words, [w.lower() for w in self.words])
def test_lemmas(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.lemmas, [w.lower() for w in self.lemmas])
def test_ner(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.ner, self.ner)
def test_pos(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.pos, self.pos)
def test_subject(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.subject, self.subject.lower())
self.assertEqual(ex.subject_ner, self.subject_ner)
self.assertEqual(ex.subject_begin, self.subject_begin)
self.assertEqual(ex.subject_end, self.subject_end)
def test_object(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.object, self.object.lower())
self.assertEqual(ex.object_ner, self.object_ner)
self.assertEqual(ex.object_begin, self.object_begin)
self.assertEqual(ex.object_end, self.object_end)
def test_relation(self):
ex = self.adaptor.to_example(self.raw)
self.assertEqual(ex.relation, self.relation)
def test_read_file(self):
for ex in self.adaptor.to_examples(self.file):
pass
class TestSupervised(unittest.TestCase, TestAdaptor):
    """Checks SupervisedDataAdaptor against the first row of supervision.csv.

    Note: two of the data lines below were corrupted by stray '|' separator
    artifacts in the source dump; they have been reconstructed.
    """

    def setUp(self):
        self.file = os.path.join(mydir, '..', 'data', 'raw', 'supervision.csv')
        with open(self.file) as f:
            reader = csv.reader(f)
            self.raw = reader.next()
        self.adaptor = SupervisedDataAdaptor()
        # Expected gold annotations for the fixture sentence.
        self.words = [
            "Alexandra", "of", "Denmark", "-LRB-", "0000", "-", "0000", "-RRB-", "was", "Queen",
            "Consort", "to", "Edward", "VII", "of", "the", "United", "Kingdom", "and", "thus",
            "Empress", "of", "India", "during", "her", "husband", "\'s", "reign", "."
        ]
        self.lemmas = [
            "Alexandra", "of", "Denmark", "-lrb-", "0000", "-", "0000", "-rrb-", "be", "Queen",
            "Consort", "to", "Edward", "VII", "of", "the", "United", "Kingdom", "and", "thus",
            "empress", "of", "India", "during", "she", "husband", "'s", "reign", "."
        ]
        self.ner = [
            "PERSON", "PERSON", "PERSON", "O", "DATE", "DURATION", "DATE", "O", "O", "LOCATION",
            "LOCATION", "O", "PERSON", "PERSON", "O", "O", "LOCATION", "LOCATION", "O", "O", "O",
            "O", "LOCATION", "O", "O", "O", "O", "O", "O"
        ]
        self.pos = [
            "NNP", "IN", "NNP", "-LRB-", "CD", ":", "CD", "-RRB-", "VBD", "NNP", "NNP", "TO", "NNP",
            "NNP", "IN", "DT", "NNP", "NNP", "CC", "RB", "NN", "IN", "NNP", "IN", "PRP$", "NN",
            "POS", "NN", ".",
        ]
        # Token spans are [begin, end) indices into the lists above.
        self.subject_begin = 0
        self.subject_end = 3
        self.subject = 'Alexandra of Denmark'
        self.subject_ner = 'PERSON'
        self.object_begin = 12
        self.object_end = 13
        self.object = 'Edward'
        self.object_ner = 'PERSON'
        self.relation = 'per:spouse'
class TestKBPTest(unittest.TestCase, TestAdaptor):
    """Checks KBPDataAdaptor against the first row of test.sample.tsv.

    The fixture has no gold relation (unlabeled KBP test data), hence
    ``self.relation = None``.
    """

    def setUp(self):
        self.file = os.path.join(mydir, '..', 'data', 'raw', 'test.sample.tsv')
        with open(self.file) as f:
            reader = csv.reader(f, delimiter="\t")
            self.raw = reader.next()  # Python 2 reader API
        self.adaptor = KBPDataAdaptor()
        # Expected gold annotations for the fixture sentence.
        self.words = [
            'This', 'recipe', 'from', 'Sean', 'Baker', 'of', 'Gather', 'in', 'Berkeley', 'is', 'a',
            'vegan', 'interpretation', 'of', 'a', 'rustic', 'seafood', 'salad', 'that', 'typically',
            'includes', 'mussels', ',', 'squid', 'and', 'other', 'shellfish', '.'
        ]
        self.lemmas = ['this', 'recipe', 'from', 'Sean', 'Baker', 'of', 'Gather', 'in',
                       'Berkeley', 'be', 'a', 'vegan', 'interpretation', 'of', 'a', 'rustic',
                       'seafood', 'salad', 'that', 'typically', 'include', 'mussel', ',',
                       'squid', 'and', 'other', 'shellfish', '.']
        self.ner = [
            'O', 'O', 'O', 'PERSON', 'PERSON', 'O', 'O', 'O', 'CITY', 'O', 'O', 'O', 'O', 'O', 'O',
            'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'CAUSE_OF_DEATH', 'O'
        ]
        self.pos = [
            'DT', 'NN', 'IN', 'NNP', 'NNP', 'IN', 'NNP', 'IN', 'NNP', 'VBZ', 'DT', 'JJ', 'NN',
            'IN', 'DT', 'JJ', 'NN', 'NN', 'WDT', 'RB', 'VBZ', 'NNS', ',', 'NN', 'CC', 'JJ',
            'NN', '.'
        ]
        # Token spans are [begin, end) indices into the lists above.
        self.subject_begin = 3
        self.subject_end = 5
        self.subject = 'Sean Baker'
        self.subject_ner = 'PERSON'
        self.object_begin = 8
        self.object_end = 9
        self.object = 'Berkeley'
        self.object_ner = 'CITY'
        self.relation = None
class TestKBPEvaluationTest(unittest.TestCase, TestAdaptor):
    """Checks KBPEvaluationDataAdaptor against the first row of evaluation.tsv."""

    def setUp(self):
        self.file = os.path.join(mydir, '..', 'data', 'raw', 'evaluation.tsv')
        with open(self.file) as f:
            reader = csv.reader(f, delimiter="\t")
            self.raw = reader.next()  # Python 2 reader API
        self.adaptor = KBPEvaluationDataAdaptor()
        # Expected gold annotations for the fixture sentence.
        self.words = [
            'She', 'waited', 'for', 'him', 'to', 'phone', 'her', 'that', 'night', 'so', 'they',
            'could', 'continue', 'their', 'discussion', ',', 'but', 'Pekar', 'never', 'called',
            ';', 'he', 'was', 'found', 'dead', 'early', 'the', 'next', 'morning', 'by', 'his',
            'wife', ',', 'Joyce', 'Brabner', '.']
        self.lemmas = [
            'she', 'wait', 'for', 'he', 'to', 'phone', 'she', 'that', 'night', 'so', 'they',
            'could', 'continue', 'they', 'discussion', ',', 'but', 'Pekar', 'never', 'call', ';',
            'he', 'be', 'find', 'dead', 'early', 'the', 'next', 'morning', 'by', 'he', 'wife',
            ',', 'Joyce', 'Brabner', '.']
        self.ner = [
            'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'TIME', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O',
            'PERSON', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'DATE', 'DATE', 'DATE', 'DATE', 'O', 'O',
            'O', 'O', 'PERSON', 'PERSON', 'O'
        ]
        self.pos = [
            'PRP', 'VBD', 'IN', 'PRP', 'TO', 'VB', 'PRP$', 'DT', 'NN', 'IN', 'PRP', 'MD', 'VB',
            'PRP$', 'NN', ',', 'CC', 'NNP', 'RB', 'VBD', ':', 'PRP', 'VBD', 'VBN', 'JJ', 'RB',
            'DT', 'JJ', 'NN', 'IN', 'PRP$', 'NN', ",", 'NNP', 'NNP', '.'
        ]
        # Token spans are [begin, end) indices into the lists above.
        self.subject_begin = 17
        self.subject_end = 18
        self.subject = 'Pekar'
        self.subject_ner = 'PERSON'
        self.object_begin = 33
        self.object_end = 35
        self.object = 'Joyce Brabner'
        self.object_ner = 'PERSON'
        self.relation = 'per:spouse'
class TestSelfTrainingAdaptor(unittest.TestCase, TestAdaptor):
def setUp(self):
self.file = os.path.join(mydir, '..', 'data', 'raw', 'self_training.tsv')
with open(self.file) as f:
reader = csv.reader(f, delimiter="\t")
self.raw = reader.next()
self.adaptor = SelfTrainingAdaptor()
self.words = ['-LSB-', '00', '-RSB-', 'Y.F.', 'Sasaki', ',', 'K.', 'Fujikawa', ',', 'K.',
'Ishida', ',', 'N.', 'Kawamura', ',', 'Y.', 'Nishikawa', ',', 'S.', 'Ohta',
',', 'M.', 'Satoh', ',', 'H.', 'Madarame', ',', 'S.', 'Ueno', ',', 'N.',
'Susa', ',', 'N.', 'Matsusaka', ',', 'S.', 'Tsuda', ',', 'The', 'alkaline',
'single-cell', 'gel', 'electrophoresis', 'assay', 'with', 'mouse',
'multiple', 'organs', ':', 'results', 'with', '00', 'aromatic', 'amines',
'evaluated', 'by', 'the', 'IARC', 'and', 'US', 'NTP', ',', 'Mutat', '.']
self.lemmas = ['-lsb-', '00', '-rsb-', 'Y.F.', 'Sasaki', ',', 'K.', 'Fujikawa', ',', 'K.',
'Ishida', ',', 'N.', 'Kawamura', ',', 'Y.', 'Nishi |
denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/monkeypatches/sqlalchemy2189.py | Python | mit | 4,442 | 0.005853 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from buildbot.util import sautils
from sqlalchemy.engine import reflection
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, _pragma_cursor
from sqlalchemy.dialects.sqlite.base import sqltypes, util
@reflection.cache
def get_columns_06x_fixed(self, connection, table_name, schema=None, **kw):
    """Replacement for ``SQLiteDialect.get_columns`` on SQLAlchemy 0.6.x.

    Backport of the fix for http://www.sqlalchemy.org/trac/ticket/2189:
    unrecognized column types fall back to ``NullType`` with a warning
    instead of aborting reflection.

    NOTE: two lines of this function were corrupted by stray '|' artifacts
    in the source dump and have been reconstructed from the parallel 0.7.x
    implementation below (keeping the 0.6.x placement of the length-args
    handling outside the try/except, as upstream had it).
    """
    quote = self.identifier_preparer.quote_identifier
    if schema is not None:
        pragma = "PRAGMA %s." % quote(schema)
    else:
        pragma = "PRAGMA "
    qtable = quote(table_name)
    c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
    #### found_table = False (pyflake)
    columns = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        # PRAGMA table_info columns: (cid, name, type, notnull, dflt_value, pk)
        (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])
        name = re.sub(r'^\"|\"$', '', name)
        #### if default:
        ####     default = re.sub(r"^\'|\'$", '', default)
        # Split e.g. "VARCHAR(30)" into the type name and its length args.
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''
        try:
            coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, name))
            coltype = sqltypes.NullType
        if args is not None:
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])
        columns.append({
            'name' : name,
            'type' : coltype,
            'nullable' : nullable,
            'default' : default,
            'primary_key': primary_key
        })
    return columns
@reflection.cache
def get_columns_07x_fixed(self, connection, table_name, schema=None, **kw):
    """Replacement for ``SQLiteDialect.get_columns`` on SQLAlchemy 0.7.x.

    Backport of the fix for http://www.sqlalchemy.org/trac/ticket/2189:
    unrecognized column types fall back to ``NullType()`` with a warning
    instead of aborting reflection.  Unlike the 0.6.x variant, the length
    arguments are applied inside the try block, so a failed type lookup
    yields a bare ``NullType()``.
    """
    quote = self.identifier_preparer.quote_identifier
    if schema is not None:
        pragma = "PRAGMA %s." % quote(schema)
    else:
        pragma = "PRAGMA "
    qtable = quote(table_name)
    c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
    #### found_table = False (pyflake)
    columns = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        # PRAGMA table_info columns: (cid, name, type, notnull, dflt_value, pk)
        (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])
        name = re.sub(r'^\"|\"$', '', name)
        #### if default:
        ####     default = re.sub(r"^\'|\'$", '', default)
        # Split e.g. "VARCHAR(30)" into the type name and its length args.
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''
        try:
            coltype = self.ischema_names[coltype]
            if args is not None:
                args = re.findall(r'(\d+)', args)
                coltype = coltype(*[int(a) for a in args])
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (coltype, name))
            coltype = sqltypes.NullType()
        columns.append({
            'name' : name,
            'type' : coltype,
            'nullable' : nullable,
            'default' : default,
            'autoincrement':default is None,
            'primary_key': primary_key
        })
    return columns
def patch():
    """Monkey-patch ``SQLiteDialect.get_columns`` with the fixed version.

    Fix for http://www.sqlalchemy.org/trac/ticket/2189, backported to 0.6.0;
    the 0.6.x and 0.7.x SQLAlchemy lines need slightly different bodies.
    """
    is_06x = sautils.sa_version()[:2] == (0, 6)
    SQLiteDialect.get_columns = (
        get_columns_06x_fixed if is_06x else get_columns_07x_fixed
    )
|
n4hy/gnuradio | gnuradio-core/src/python/gnuradio/blks2impl/fm_demod.py | Python | gpl-3.0 | 4,236 | 0.017469 | #
# Copyright 2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
from gnuradio.blks2impl.fm_emph import fm_deemp | h
from math import pi
class fm_demod_cf(gr.hier_block2):
    """
    Generalized FM demodulation block with deemphasis and audio
    filtering.

    This block demodulates a band-limited, complex down-converted FM
    channel into the original baseband signal, optionally applying
    deemphasis. Low pass filtering is done on the resultant signal. It
    produces an output float stream in the range of [-1.0, +1.0].

    @param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
    @param deviation: maximum FM deviation (default = 5000)
    @type deviation: float
    @param audio_decim: input to output decimation rate
    @type audio_decim: integer
    @param audio_pass: audio low pass filter passband frequency
    @type audio_pass: float
    @param audio_stop: audio low pass filter stop frequency
    @type audio_stop: float
    @param gain: gain applied to audio output (default = 1.0)
    @type gain: float
    @param tau: deemphasis time constant (default = 75e-6), specify 'None'
                to prevent deemphasis
    """
    def __init__(self, channel_rate, audio_decim, deviation,
                 audio_pass, audio_stop, gain=1.0, tau=75e-6):
        gr.hier_block2.__init__(self, "fm_demod_cf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float))      # Output signature

        # Quadrature demod gain: scales rad/sample so that a swing of
        # +/-deviation Hz maps to roughly +/-1.0 at the output.
        k = channel_rate/(2*pi*deviation)
        QUAD = gr.quadrature_demod_cf(k)

        audio_taps = optfir.low_pass(gain,          # Filter gain
                                     channel_rate,  # Sample rate
                                     audio_pass,    # Audio passband
                                     audio_stop,    # Audio stopband
                                     0.1,           # Passband ripple
                                     60)            # Stopband attenuation
        LPF = gr.fir_filter_fff(audio_decim, audio_taps)

        # tau=None skips the deemphasis stage entirely.
        if tau is not None:
            DEEMPH = fm_deemph(channel_rate, tau)
            self.connect(self, QUAD, DEEMPH, LPF, self)
        else:
            self.connect(self, QUAD, LPF, self)
class demod_20k0f3e_cf(fm_demod_cf):
    """
    NBFM demodulation block, 20 KHz channels

    This block demodulates a complex, downconverted, narrowband FM
    channel conforming to 20K0F3E emission standards, outputting
    floats in the range [-1.0, +1.0].

    @param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
    @param audio_decim: input to output decimation rate
    @type audio_decim: integer
    """
    def __init__(self, channel_rate, audio_decim):
        deviation = 5000    # maximum FM deviation (Hz)
        audio_pass = 3000   # audio passband edge (Hz)
        audio_stop = 4500   # audio stopband edge (Hz)
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             deviation, audio_pass, audio_stop)
class demod_200kf3e_cf(fm_demod_cf):
    """
    WFM demodulation block, mono.

    This block demodulates a complex, downconverted, wideband FM
    channel conforming to 200KF3E emission standards, outputting
    floats in the range [-1.0, +1.0].

    @param channel_rate: incoming sample rate of the FM baseband
    @type channel_rate: integer
    @param audio_decim: input to output decimation rate
    @type audio_decim: integer
    """
    def __init__(self, channel_rate, audio_decim):
        # Uses the parent's default tau=75e-6 deemphasis; the 20.0 audio
        # gain compensates for the wideband deviation scaling.
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             75000, # Deviation
                             15000, # Audio passband
                             16000, # Audio stopband
                             20.0)  # Audio gain
|
oliverlee/pydy | pydy/viz/camera.py | Python | bsd-3-clause | 9,435 | 0.000212 | #!/usr/bin/env python
# standard lib
import warnings
# local
from ..utils import PyDyUserWarning
from .shapes import Shape
from .visualization_frame import VisualizationFrame
__all__ = ['PerspectiveCamera', 'OrthoGraphicCamera']
warnings.simplefilter('once', PyDyUserWarning)
class PerspectiveCamera(VisualizationFrame):
    """Creates a perspective camera for use in a scene. The camera is inherited
    from ``VisualizationFrame``, and thus behaves similarly. It can be attached
    to dynamics objects, hence we can get a moving camera. All the
    transformation matrix generation methods are applicable to a
    ``PerspectiveCamera``.
    """

    def __init__(self, *args, **kwargs):
        """Initialises a PerspectiveCamera object. To initialize a
        PerspectiveCamera, one needs to supply a name (optional), a reference
        frame, a point, field of view (fov) (optional), near plane distance
        (optional) and far plane distance (optional).

        Like ``VisualizationFrame``, it can also be initialized using one of
        these three argument sequences:

        Rigidbody
            ``PerspectiveCamera(rigid_body)``
        ReferenceFrame, Point
            ``PerspectiveCamera(ref_frame, point)``
        ReferenceFrame, Particle
            ``PerspectiveCamera(ref_frame, particle)``

        Note that you can also supply and optional name as the first positional
        argument, e.g.::

            ``PerspectiveCamera('camera_name', rigid_body)``

        Additional optional keyword arguments are below:

        Parameters
        ==========
        fov : float, default=45.0
            Field Of View, It determines the angle between the top and bottom
            of the viewable area (in degrees).
        near : float
            The distance of near plane of the PerspectiveCamera. All objects
            closer to this distance are not displayed.
        far : int or float
            The distance of far plane of the PerspectiveCamera. All objects
            farther than this distance are not displayed.

        Examples
        ========
        >>> from sympy import symbols
        >>> from sympy.physics.mechanics import (ReferenceFrame, Point,
        ...                                      RigidBody, Particle,
        ...                                      inertia)
        >>> from pydy.viz import PerspectiveCamera
        >>> I = ReferenceFrame('I')
        >>> O = Point('O')
        >>> # initializing with reference frame, point
        >>> camera1 = PerspectiveCamera('frame1', I, O)
        >>> # Initializing with a RigidBody
        >>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
        >>> i = inertia(I, Ixx, Iyy, Izz)
        >>> rbody = RigidBody('rbody', O, I, mass, (inertia, O))
        >>> camera2 = PerspectiveCamera('frame2', rbody)
        >>> # initializing with Particle, reference_frame
        >>> Pa = Particle('Pa', O, mass)
        >>> camera3 = PerspectiveCamera('frame3', I, Pa)

        """
        # NOTE: This allows us to use inheritance even though cameras don't
        # need a shape. In the future, this could be a camera shape that could
        # be made visible in the scene (only important for multiple cameras).
        args = list(args) + [Shape()]
        super(PerspectiveCamera, self).__init__(*args)
        # Defaults; overridden by any fov/near/far keyword arguments below.
        self.fov = 45.0
        self.near = 1.0
        self.far = 1000.0
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __str__(self):
        return 'PerspectiveCamera: ' + self.name

    def __repr__(self):
        return 'PerspectiveCamera'

    @property
    def fov(self):
        return self._fov

    @fov.setter
    def fov(self, new_fov):
        # Coerce to float so ints/strings of numbers behave consistently.
        self._fov = float(new_fov)

    @property
    def near(self):
        return self._near

    @near.setter
    def near(self, new_near):
        self._near = float(new_near)

    @property
    def far(self):
        return self._far

    @far.setter
    def far(self, new_far):
        self._far = float(new_far)

    def generate_scene_dict(self, **kwargs):
        """This method generates information for a static visualization in the
        initial conditions, in the form of dictionary. This contains camera
        parameters followed by an init_orientation key.

        Before calling this method, all the transformation matrix generation
        methods should be called, or it will give an error.

        Returns
        =======
        A dict with following Keys:

        1. name: name for the camera
        2. fov: Field of View value of the camera
        3. near: near value of the camera
        4. far: far value of the camera
        5. init_orientation: Initial orientation of the camera

        """
        # BUGFIX: this line was corrupted by a stray '|' artifact in the
        # source dump; reconstructed as the parent-class delegation call.
        scene_dict = super(PerspectiveCamera, self).generate_scene_dict(**kwargs)
        scene_dict[id(self)]['type'] = self.__class__.__name__
        scene_dict[id(self)]['fov'] = self.fov
        scene_dict[id(self)]['near'] = self.near
        scene_dict[id(self)]['far'] = self.far
        return scene_dict
class OrthoGraphicCamera(VisualizationFrame):
"""Creates a orthographic camera for use in a sc | ene. The camera is
inherited from ``VisualizationFrame``, and thus behaves similarly. It can
be attached to dynamics objects, hence we can get a moving camera. All the
transformation matrix generation methods are applicable to a
``OrthoGraphicCameraCamera``.
"""
def __init__(self, *args, **kwargs):
"""Initialises a OrthoGraphicCameraCamera object. To initialize a
OrthoGraphicCameraCamera, one needs to supply a name (optional), a
reference frame, a point, field of view (fov) (optional), near plane
distance (optional) and far plane distance (optional).
Like ``VisualizationFrame``, it can also be initialized using one of
these three argument sequences:
Rigidbody
``OrthoGraphicCameraCamera(rigid_body)``
ReferenceFrame, Point
``OrthoGraphicCameraCamera(ref_frame, point)``
ReferenceFrame, Particle
``OrthoGraphicCameraCamera(ref_frame, particle)``
Note that you can also supply and optional name as the first positional
argument, e.g.::
OrthoGraphicCameraCamera('camera_name', rigid_body)
Additional optional keyword arguments are below:
Parameters
==========
near : float
The distance of near plane of the OrthoGraphicCameraCamera. All
objects closer to this distance are not displayed.
far : int or float
The distance of far plane of the OrthoGraphicCameraCamera. All
objects farther than this distance are not displayed.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (ReferenceFrame, Point,
... RigidBody, Particle,
... inertia)
>>> from pydy.viz import OrthoGraphicCameraCamera
>>> I = ReferenceFrame('I')
>>> O = Point('O')
>>> # initializing with reference frame, point
>>> camera1 = OrthoGraphicCameraCamera('frame1', I, O)
>>> # Initializing with a RigidBody
>>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
>>> i = inertia(I, Ixx, Iyy, Izz)
>>> rbody = RigidBody('rbody', O, I, mass, (inertia, O))
>>> camera2 = OrthoGraphicCameraCamera('frame2', rbody)
>>> # initializing with Particle, reference_frame
>>> Pa = Particle('Pa', O, mass)
>>> camera3 = OrthoGraphicCameraCamera('frame3', I, Pa)
"""
# NOTE: This allows us to use inhertiance even though cameras don't
# need a shape. In the future, this could be a camera shape that could
# be made visible in the scene (only important for multiple cameras).
args = list(args) + [Shape()]
super(OrthoGraphicCamera, self).__init__(*args)
self.near = 1.0
self.far = 1000.0
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
ret |
moses1984/aafmt | LaTeXtoLaTeX-master/myoperations.py | Python | gpl-2.0 | 1,718 | 0.018044 |
import re
import utilities
import component
def setvariables(text):
    """Extract the chapter abbreviation (the 2nd argument of the \\chap macro
    found in *text*) and store it on the shared ``component`` module, where
    ``mytransform`` later reads it when building label prefixes."""
    component.chapter_abbrev = utilities.argument_of_macro(text,"chap",2)
####### | ############
def mytransform(text):
    """Apply the LaTeX-to-LaTeX normalizations for one chapter body.

    Requires ``setvariables`` to have been called first so that
    ``component.chapter_abbrev`` is populated. Returns the transformed text.
    """
    thetext = text
    # get rid of extraneous spaces after \begin and \end
    thetext = re.sub(r"\\begin\s+{", r"\\begin{", thetext)
    # BUGFIX: the replacement here was r"\end{" — \e is an invalid escape in
    # a re.sub replacement template (an error since Python 3.7); the
    # backslash must be doubled to emit a literal backslash.
    thetext = re.sub(r"\\end\s+{", r"\\end{", thetext)
    # replace \begin{env}{the_label} by
    # \begin{env}\label{prefix:chapter_abbrev:the_label}
    # (the three original calls differed only in the names, so loop)
    for env, prefix in (("prop", "proposition"),
                        ("example", "example"),
                        ("exercise", "exercise")):
        thetext = utilities.replacemacro(
            thetext, r"\begin{" + env + "}", 1,
            r"\begin{" + env + r"}\label{" + prefix + ":"
            + component.chapter_abbrev + ":#1}")
    # \chap{title}{label} -> \chapter{title}\label{label}
    thetext = re.sub(r"\\chap\s*{([^{}]*)}{([^{}]*)}",
                     r"\\chapter{\1}\\label{\2}", thetext)
    # in actions.tex and crypt.tex many examples start with something like
    #     \noindent {\bf Example 2.}
    # and end with
    #     \hspace{\fill} $\blacksquare$
    # so we convert these to \begin{example} ... \end{example}.
    # Labels and references still need to be added by hand.
    thetext = re.sub(r"\\noindent\s*{\\bf\s+Example\s+[0-9.]+\s*}",
                     r"\\begin{example}", thetext)
    thetext = re.sub(r"\\hspace{\\fill}\s*\$\\blacksquare\$",
                     r"\\end{example}", thetext)
    # delete empty label arguments
    thetext = re.sub(r"\\label{[a-zA-Z]+:[a-zA-Z]+:}", "", thetext)
    return thetext
|
minlexx/pyevemon | esi_client/models/put_fleets_fleet_id_members_member_id_unprocessable_entity.py | Python | gpl-3.0 | 3,138 | 0.002231 | # coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PutFleetsFleetIdMembersMemberIdUnprocessableEntity(object):
    """Swagger-generated model for the 422 response of the fleet-member PUT.

    NOTE: This class is auto generated by the swagger code generator program.
    Two lines of ``to_dict`` were corrupted by stray '|' artifacts in the
    source dump and have been reconstructed; ``six.iteritems`` was replaced
    by ``dict.items`` (equivalent on both Python 2 and 3).
    """

    def __init__(self, error=None):
        """
        PutFleetsFleetIdMembersMemberIdUnprocessableEntity - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute name -> swagger type name (used by to_dict).
        self.swagger_types = {
            'error': 'str'
        }

        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'error': 'error'
        }

        self._error = error

    @property
    def error(self):
        """
        Gets the error of this PutFleetsFleetIdMembersMemberIdUnprocessableEntity.
        error message

        :return: The error of this PutFleetsFleetIdMembersMemberIdUnprocessableEntity.
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """
        Sets the error of this PutFleetsFleetIdMembersMemberIdUnprocessableEntity.
        error message

        :param error: The error of this PutFleetsFleetIdMembersMemberIdUnprocessableEntity.
        :type: str
        """
        self._error = error

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested models (values exposing ``to_dict``) and containers.
        """
        result = {}

        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, PutFleetsFleetIdMembersMemberIdUnprocessableEntity):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
materialsproject/custodian | custodian/vasp/handlers.py | Python | mit | 72,810 | 0.002857 | """
This module implements specific error handlers for VASP runs. These handlers
try to detect common errors in vasp runs and attempt to fix them on the fly
by modifying the input files.
"""
import datetime
import logging
import operator
import os
import re
import shutil
import time
import warnings
from collections import Counter
from functools import reduce
import numpy as np
from monty.dev import deprecated
from | monty.os.path import zpath
from monty.serializatio | n import loadfn
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar, Kpoints, Poscar, VaspInput
from pymatgen.io.vasp.outputs import Oszicar, Outcar, Vasprun
from pymatgen.io.vasp.sets import MPScanRelaxSet
from pymatgen.transformations.standard_transformations import SupercellTransformation
from custodian.ansible.actions import FileActions
from custodian.ansible.interpreter import Modder
from custodian.custodian import ErrorHandler
from custodian.utils import backup
from custodian.vasp.interpreter import VaspModder
__author__ = "Shyue Ping Ong, William Davidson Richards, Anubhav Jain, Wei Chen, Stephen Dacek, Andrew Rosen"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "2/4/13"
VASP_BACKUP_FILES = {
"INCAR",
"KPOINTS",
"POSCAR",
"OUTCAR",
"CONTCAR",
"OSZICAR",
"vasprun.xml",
"vasp.out",
"std_err.txt",
}
class VaspErrorHandler(ErrorHandler):
"""
Master VaspErrorHandler class that handles a number of common errors
that occur during VASP runs.
"""
is_monitor = True
error_msgs = {
"tet": [
"Tetrahedron method fails",
"tetrahedron method fails",
"Fatal error detecting k-mesh",
"Fatal error: unable to match k-point",
"Routine TETIRR needs special values",
"Tetrahedron method fails (number of k-points < 4)",
"BZINTS",
],
"inv_rot_mat": ["rotation matrix was not found (increase SYMPREC)"],
"brmix": ["BRMIX: very serious problems"],
"subspacematrix": ["WARNING: Sub-Space-Matrix is not hermitian in DAV"],
"tetirr": ["Routine TETIRR needs special values"],
"incorrect_shift": ["Could not get correct shifts"],
"real_optlay": ["REAL_OPTLAY: internal error", "REAL_OPT: internal ERROR"],
"rspher": ["ERROR RSPHER"],
"dentet": ["DENTET"],
"too_few_bands": ["TOO FEW BANDS"],
"triple_product": ["ERROR: the triple product of the basis vectors"],
"rot_matrix": ["Found some non-integer element in rotation matrix", "SGRCON"],
"brions": ["BRIONS problems: POTIM should be increased"],
"pricel": ["internal error in subroutine PRICEL"],
"zpotrf": ["LAPACK: Routine ZPOTRF failed"],
"amin": ["One of the lattice vectors is very long (>50 A), but AMIN"],
"zbrent": ["ZBRENT: fatal internal in", "ZBRENT: fatal error in bracketing"],
"pssyevx": ["ERROR in subspace rotation PSSYEVX"],
"eddrmm": ["WARNING in EDDRMM: call to ZHEGV failed"],
"edddav": ["Error EDDDAV: Call to ZHEGV failed"],
"algo_tet": ["ALGO=A and IALGO=5X tend to fail"],
"grad_not_orth": ["EDWAV: internal error, the gradient is not orthogonal"],
"nicht_konv": ["ERROR: SBESSELITER : nicht konvergent"],
"zheev": ["ERROR EDDIAG: Call to routine ZHEEV failed!"],
"elf_kpar": ["ELF: KPAR>1 not implemented"],
"elf_ncl": ["WARNING: ELF not implemented for non collinear case"],
"rhosyg": ["RHOSYG"],
"posmap": ["POSMAP"],
"point_group": ["group operation missing"],
"symprec_noise": ["determination of the symmetry of your systems shows a strong"],
"dfpt_ncore": ["PEAD routines do not work for NCORE", "remove the tag NPAR from the INCAR file"],
"bravais": ["Inconsistent Bravais lattice"],
"nbands_not_sufficient": ["number of bands is not sufficient"],
"hnform": ["HNFORM: k-point generating"],
}
    def __init__(
        self,
        output_filename="vasp.out",
        natoms_large_cell=None,
        errors_subset_to_catch=None,
        vtst_fixes=False,
    ):
        """
        Initializes the handler with the output file to check.

        Args:
            output_filename (str): This is the file where the stdout for vasp
                is being redirected. The error messages that are checked are
                present in the stdout. Defaults to "vasp.out", which is the
                default redirect used by :class:`custodian.vasp.jobs.VaspJob`.
            natoms_large_cell (int): Number of atoms threshold to treat cell
                as large. Affects the correction of certain errors. Defaults to
                None (not used). Deprecated.
            errors_subset_to_catch (list): A subset of errors to catch. The
                default is None, which means all supported errors are detected.
                Use this to catch only a subset of supported errors.
                E.g., ["eddrmm", "zheev"] will only catch the eddrmm and zheev
                errors, and not others. If you wish to exclude only one or
                two of the errors, you can create this list by the following
                lines:

                ```
                subset = list(VaspErrorHandler.error_msgs.keys())
                subset.remove("eddrmm")
                handler = VaspErrorHandler(errors_subset_to_catch=subset)
                ```
            vtst_fixes (bool): Whether to consider VTST optimizers. Defaults to
                False for compatibility purposes.
        """
        self.output_filename = output_filename
        self.errors = set()
        self.error_count = Counter()
        # threshold of number of atoms to treat the cell as large (deprecated;
        # kept only so old call sites keep working).
        self.natoms_large_cell = natoms_large_cell
        if self.natoms_large_cell:
            warnings.warn(
                "natoms_large_cell is deprecated and currently does nothing.",
                DeprecationWarning,
            )
        self.errors_subset_to_catch = errors_subset_to_catch or list(VaspErrorHandler.error_msgs.keys())
        self.vtst_fixes = vtst_fixes
        self.logger = logging.getLogger(self.__class__.__name__)
    def check(self):
        """
        Check for error.

        Scans self.output_filename for every known message of the errors
        listed in self.errors_subset_to_catch and records matches in
        self.errors.

        Returns:
            bool: True if at least one error was detected.
        """
        incar = Incar.from_file("INCAR")
        self.errors = set()
        error_msgs = set()
        with open(self.output_filename) as file:
            text = file.read()
            for err in self.errors_subset_to_catch:
                for msg in self.error_msgs[err]:
                    if text.find(msg) != -1:
                        # this checks if we want to run a charged
                        # computation (e.g., defects) if yes we don't
                        # want to kill it because there is a change in
                        # e-density (brmix error)
                        if err == "brmix" and "NELECT" in incar:
                            continue
                        self.errors.add(err)
                        error_msgs.add(msg)
        for msg in error_msgs:
            self.logger.error(msg, extra={"incar": incar.as_dict()})
        return len(self.errors) > 0
def correct(self):
"""
Perform corrections.
"""
backup(VASP_BACKUP_FILES | {self.output_filename})
actions = []
vi = VaspInput.from_directory(".")
if self.errors.intersection(["tet", "dentet"]):
if vi["INCAR"].get("KSPACING"):
# decrease KSPACING by 20% in each direction (approximately double no. of kpoints)
actions.append(
{
"dict": "INCAR",
"action": {"_set": {"KSPACING": vi["INCAR"].get("KSPACING") * 0.8}},
}
)
else:
actions.append({"dict": "INCAR", "action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}})
if "inv_rot_mat" in self.errors:
actions.append({"dict": "INCAR", "action": {"_set": {"SYMPREC |
openstack/heat-translator | translator/hot/tosca/tosca_network_network.py | Python | apache-2.0 | 4,835 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from toscaparser.common.exception import InvalidPropertyValueError
from translator.hot.syntax.hot_resource import HotResource
# Name used to dynamically load appropriate map class.
TARGET_CLASS_NAME = 'ToscaNetwork'
class ToscaNetwork(HotResource):
    '''Translate TOSCA node type tosca.nodes.network.Network.'''

    toscatype = 'tosca.nodes.network.Network'

    SUBNET_SUFFIX = '_subnet'

    NETWORK_PROPS = ['network_name', 'network_id', 'segmentation_id']

    # 'dhcp_enabled' is included so handle_expansion() can translate it into
    # Neutron's 'enable_dhcp'; without it that branch was unreachable.
    SUBNET_PROPS = ['ip_version', 'cidr', 'start_ip', 'end_ip', 'gateway_ip',
                    'dhcp_enabled']

    existing_resource_id = None

    def __init__(self, nodetemplate, csar_dir=None):
        super(ToscaNetwork, self).__init__(nodetemplate,
                                           type='OS::Neutron::Net',
                                           csar_dir=csar_dir)

    def handle_properties(self):
        """Map TOSCA network properties onto OS::Neutron::Net properties."""
        tosca_props = self.get_tosca_props()
        net_props = {}
        for key, value in tosca_props.items():
            if key in self.NETWORK_PROPS:
                if key == 'network_name':
                    # If CIDR is specified network_name should
                    # be used as the name for the new network.
                    if 'cidr' in tosca_props.keys():
                        net_props['name'] = value
                    # If CIDR is not specified network_name will be used
                    # to lookup existing network. If network_id is specified
                    # together with network_name then network_id should be
                    # used to lookup the network instead.
                    elif 'network_id' not in tosca_props.keys():
                        self.hide_resource = True
                        self.existing_resource_id = value
                        break
                elif key == 'network_id':
                    self.hide_resource = True
                    self.existing_resource_id = value
                    break
                elif key == 'segmentation_id':
                    # Hardcode to vxlan for now until we add the network type
                    # and physical network to the spec.
                    net_props['value_specs'] = {'provider:segmentation_id':
                                                value, 'provider:network_type':
                                                'vxlan'}
        self.properties = net_props

    def handle_expansion(self):
        """Generate the OS::Neutron::Subnet resource backing this network."""
        # If the network resource should not be output (it is hidden),
        # there is no need to generate a subnet resource.
        if self.hide_resource:
            return
        tosca_props = self.get_tosca_props()
        subnet_props = {}
        ip_pool_start = None
        ip_pool_end = None
        for key, value in tosca_props.items():
            if key in self.SUBNET_PROPS:
                if key == 'start_ip':
                    ip_pool_start = value
                elif key == 'end_ip':
                    ip_pool_end = value
                elif key == 'dhcp_enabled':
                    subnet_props['enable_dhcp'] = value
                else:
                    subnet_props[key] = value
        if 'network_id' in tosca_props:
            subnet_props['network'] = tosca_props['network_id']
        else:
            subnet_props['network'] = '{ get_resource: %s }' % (self.name)
        # Handle allocation pools.
        # Do this only if both start_ip and end_ip are provided.
        # If only one of them is given, raise an exception.
        if ip_pool_start and ip_pool_end:
            allocation_pool = {}
            allocation_pool['start'] = ip_pool_start
            allocation_pool['end'] = ip_pool_end
            subnet_props['allocation_pools'] = [allocation_pool]
        elif ip_pool_start:
            # NOTE(review): '_' (gettext) is not among this module's visible
            # imports -- confirm the translation helper is in scope.
            raise InvalidPropertyValueError(what=_('start_ip'))
        elif ip_pool_end:
            raise InvalidPropertyValueError(what=_('end_ip'))
        subnet_resource_name = self.name + self.SUBNET_SUFFIX
        hot_resources = [HotResource(self.nodetemplate,
                                     type='OS::Neutron::Subnet',
                                     name=subnet_resource_name,
                                     properties=subnet_props)]
        return hot_resources
|
LePastis/pyload | module/plugins/hooks/DeleteFinished.py | Python | gpl-3.0 | 2,839 | 0.011272 | # -*- coding: utf-8 -*-
from module.database import style
from module.plugins.internal.Addon import Addon
class DeleteFinished(Addon):
    __name__    = "DeleteFinished"
    __type__    = "hook"
    __version__ = "1.14"
    __status__  = "testing"

    __config__ = [("interval"  , "int" , "Check interval in hours"          , 72   ),
                  ("deloffline", "bool", "Delete package with offline links", False)]

    __description__ = """Automatically delete all finished packages from queue"""
    __license__     = "GPLv3"
    __authors__     = [("Walter Purcaro", "vuolter@gmail.com")]


    MIN_CHECK_INTERVAL = 1 * 60 * 60  #: 1 hour


    ## overwritten methods ##

    def init(self):
        # Never check more often than MIN_CHECK_INTERVAL; activate() may raise
        # this according to the user's configured interval.
        self.interval = self.MIN_CHECK_INTERVAL

    def periodical(self):
        """Delete finished packages, then sleep until another package finishes."""
        if not self.info['sleep']:
            deloffline = self.get_config('deloffline')
            mode = "0,1,4" if deloffline else "0,4"
            msg = _('delete all finished packages in queue list (%s packages with offline links)')
            self.log_info(msg % (_('including') if deloffline else _('excluding')))
            self.delete_finished(mode)
            self.info['sleep'] = True
            self.add_event('package_finished', self.wakeup)

    def deactivate(self):
        self.manager.removeEvent('package_finished', self.wakeup)

    def activate(self):
        self.info['sleep'] = True
        self.interval = max(self.MIN_CHECK_INTERVAL, self.get_config('interval') * 60 * 60)
        self.add_event('package_finished', self.wakeup)


    ## own methods ##

    @style.queue
    def delete_finished(self, mode):
        """Drop packages whose links are all in statuses |mode|, then orphaned links."""
        self.c.execute('DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE package=packages.id AND status NOT IN (%s))' % mode)
        self.c.execute('DELETE FROM links WHERE NOT EXISTS(SELECT 1 FROM packages WHERE id=links.package)')

    def wakeup(self, pypack):
        """A package finished: stop sleeping so the next periodical run cleans up."""
        self.manager.removeEvent('package_finished', self.wakeup)
        self.info['sleep'] = False


    ## event managing ##

    def add_event(self, event, func):
        """
        Adds an event listener for event name
        """
        if event in self.manager.events:
            if func in self.manager.events[event]:
                self.log_debug("Function already registered", func)
            else:
                self.manager.events[event].append(func)
        else:
            self.manager.events[event] = [func]
|
skibblenybbles/django-commando | commando/django/core/management/cleanup.py | Python | mit | 1,234 | 0.008914 | from commando import management
BaseCleanupCommand = management.get_command_class(
    "cleanup", exclude_packages=("commando",))


if BaseCleanupCommand is not None:
    # Instantiate once so we can mirror the wrapped command's metadata.
    base = BaseCleanupCommand()

    class CleanupCommandOptions(management.CommandOptions):
        """
        Cleanup command options.
        """
        args = base.args
        help = base.help
        # Keep only the options that cleanup itself adds, i.e. everything
        # beyond the shared base command options.
        option_list = base.option_list[
            len(management.BaseCommandOptions.option_list):]
        option_groups = (
            ("[cleanup options]",
                "These options will be passed to cleanup.",
                option_list,
            ),) if option_list else ()
        actions = ("cleanup",)

        def handle_cleanup(self, *args, **options):
            """Delegate to the underlying "cleanup" management command."""
            return self.call_command("cleanup", *args, **options)

    class CleanupCommand(CleanupCommandOptions, management.StandardCommand):
        """
        Cleanup command.
        """
        option_list = management.StandardCommand.option_list
        option_groups = \
            CleanupCommandOptions.option_groups + \
            management.StandardCommand.option_groups

else:
    # No "cleanup" command available; fall back to the plain standard command.
    CleanupCommand = management.StandardCommand
|
ronkitay/Rons-Tutorials | Python/APIs/tor_json_processing.py | Python | mit | 3,684 | 0.001357 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python considers any map or collection as JSON elements. 'Serializing/DeSerializing' them is VERY easy
Read:
- json.load(data_file) --> Loads an object from a File
- json.loads(json_as_string) --> Loads an object from a string representing a JSON object
Write:
- json.dump(json_object, target_file) --> Saved te object into the file
- json.dumps(json_object) --> Returns the JSON Object in its String representation
"""
import json
from sets import Set
phase = 0
def load_json_from_file(input_file_name):
    """Load and return the JSON object stored in the given file."""
    with open(input_file_name) as data_file:
        return json.load(data_file)
def save_json_to_file(output_file, json_object):
    """Serialize json_object as JSON into the file at output_file."""
    outfile = open(output_file, 'w')
    try:
        json.dump(json_object, outfile)
    finally:
        outfile.close()
def print_json_object(json_object):
    """Pretty-print json_object to stdout (indented, keys sorted).

    Uses the print() function form so the code is valid under both
    Python 2 and Python 3.
    """
    print(json.dumps(json_object, indent=True, sort_keys=True))
def print_phase():
    """Print a step banner and advance the module-level phase counter.

    Uses the print() function form so the code is valid under both
    Python 2 and Python 3.
    """
    global phase
    print('==== Step %d ====' % phase)
    phase += 1
# Collect every IPv4 address used by tor relays that allow exiting, then
# compare against a locally maintained list of suspected tor IPs.
# NOTE(review): paths are hardcoded to one developer's machine.
tor_details_full_list = load_json_from_file('/Users/rkitay/tmp/tor2/tor_details_full.json')
relays = tor_details_full_list["relays"]

output = Set()

for relay in relays:
    exit_policy = relay["exit_policy"]
    # Skip relays that reject all exit traffic.
    if exit_policy[0] == "reject *:*":
        continue
    # print relay["nickname"]
    if "or_addresses" in relay:
        or_addresses = relay["or_addresses"]
        # print or_addresses
        for ip in or_addresses:
            # '.' distinguishes IPv4 entries; strip the ":port" suffix.
            if '.' in ip:
                ip = ip[0:ip.index(':')]
                output.add(ip)
    if "exit_addresses" in relay:
        exit_addresses = relay["exit_addresses"]
        # print exit_addresses
        for ip in exit_addresses:
            if '.' in ip:
                output.add(ip)

print "all tor addresses size is: " + str (output.__len__())
print output

# NOTE(review): readlines() keeps the trailing newline on each line, so the
# membership test below will likely never match -- strip() each line first.
check_tor_file = open("/Users/rkitay/tmp/tor2/check-tor.ips", "r")
check_tor_lines = check_tor_file.readlines()
for check_tor_ip in check_tor_lines:
    if check_tor_ip in output:
        continue
        # print check_tor_ip + " was found in the onionoo list"
    else:
        print check_tor_ip + " was NOT found in the onionoo list"
# print_phase()
# print type(data) # Data is a dictionary
# print data
#
# print_phase()
# data['object1']['field2'] = 'new value'
# print data
#
# print_phase()
# data['object2'] = {'fieldX': 'valueY', 'fieldZ': 12940.43}
# print_json_object(data)
#
# print_phase()
# if 'object3' not in data:
# data['object3'] = {}
# print_json_object(data)
#
# print_phase()
# if 'object2' not in data:
# data['object2'] = {}
# else:
# data['object2']['fieldZ'] = 96943.34
# print_json_object(data)
#
# print_phase()
# dummy_json_output_file = '/tmp/dummy_output.json'
# save_json_to_file(dummy_json_output_file, data)
# contents_of_file_i_just_saved = load_json_from_file(dummy_json_output_file)
# print_json_object(contents_of_file_i_just_saved)
#
# print_phase()
# sample_data = [
# {"action": "Allow", "key": "ALLOW", "rules": 20},
# {"action": "Alternate Content", "key": "ALT_CONT", "rules": 20},
# {"action": "Alternate Origin", "key": "ALT_ORG", "rules": 20},
# {"action": "Get from Cache", "key": "CACHE", "rules": 20},
# {"action": "Annoy user", "key": "CUSTOM_1234", "rules": 20},
# {"action": "Distract user", "key": "CUSTOM_5678", "rules": 20},
# {"action": "Delay the user", "key": "DELAY", "rules": 20},
# {"action": "DROP", "key": "DROP", "rules": 20},
# {"action": "Do nothing", "key": "NONE", "rules": 20},
# {"action": "Slow the user", "key": "SLOW", "rules": 20}
# ]
# print type(sample_data)
# print_json_object(sample_data)
#
# print_phase()
# sample_data[1:3] = []
# print_json_object(sample_data)
|
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_regression.py | Python | mit | 484 | 0.002066 | fro | m __future__ import division, print_function, absolute_import
import numpy as np
import scipy.interpolate as interp
from numpy.testing import assert_almost_equal
class TestRegression(object):
    """Regression tests for scipy.interpolate."""

    def test_spalde_scalar_input(self):
        """Ticket #629: spalde must accept a scalar evaluation point."""
        x = np.linspace(0, 10)
        y = x**3
        tck = interp.splrep(x, y, k=3, t=[5])
        res = interp.spalde(np.float64(1), tck)
        des = np.array([1., 3., 6., 6.])
        assert_almost_equal(res, des)
|
espadrine/opera | chromium/src/third_party/chromite/buildbot/portage_utilities.py | Python | bsd-3-clause | 30,932 | 0.007015 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines and classes for working with Portage overlays and ebuilds."""
import collections
import filecmp
import fileinput
import glob
import logging
import multiprocessing
import os
import re
import shutil
import sys
from chromite.buildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import osutils
_PRIVATE_PREFIX = '%(buildroot)s/src/private-overlays'

# Overlay directories (relative to a buildroot) searched for every board.
_GLOBAL_OVERLAYS = [
  '%s/chromeos-overlay' % _PRIVATE_PREFIX,
  '%s/chromeos-partner-overlay' % _PRIVATE_PREFIX,
  '%(buildroot)s/src/third_party/chromiumos-overlay',
  '%(buildroot)s/src/third_party/portage-stable',
]

# Define datastructures for holding PV and CPV objects.
_PV_FIELDS = ['pv', 'package', 'version', 'version_no_rev', 'rev']
PV = collections.namedtuple('PV', _PV_FIELDS)
CPV = collections.namedtuple('CPV', ['category'] + _PV_FIELDS)

# Package matching regexp, as dictated by package manager specification:
# http://www.gentoo.org/proj/en/qa/pms.xml
_pkg = r'(?P<package>' + r'[\w+][\w+-]*)'
_ver = r'(?P<version>' + \
       r'(?P<version_no_rev>(\d+)((\.\d+)*)([a-z]?)' + \
       r'((_(pre|p|beta|alpha|rc)\d*)*))' + \
       r'(-(?P<rev>r(\d+)))?)'
_pvr_re = re.compile(r'^(?P<pv>%s-%s)$' % (_pkg, _ver), re.VERBOSE)

# This regex matches blank lines, commented lines, and the EAPI line.
_blank_or_eapi_re = re.compile(r'^\s*(?:#|EAPI=|$)')
def _ListOverlays(board=None, buildroot=constants.SOURCE_ROOT):
  """Return the list of overlays to use for a given buildbot.

  Always returns all overlays, and does not perform any filtering.

  Args:
    board: Board to look at.
    buildroot: Source root to find overlays.
  """
  patterns = []
  if board is None:
    patterns.append('overlay*')
  else:
    base_board, _, variant = board.partition('_')
    patterns.append('overlay-%s' % base_board)
    if variant:
      patterns.append('overlay-variant-%s' % board.replace('_', '-'))

  # Global overlays first (only those that actually exist on disk)...
  overlays = [d % dict(buildroot=buildroot) for d in _GLOBAL_OVERLAYS]
  overlays = [d for d in overlays if os.path.isdir(d)]
  # ...then, per pattern, the public and private board overlays.
  for pattern in patterns:
    overlays.extend(glob.glob('%s/src/overlays/%s' % (buildroot, pattern)))
    overlays.extend(
        glob.glob('%s/src/private-overlays/%s-private' % (buildroot, pattern)))
  return overlays
def FindOverlays(overlay_type, board=None, buildroot=constants.SOURCE_ROOT):
  """Return the list of overlays to use for a given buildbot.

  Args:
    overlay_type: A string describing which overlays you want:
      'private': Just the private overlays.
      'public': Just the public overlays.
      'both': Both the public and private overlays.
    board: Board to look at.
    buildroot: Source root to find overlays.
  """
  overlays = _ListOverlays(board=board, buildroot=buildroot)
  private_prefix = _PRIVATE_PREFIX % dict(buildroot=buildroot)
  if overlay_type == constants.BOTH_OVERLAYS:
    return overlays
  elif overlay_type == constants.PRIVATE_OVERLAYS:
    return [path for path in overlays if path.startswith(private_prefix)]
  elif overlay_type == constants.PUBLIC_OVERLAYS:
    return [path for path in overlays if not path.startswith(private_prefix)]
  else:
    assert overlay_type is None
    return []
class MissingOverlayException(Exception):
  """Raised when an overlay that is needed for the build cannot be found."""
def FindPrimaryOverlay(overlay_type, board, buildroot=constants.SOURCE_ROOT):
  """Return the first primary overlay for a given buildbot.

  An overlay is only considered a primary overlay if it has both a make.conf
  and a toolchain.conf.  If multiple primary overlays are found, the first
  one wins.

  Args:
    overlay_type: A string describing which overlays you want:
      'private', 'public', or 'both'.
    board: Board to look at.

  Raises:
    MissingOverlayException: No primary overlay found.
  """
  required_files = ('make.conf', 'toolchain.conf')
  for overlay in FindOverlays(overlay_type, board, buildroot):
    if all(os.path.exists(os.path.join(overlay, name))
           for name in required_files):
      return overlay
  raise MissingOverlayException('No primary overlay found for board=%r' % board)
def GetOverlayName(overlay):
  """Return the repo name of |overlay|, read from its profiles/repo_name.

  Args:
    overlay: Path to the overlay directory.

  Returns:
    The repo name as a string, or None if the overlay has no repo_name file.
  """
  try:
    # Use a context manager so the file handle is always closed; the
    # previous bare open() leaked it.
    with open('%s/profiles/repo_name' % overlay) as repo_name_file:
      return repo_name_file.readline().rstrip()
  except IOError:
    # Not all overlays have a repo_name, so don't make a fuss.
    return None
class EBuildVersionFormatException(Exception):
  """Raised when an ebuild filename does not match the expected format."""

  def __init__(self, filename):
    # Keep the offending filename available to callers.
    self.filename = filename
    message = ('Ebuild file name %s '
               'does not match expected format.' % filename)
    super(EBuildVersionFormatException, self).__init__(message)
class EbuildFormatIncorrectException(Exception):
  """Raised when an ebuild's contents have an invalid format."""

  def __init__(self, filename, message):
    message = 'Ebuild %s has invalid format: %s ' % (filename, message)
    super(EbuildFormatIncorrectException, self).__init__(message)
class EBuild(object):
"""Wrapper class for information about an ebuild."""
VERBOSE = False
_PACKAGE_VERSION_PATTERN = re.compile(
r'.*-(([0-9][0-9a-z_.]*)(-r[0-9]+)?)[.]ebuild')
_WORKON_COMMIT_PATTERN = re.compile(r'^CROS_WORKON_COMMIT="(.*)"$')
  @classmethod
  def _Print(cls, message):
    """Verbose print function.

    Logs |message| via cros_build_lib.Info, but only when cls.VERBOSE is set.
    """
    if cls.VERBOSE:
      cros_build_lib.Info(message)
  @classmethod
  def _RunCommand(cls, command, **kwargs):
    """Run |command| and return its captured output as a string."""
    return cros_build_lib.RunCommandCaptureOutput(
        command, print_cmd=cls.VERBOSE, **kwargs).output
  def IsSticky(self):
    """Returns True if the ebuild is sticky (stable and at revision 0)."""
    return self.is_stable and self.current_revision == 0
  @classmethod
  def UpdateEBuild(cls, ebuild_path, variables, redirect_file=None,
                   make_stable=True):
    """Static function that updates WORKON information in the ebuild.

    Rewrites |ebuild_path| in place: injects |variables| assignments at the
    top of the file (after any EAPI line), drops any pre-existing assignments
    of those variables, and optionally strips '~' markers from KEYWORDS.

    Args:
      ebuild_path: The path of the ebuild.
      variables: Dictionary of variables to update in ebuild.
      redirect_file: Optionally redirect output of new ebuild somewhere else.
      make_stable: Actually make the ebuild stable.
    """
    written = False
    for line in fileinput.input(ebuild_path, inplace=1):
      # Has to be done here to get changes to sys.stdout from fileinput.input:
      # fileinput redirects sys.stdout into the file being edited, so the
      # default output target must be resolved inside the loop.
      if not redirect_file:
        redirect_file = sys.stdout

      # Always add variables at the top of the ebuild, before the first
      # nonblank line other than the EAPI line.
      if not written and not _blank_or_eapi_re.match(line):
        for key, value in sorted(variables.items()):
          assert key is not None and value is not None
          redirect_file.write('%s=%s\n' % (key, value))
        written = True

      # Mark KEYWORDS as stable by removing ~'s.
      if line.startswith('KEYWORDS=') and make_stable:
        line = line.replace('~', '')

      varname, eq, _ = line.partition('=')
      if not (eq == '=' and varname.strip() in variables):
        # Don't write out the old value of the variable.
        redirect_file.write(line)

    fileinput.close()
@classmethod
def MarkAsStable(cls, unstable_ebuild_path, new_stable_ebuild_path,
variables, redirect_file=None, make_stable=True):
"""Static function that creates a revved stable ebuild.
This function assumes you have already figured out the name of the new
stable ebuild path and then creates that file from the given unstable
ebuild and marks it as stable. If the commit_value is set, it also
set the commit_keyword=commit_value pair in the ebuild.
Args:
unstable_ebuild_path: The path to the unstable ebuild.
new_stable_ebuild_path: The path you want to use for the new stable
ebuild.
variables: Dictionary of variables to update in ebuild.
redirect_file: Optionally redirect output of new ebuild somewhere else.
make_stable: Actually make the ebuild stable.
"""
shutil.copyfile(unstable_ebuild_path, new_stable_ebuil |
forseti-security/forseti-security | tests/common/util/__init__.py | Python | apache-2.0 | 658 | 0 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities usable by all tests."""
|
digidotcom/transport_examples | WR31/doormon.py | Python | mpl-2.0 | 3,979 | 0.000503 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2016 Digi International Inc. All Rights Reserved.
"""
Monitor the WR31 door enclosure
"""
import time
import sys
import sarcli
import idigidata
def millisecond_timestamp():
    """
    Return the current Unix time, truncated to whole milliseconds.

    :return: int, Timestamp in milliseconds
    """
    seconds_now = time.time()
    return int(seconds_now * 1000)
def cli_command(cmd):
    """
    Send a command to the SarOS CLI and receive the response

    :param cmd: str, Command to run
    :return response: str, Response to cmd
    """
    cli = sarcli.open()
    try:
        cli.write(cmd)
        response = cli.read()
    finally:
        # Always release the CLI session, even if write/read raises.
        cli.close()
    return response
class SmsAlert(object):
    """
    Send an SMS alert
    """

    def __init__(self, destination, custom_text):
        # Phone number (or address) the alert goes to.
        self.destination = destination
        # Prefix prepended to every alert message.
        self.custom_text = custom_text

    def send_alert(self, message):
        """
        Send an SMS alert

        :param message: str, Content of SMS message
        :return response: str, Response to sendsms command
        """
        message = "{0}: {1}".format(self.custom_text, message)
        command = 'sendsms ' + self.destination + ' "' + message + '" '
        response = cli_command(command)
        return response
class DatapointAlert(object):
    """
    Send a Datapoint alert
    """

    def __init__(self, destination):
        # Data stream ID the datapoint is uploaded to.
        self.destination = destination

    def send_alert(self, message):
        """
        Send a Datapoint alert

        :param message: str, Datapoint content
        :return response: tuple, Result code of datapoint upload attempt
        """
        timestamp = millisecond_timestamp()
        # Build the DataPoint XML payload expected by Device Cloud.
        dpoint = """\
<DataPoint>
<dataType>STRING</dataType>
<data>{0}</data>
<timestamp>{1}</timestamp>
<streamId>{2}</streamId>
</DataPoint>""".format(message, timestamp, self.destination)
        response = idigidata.send_to_idigi(dpoint, "DataPoint/stream.xml")
        return response
class DoorMonitor(object):
    """
    Provides methods to monitor the enclosure door status
    """

    def __init__(self, alert_list):
        # Last known door state; empty so the first reading always alerts.
        self.d1_status = ""
        # Alert objects, each exposing send_alert(message).
        self.alert_list = alert_list

    @classmethod
    def switch_status(cls):
        """
        Reads line status and sends an alert if the status is different

        NOTE(review): if the "D1: DOUT=OFF, DIN=LOW" marker is absent from
        the CLI response, 'status' is never assigned and the return raises
        UnboundLocalError -- confirm the gpio output always contains it.

        :return status: str, Door status, "OPEN" or "CLOSED"
        """
        response = cli_command("gpio dio")
        if "D1: DOUT=OFF, DIN=LOW" in response:
            if not "D0: DOUT=ON" in response:
                # Door is closed
                status = "CLOSED"
            else:
                # Door is open
                status = "OPEN"
        return status

    def send_alert(self, text):
        """
        Fan the alert text out to every configured alert channel.

        :param text: str, Alert content
        :return:
        """
        for alert in self.alert_list:
            alert.send_alert(text)

    def monitor_switch(self):
        """
        Runs line monitoring and alerting in a loop

        Polls the door switch twice a second and alerts on every change.
        :return:
        """
        while True:
            status = self.switch_status()
            if status != self.d1_status:
                print "WR31 door is: {0}".format(status)
                self.send_alert(status)
                self.d1_status = status
            time.sleep(.5)
if __name__ == '__main__':
    # Usage: doormon.py [sms_destination [custom_text]]
    # Datapoint alerts are always enabled; SMS alerts only when a
    # destination number is given on the command line.
    ALERT_FUNCTIONS = [DatapointAlert("WR31_door")]
    if len(sys.argv) >= 3:
        CUSTOM_TEXT = sys.argv[2]
    else:
        CUSTOM_TEXT = "WR31 Door"
    if len(sys.argv) >= 2:
        ALERT_FUNCTIONS.append(SmsAlert(sys.argv[1], CUSTOM_TEXT))
    MONITOR = DoorMonitor(ALERT_FUNCTIONS)
    MONITOR.monitor_switch()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.