| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dr4ke616/LazyTorrent
|
twisted/plugins/torrent_plugin.py
|
1
|
1555
|
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application import internet
from twisted.application.service import IServiceMaker
from mamba.utils import config
from mamba.enterprise import database
from mamba.core.session import Session
from mamba.core.services.threadpool import ThreadPoolService
from torrent import MambaApplicationFactory
settings = config.Application('config/application.json')
class Options(usage.Options):
optParameters = [
['port', 'p', settings.port, 'The port number to listen on']
]
class MambaServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = settings.name.lower()
description = settings.description
options = Options
def makeService(self, options):
"""Construct a TCPServer from a factory defined in torrent
"""
factory, application = MambaApplicationFactory(settings)
factory.sessionFactory = Session
httpserver = internet.TCPServer(int(options['port']), factory)
httpserver.setName('{} Application'.format(settings.name))
application.addService(httpserver)
thread_pool = ThreadPoolService(database.Database.pool)
application.addService(thread_pool)
return application
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker
mamba_service_maker = MambaServiceMaker()
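# Hedged usage sketch (not part of the original plugin): twistd discovers this
# module through the twisted/plugins directory and registers it under the tapname
# derived from config/application.json. The snippet below only shows how the
# Options class parses a port override; the '8080' value is illustrative.
def _example_parse_options():
    opts = Options()
    opts.parseOptions(['--port', '8080'])
    return int(opts['port'])  # 8080, overriding the default from settings.port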
|
gpl-3.0
| 8,606,297,883,575,277,000
| 29.490196
| 70
| 0.748553
| false
| 4.367978
| false
| false
| false
|
google-research/open-covid-19-data
|
src/scripts/locations/generate_iso_3166_1.py
|
1
|
7123
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-value-for-parameter
import requests
import pandas as pd
import streamlit as st
import os
import sys
PIPELINE_DIR = os.path.join(os.path.dirname(__file__), '../../', 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import path_utils
################################################################################
##### Query wikidata for all ISO-3166-1 countries ######
################################################################################
# Wikidata query for ISO-3166-1 codes
# Use at https://query.wikidata.org/
# Workaround for a bug in generating urls for wikidata queries:
# Use the UI at https://query.wikidata.org/ to get the query url by entering these queries
# and then click the "Link" button -> SPARQL endpoint -> copy link address.
# This gives you the url for the query.
# SELECT DISTINCT ?country ?countryLabel ?capital ?capitalLabel
# WHERE
# {
# ?country wdt:P31 wd:Q3624078 .
# #not a former country
# FILTER NOT EXISTS {?country wdt:P31 wd:Q3024240}
# #and not an ancient civilisation (needed to exclude ancient Egypt)
# FILTER NOT EXISTS {?country wdt:P31 wd:Q28171280}
# OPTIONAL { ?country wdt:P36 ?capital } .
#
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
# }
# ORDER BY ?countryLabel
iso_3166_1_url = 'https://query.wikidata.org/sparql?query=%23added%20before%202016-10%0ASELECT%20DISTINCT%20%3Fcountry%20%3FcountryLabel%20%3FthreeLetterCode%20%3FnumericCode%20%3FtwoLetterCode%0AWHERE%0A%7B%0A%20%20%3Fcountry%20wdt%3AP298%20%3FthreeLetterCode.%0A%20%20%3Fcountry%20wdt%3AP299%20%3FnumericCode.%0A%20%20%3Fcountry%20wdt%3AP297%20%3FtwoLetterCode.%0A%20%20%23not%20a%20former%20country%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ3024240%7D%0A%20%20%23and%20no%20an%20ancient%20civilisation%20(needed%20to%20exclude%20ancient%20Egypt)%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ28171280%7D%0A%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20%7D%0A%7D%0AORDER%20BY%20%3FcountryLabel' # pylint: disable=line-too-long
countries = requests.get(iso_3166_1_url, params={'format': 'json'}).json()['results']['bindings']
country_df = pd.json_normalize(countries)
country_df = country_df.rename(columns={
'country.value': 'wikidata_id',
'twoLetterCode.value': 'country_iso_3166-1_alpha-2',
'numericCode.value': 'country_iso_3166-1_numeric',
'threeLetterCode.value': 'region_code',
'countryLabel.value': 'region_name'
})
country_df = country_df[['wikidata_id', 'country_iso_3166-1_alpha-2', 'country_iso_3166-1_numeric',
'region_code', 'region_name']]
country_df['wikidata_id'] = country_df['wikidata_id'].apply(lambda s: s.split('/')[-1])
country_df['region_code_type'] = 'iso_3166-1'
country_df['country_iso_3166-1_alpha-3'] = country_df['region_code']
country_df['region_code_level'] = 1
country_df['parent_region_code'] = 'WORLD'
country_df['subdivision_type'] = 'countries'
country_df['region_type'] = 'country'
country_df['leaf_region_code'] = country_df['region_code']
country_df['level_1_region_code'] = country_df['region_code']
country_df['level_2_region_code'] = None
country_df['level_3_region_code'] = None
st.subheader('Countries including duplicate ISO-3166-1 / ISO-3166-2 regions')
st.write(country_df)
################################################################################
##### Remove duplicates for regions that could appear as either Level 1 ######
##### or as Level 2 regions, based on whether data sources are separate ######
################################################################################
# Treat Netherlands + Aruba + Curaçao + Sint Maarten (Dutch part) as a single level 1 entity
country_df = country_df[country_df['wikidata_id'] != 'Q55']
# Keep Western Sahara wikidata entry (Q6250) instead of Q40362
country_df = country_df[country_df['wikidata_id'] != 'Q40362']
# These regions appear as both ISO-1 and ISO-2, but we will count them as ISO-2
# so we remove them from the ISO-1 list
# Leave as ISO1 because they have separate data sources: Taiwan, Hong Kong, Macao
regions_to_remove_from_iso1 = {
'ALA': 'Åland Islands', # Finland: FI-01
'BLM': 'Saint Barthélemy', # France: FR-BL Saint Barthélemy (BL)
'GUF': 'French Guiana', # France: FR-GF French Guiana (GF)
'GLP': 'Guadeloupe', # France: FR-GP Guadeloupe (GP)
'MAF': 'Saint Martin (French part)', # France: FR-MF Saint Martin (MF)
'MTQ': 'Martinique', # France: FR-MQ Martinique (MQ)
'NCL': 'New Caledonia', # France: FR-NC New Caledonia (NC)
'PYF': 'French Polynesia', # France: FR-PF French Polynesia (PF)
'SPM': 'Saint Pierre and Miquelon', # France: FR-PM Saint Pierre and Miquelon (PM)
'REU': 'Réunion', # France: FR-RE Réunion (RE)
'ATF': 'French Southern and Antarctic Lands', # France: FR-TF French Southern Territories (TF)
'WLF': 'Wallis and Futuna', # France: FR-WF Wallis and Futuna (WF)
'MYT': 'Mayotte', # France: FR-YT Mayotte (YT)
'SJM': 'Svalbard and Jan Mayen', # Norway: NO-21 Svalbard, NO-22 Jan Mayen
'BES': 'Caribbean Netherlands', # Netherlands: NL-BQ1 Bonaire (BQ), NL-BQ2 Saba (BQ), NL-BQ3 Sint Eustatius (BQ)
'ABW': 'Aruba', # Netherlands: NL-AW Aruba (AW)
'CUW': 'Curaçao', # Netherlands: NL-CW Curaçao (CW)
'SXM': 'Sint Maarten (Dutch part)', # Netherlands: NL-SX Sint Maarten (SX)
'ASM': 'American Samoa', # United States: US-AS
'GUM': 'Guam', # United States: US-GU
'MNP': 'Northern Mariana Islands', # United States: US-MP
'PRI': 'Puerto Rico', # United States: US-PR
'UMI': 'United States Minor Outlying Islands', # United States: US-UM
'VIR': 'United States Virgin Islands', # United States: US-VI
}
st.write(len(regions_to_remove_from_iso1))
country_df = country_df[~country_df['region_code'].isin(regions_to_remove_from_iso1.keys())]
st.subheader('Countries without duplicate ISO-3166-1 / ISO-3166-2 regions')
################################################################################
##### Generate datacommons ids using the known format for the dcids ######
################################################################################
country_df['datacommons_id'] = country_df.apply(lambda x: 'country/' + x['region_code'], axis=1)
st.write(country_df)
st.write(country_df.shape)
country_df.to_csv(
os.path.join(path_utils.path_to('locations_intermediate_dir'), 'iso_3166_1_locations.csv'), index=False)
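# Hedged sketch (not part of the original script): the rename step above assumes
# that pd.json_normalize flattens each SPARQL binding into dotted column names
# such as 'country.value' and 'twoLetterCode.value'. The single fabricated
# binding below (France) only illustrates that shape; the real query returns one
# row per country.
def _example_binding_shape():
    sample_bindings = [{
        'country': {'type': 'uri', 'value': 'http://www.wikidata.org/entity/Q142'},
        'countryLabel': {'type': 'literal', 'value': 'France'},
        'twoLetterCode': {'type': 'literal', 'value': 'FR'},
        'threeLetterCode': {'type': 'literal', 'value': 'FRA'},
        'numericCode': {'type': 'literal', 'value': '250'},
    }]
    sample_df = pd.json_normalize(sample_bindings)
    return sample_df[['country.value', 'countryLabel.value', 'twoLetterCode.value']]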
|
apache-2.0
| -6,122,616,209,773,287,000
| 49.460993
| 824
| 0.657625
| false
| 2.919573
| false
| false
| false
|
LamCiuLoeng/fd
|
rpac/model/__init__.py
|
1
|
2426
|
# -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
#from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
#metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers and data objects
# here in __init__, but will want to create modules for them in the model directory
# and import them at the bottom of this file.
#
######
def init_model( engine ):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure( bind = engine )
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
#global t_reflected
#t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
#mapper(Reflected, t_reflected)
# Import your model modules here.
qry = DBSession.query
from auth import User, Group, Permission
from logic import *
from ordering import *
from master import *
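# Hedged usage sketch (not part of the original module): init_model() must be
# called with a bound engine before any query goes through DBSession. The
# in-memory SQLite engine below is an illustrative assumption; a TurboGears app
# normally passes the engine built from its own configuration.
def _example_bootstrap():
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///:memory:')
    init_model(engine)   # binds DBSession to the engine
    return DBSession()   # thread-local session, ready for queries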
|
mit
| -2,430,196,997,975,855,000
| 34.676471
| 78
| 0.741962
| false
| 4.084175
| false
| false
| false
|
gtaylor/paypal-python
|
paypal/interface.py
|
1
|
19787
|
# coding=utf-8
"""
The end developer will do most of their work with the PayPalInterface class
found in this module. Configuration, querying, and manipulation can all be done
with it.
"""
import types
import logging
from pprint import pformat
import warnings
import requests
from paypal.settings import PayPalConfig
from paypal.response import PayPalResponse
from paypal.response_list import PayPalResponseList
from paypal.exceptions import (PayPalError,
PayPalAPIResponseError,
PayPalConfigError)
from paypal.compat import is_py3
if is_py3:
#noinspection PyUnresolvedReferences
from urllib.parse import urlencode
else:
from urllib import urlencode
logger = logging.getLogger('paypal.interface')
class PayPalInterface(object):
__credentials = ['USER', 'PWD', 'SIGNATURE', 'SUBJECT']
"""
The end developers will do 95% of their work through this class. API
queries, configuration, etc, all go through here. See the __init__ method
for config related details.
"""
def __init__(self, config=None, **kwargs):
"""
Constructor, which passes all config directives to the config class
via kwargs. For example:
paypal = PayPalInterface(API_USERNAME='somevalue')
Optionally, you may pass a 'config' kwarg to provide your own
PayPalConfig object.
"""
if config:
# User provided their own PayPalConfig object.
self.config = config
else:
# Take the kwargs and stuff them in a new PayPalConfig object.
self.config = PayPalConfig(**kwargs)
def _encode_utf8(self, **kwargs):
"""
UTF8 encodes all of the NVP values.
"""
if is_py3:
# This is only valid for Python 2. In Python 3, unicode is
# everywhere (yay).
return kwargs
unencoded_pairs = kwargs
for i in unencoded_pairs.keys():
#noinspection PyUnresolvedReferences
if isinstance(unencoded_pairs[i], types.UnicodeType):
unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8')
return unencoded_pairs
def _check_required(self, requires, **kwargs):
"""
Checks kwargs for the values specified in 'requires', which is a tuple
of strings. These strings are the NVP names of the required values.
"""
for req in requires:
# PayPal api is never mixed-case.
if req.lower() not in kwargs and req.upper() not in kwargs:
raise PayPalError('missing required : %s' % req)
def _sanitize_locals(self, data):
"""
Remove the 'self' key in locals()
It's more explicit to do it in one function
"""
if 'self' in data:
data = data.copy()
del data['self']
return data
def _call(self, method, **kwargs):
"""
Wrapper method for executing all API commands over HTTP. This method is
further used to implement wrapper methods listed here:
https://www.x.com/docs/DOC-1374
``method`` must be a supported NVP method listed at the above address.
``kwargs`` the actual call parameters
"""
post_params = self._get_call_params(method, **kwargs)
payload = post_params['data']
api_endpoint = post_params['url']
# This shows all of the key/val pairs we're sending to PayPal.
if logger.isEnabledFor(logging.DEBUG):
logger.debug('PayPal NVP Query Key/Vals:\n%s' % pformat(payload))
http_response = requests.post(**post_params)
response = PayPalResponse(http_response.text, self.config)
logger.debug('PayPal NVP API Endpoint: %s' % api_endpoint)
if not response.success:
raise PayPalAPIResponseError(response)
return response
def _get_call_params(self, method, **kwargs):
"""
Returns the prepared call parameters. Mind, these will be keyword
arguments to ``requests.post``.
``method`` the NVP method
``kwargs`` the actual call parameters
"""
payload = {'METHOD': method,
'VERSION': self.config.API_VERSION}
certificate = None
if self.config.API_AUTHENTICATION_MODE == "3TOKEN":
payload['USER'] = self.config.API_USERNAME
payload['PWD'] = self.config.API_PASSWORD
payload['SIGNATURE'] = self.config.API_SIGNATURE
elif self.config.API_AUTHENTICATION_MODE == "CERTIFICATE":
payload['USER'] = self.config.API_USERNAME
payload['PWD'] = self.config.API_PASSWORD
certificate = (self.config.API_CERTIFICATE_FILENAME,
self.config.API_KEY_FILENAME)
elif self.config.API_AUTHENTICATION_MODE == "UNIPAY":
payload['SUBJECT'] = self.config.UNIPAY_SUBJECT
none_configs = [config for config, value in payload.items()
if value is None]
if none_configs:
raise PayPalConfigError(
"Config(s) %s cannot be None. Please, check this "
"interface's config." % none_configs)
# all keys in the payload must be uppercase
for key, value in kwargs.items():
payload[key.upper()] = value
return {'data': payload,
'cert': certificate,
'url': self.config.API_ENDPOINT,
'timeout': self.config.HTTP_TIMEOUT,
'verify': self.config.API_CA_CERTS}
def address_verify(self, email, street, zip):
"""Shortcut for the AddressVerify method.
``email``::
Email address of a PayPal member to verify.
Maximum string length: 255 single-byte characters
Input mask: ?@?.??
``street``::
First line of the billing or shipping postal address to verify.
To pass verification, the value of Street must match the first three
single-byte characters of a postal address on file for the PayPal member.
Maximum string length: 35 single-byte characters.
Alphanumeric plus - , . ‘ # \
Whitespace and case of input value are ignored.
``zip``::
Postal code to verify.
To pass verification, the value of Zip must match the first five
single-byte characters of the postal code of the verified postal
address for the verified PayPal member.
Maximum string length: 16 single-byte characters.
Whitespace and case of input value are ignored.
"""
args = self._sanitize_locals(locals())
return self._call('AddressVerify', **args)
def create_recurring_payments_profile(self, **kwargs):
"""Shortcut for the CreateRecurringPaymentsProfile method.
Currently, this method only supports the Direct Payment flavor.
It requires standard credit card information and a few additional
parameters related to the billing. e.g.:
profile_info = {
# Credit card information
'creditcardtype': 'Visa',
'acct': '4812177017895760',
'expdate': '102015',
'cvv2': '123',
'firstname': 'John',
'lastname': 'Doe',
'street': '1313 Mockingbird Lane',
'city': 'Beverly Hills',
'state': 'CA',
'zip': '90110',
'countrycode': 'US',
'currencycode': 'USD',
# Recurring payment information
'profilestartdate': '2010-10-25T0:0:0',
'billingperiod': 'Month',
'billingfrequency': '6',
'amt': '10.00',
'desc': '6 months of our product.'
}
response = create_recurring_payments_profile(**profile_info)
The above NVPs compose the bare-minimum request for creating a
profile. For the complete list of parameters, visit this URI:
https://www.x.com/docs/DOC-1168
"""
return self._call('CreateRecurringPaymentsProfile', **kwargs)
def do_authorization(self, transactionid, amt):
"""Shortcut for the DoAuthorization method.
Use the TRANSACTIONID from DoExpressCheckoutPayment for the
``transactionid``. The latest version of the API does not support the
creation of an Order from `DoDirectPayment`.
The `amt` should be the same as passed to `DoExpressCheckoutPayment`.
Flow for a payment involving a `DoAuthorization` call::
1. One or many calls to `SetExpressCheckout` with pertinent order
details, returns `TOKEN`
1. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to
Order, `AMT` set to the amount of the transaction, returns
`TRANSACTIONID`
1. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the
amount of the transaction.
1. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID`
returned by `DoAuthorization`)
"""
args = self._sanitize_locals(locals())
return self._call('DoAuthorization', **args)
def do_capture(self, authorizationid, amt, completetype='Complete',
**kwargs):
"""Shortcut for the DoCapture method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``authorizationid``.
The `amt` should be the same as the authorized transaction.
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('DoCapture', **kwargs)
def do_direct_payment(self, paymentaction="Sale", **kwargs):
"""Shortcut for the DoDirectPayment method.
``paymentaction`` could be 'Authorization' or 'Sale'
To issue a Sale immediately::
charge = {
'amt': '10.00',
'creditcardtype': 'Visa',
'acct': '4812177017895760',
'expdate': '012010',
'cvv2': '962',
'firstname': 'John',
'lastname': 'Doe',
'street': '1 Main St',
'city': 'San Jose',
'state': 'CA',
'zip': '95131',
'countrycode': 'US',
'currencycode': 'USD',
}
direct_payment("Sale", **charge)
Or, since "Sale" is the default:
direct_payment(**charge)
To issue an Authorization, simply pass "Authorization" instead
of "Sale".
You may also explicitly set ``paymentaction`` as a keyword argument:
...
direct_payment(paymentaction="Sale", **charge)
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('DoDirectPayment', **kwargs)
def do_void(self, **kwargs):
"""Shortcut for the DoVoid method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``AUTHORIZATIONID``.
Required Kwargs
---------------
* AUTHORIZATIONID
"""
return self._call('DoVoid', **kwargs)
def get_express_checkout_details(self, **kwargs):
"""Shortcut for the GetExpressCheckoutDetails method.
Required Kwargs
---------------
* TOKEN
"""
return self._call('GetExpressCheckoutDetails', **kwargs)
def get_transaction_details(self, **kwargs):
"""Shortcut for the GetTransactionDetails method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``transactionid``.
Required Kwargs
---------------
* TRANSACTIONID
"""
return self._call('GetTransactionDetails', **kwargs)
def transaction_search(self, **kwargs):
"""Shortcut for the TransactionSearch method.
Returns a PayPalResponseList object, which merges the L_ syntax list
to a list of dictionaries with properly named keys.
Note that the API will limit returned transactions to 100.
Required Kwargs
---------------
* STARTDATE
Optional Kwargs
---------------
STATUS = one of ['Pending','Processing','Success','Denied','Reversed']
"""
plain = self._call('TransactionSearch', **kwargs)
return PayPalResponseList(plain.raw, self.config)
def set_express_checkout(self, **kwargs):
"""Start an Express checkout.
You'll want to use this in conjunction with
:meth:`generate_express_checkout_redirect_url` to create a payment,
then figure out where to redirect the user to for them to
authorize the payment on PayPal's website.
Required Kwargs
---------------
* PAYMENTREQUEST_0_AMT
* PAYMENTREQUEST_0_PAYMENTACTION
* RETURNURL
* CANCELURL
"""
return self._call('SetExpressCheckout', **kwargs)
def refund_transaction(self, transactionid=None, payerid=None, **kwargs):
"""Shortcut for RefundTransaction method.
Note new API supports passing a PayerID instead of a transaction id,
exactly one must be provided.
Optional:
INVOICEID
REFUNDTYPE
AMT
CURRENCYCODE
NOTE
RETRYUNTIL
REFUNDSOURCE
MERCHANTSTOREDETAILS
REFUNDADVICE
REFUNDITEMDETAILS
MSGSUBID
MERCHANSTOREDETAILS has two fields:
STOREID
TERMINALID
"""
# This line seems like a complete waste of time... kwargs should not
# be populated
if (transactionid is None) and (payerid is None):
raise PayPalError(
'RefundTransaction requires either a transactionid or '
'a payerid')
if (transactionid is not None) and (payerid is not None):
raise PayPalError(
'RefundTransaction requires only one of transactionid %s '
'and payerid %s' % (transactionid, payerid))
if transactionid is not None:
kwargs['TRANSACTIONID'] = transactionid
else:
kwargs['PAYERID'] = payerid
return self._call('RefundTransaction', **kwargs)
def do_express_checkout_payment(self, **kwargs):
"""Finishes an Express checkout.
TOKEN is the token that was returned earlier by
:meth:`set_express_checkout`. This identifies the transaction.
Required
--------
* TOKEN
* PAYMENTACTION
* PAYERID
* AMT
"""
return self._call('DoExpressCheckoutPayment', **kwargs)
def generate_express_checkout_redirect_url(self, token, useraction=None):
"""Returns the URL to redirect the user to for the Express checkout.
Express Checkouts must be verified by the customer by redirecting them
to the PayPal website. Use the token returned in the response from
:meth:`set_express_checkout` with this function to figure out where
to redirect the user to.
The button text on the PayPal page can be controlled via `useraction`.
The documented possible values are `commit` and `continue`. However,
any other value will only result in a warning.
:param str token: The unique token identifying this transaction.
:param str useraction: Control the button text on the PayPal page.
:rtype: str
:returns: The URL to redirect the user to for approval.
"""
url_vars = (self.config.PAYPAL_URL_BASE, token)
url = "%s?cmd=_express-checkout&token=%s" % url_vars
if useraction:
if not useraction.lower() in ('commit', 'continue'):
warnings.warn('useraction=%s is not documented' % useraction,
RuntimeWarning)
url += '&useraction=%s' % useraction
return url
def generate_cart_upload_redirect_url(self, **kwargs):
"""https://www.sandbox.paypal.com/webscr
?cmd=_cart
&upload=1
"""
required_vals = ('business', 'item_name_1', 'amount_1', 'quantity_1')
self._check_required(required_vals, **kwargs)
url = "%s?cmd=_cart&upload=1" % self.config.PAYPAL_URL_BASE
additional = self._encode_utf8(**kwargs)
additional = urlencode(additional)
return url + "&" + additional
def get_recurring_payments_profile_details(self, profileid):
"""Shortcut for the GetRecurringPaymentsProfile method.
This returns details for a recurring payment plan. The ``profileid`` is
a value included in the response retrieved by the function
``create_recurring_payments_profile``. The profile details include the
data provided when the profile was created as well as default values
for ignored fields and some pertinent statistics.
e.g.:
response = create_recurring_payments_profile(**profile_info)
profileid = response.PROFILEID
details = get_recurring_payments_profile(profileid)
The response from PayPal is somewhat self-explanatory, but for a
description of each field, visit the following URI:
https://www.x.com/docs/DOC-1194
"""
args = self._sanitize_locals(locals())
return self._call('GetRecurringPaymentsProfileDetails', **args)
def manage_recurring_payments_profile_status(self, profileid, action,
note=None):
"""Shortcut to the ManageRecurringPaymentsProfileStatus method.
``profileid`` is the same profile id used for getting profile details.
``action`` should be either 'Cancel', 'Suspend', or 'Reactivate'.
``note`` is optional and is visible to the user. It contains the
reason for the change in status.
"""
args = self._sanitize_locals(locals())
if not note:
del args['note']
return self._call('ManageRecurringPaymentsProfileStatus', **args)
def update_recurring_payments_profile(self, profileid, **kwargs):
"""Shortcut to the UpdateRecurringPaymentsProfile method.
``profileid`` is the same profile id used for getting profile details.
The keyed arguments are data in the payment profile which you wish to
change. The profileid does not change. Anything else will take the new
value. Most of, though not all of, the fields available are shared
with creating a profile, but for the complete list of parameters, you
can visit the following URI:
https://www.x.com/docs/DOC-1212
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('UpdateRecurringPaymentsProfile', **kwargs)
def bm_create_button(self, **kwargs):
"""Shortcut to the BMCreateButton method.
See the docs for details on arguments:
https://cms.paypal.com/mx/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_nvp_BMCreateButton
The L_BUTTONVARn fields are especially important, so make sure to
read those and act accordingly. See unit tests for some examples.
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('BMCreateButton', **kwargs)
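# Hedged usage sketch (not part of the library): the credential kwargs mirror the
# 3TOKEN fields read in _get_call_params() above, and attribute access on the
# response (response.TOKEN) follows the response.PROFILEID pattern shown in the
# docstrings. All values and URLs are placeholders.
def _example_express_checkout():
    iface = PayPalInterface(API_USERNAME='user_api1.example.com',
                            API_PASSWORD='secret',
                            API_SIGNATURE='signature')
    response = iface.set_express_checkout(
        PAYMENTREQUEST_0_AMT='10.00',
        PAYMENTREQUEST_0_PAYMENTACTION='Sale',
        RETURNURL='https://example.com/return',
        CANCELURL='https://example.com/cancel')
    # Send the buyer to PayPal to approve the payment, then finish with
    # do_express_checkout_payment() once they return.
    return iface.generate_express_checkout_redirect_url(response.TOKEN)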
|
apache-2.0
| -4,156,943,831,509,777,000
| 36.471591
| 114
| 0.598838
| false
| 4.505807
| true
| false
| false
|
NeCTAR-RC/ceilometer
|
ceilometer/compute/pollsters/cpu.py
|
1
|
4105
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
# Copyright © 2012 Red Hat, Inc
#
# Author: Julien Danjou <julien@danjou.info>
# Author: Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import plugin
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log
from ceilometer import sample
LOG = log.getLogger(__name__)
class CPUPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
for instance in resources:
LOG.info(_('checking instance %s'), instance.id)
instance_name = util.instance_name(instance)
try:
cpu_info = manager.inspector.inspect_cpus(instance_name)
LOG.info(_("CPUTIME USAGE: %(instance)s %(time)d"),
{'instance': instance.__dict__,
'time': cpu_info.time})
cpu_num = {'cpu_number': cpu_info.number}
yield util.make_sample_from_instance(
instance,
name='cpu',
type=sample.TYPE_CUMULATIVE,
unit='ns',
volume=cpu_info.time,
additional_metadata=cpu_num,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU time is not implemented for %s'
), manager.inspector.__class__.__name__)
except Exception as err:
LOG.error(_('could not get CPU time for %(id)s: %(e)s') % (
{'id': instance.id, 'e': err}))
LOG.exception(err)
class CPUUtilPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking CPU util for instance %s'), instance.id)
try:
cpu_info = manager.inspector.inspect_cpu_util(
instance, self._inspection_duration)
LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
({'instance': instance.__dict__,
'util': cpu_info.util}))
yield util.make_sample_from_instance(
instance,
name='cpu_util',
type=sample.TYPE_GAUGE,
unit='%',
volume=cpu_info.util,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU Util is not implemented for %s'
), manager.inspector.__class__.__name__)
except Exception as err:
LOG.error(_('Could not get CPU Util for %(id)s: %(e)s'), (
{'id': instance.id, 'e': err}))
|
apache-2.0
| -2,454,638,988,634,142,700
| 43.597826
| 75
| 0.574945
| false
| 4.474373
| false
| false
| false
|
openstack/ironic
|
ironic/common/context.py
|
1
|
1975
|
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context
class RequestContext(context.RequestContext):
"""Extends security contexts from the oslo.context library."""
def __init__(self, is_public_api=False, **kwargs):
"""Initialize the RequestContext
:param is_public_api: Specifies whether the request should be processed
without authentication.
:param kwargs: additional arguments passed to oslo.context.
"""
super(RequestContext, self).__init__(**kwargs)
self.is_public_api = is_public_api
def to_policy_values(self):
policy_values = super(RequestContext, self).to_policy_values()
policy_values.update({
'project_name': self.project_name,
'is_public_api': self.is_public_api,
})
return policy_values
def ensure_thread_contain_context(self):
"""Ensure threading contains context
For async/periodic tasks, the local thread is missing its context.
Set it from this request context so that the request_id can be logged
in log messages.
"""
if context.get_current():
return
self.update_store()
def get_admin_context():
"""Create an administrator context."""
context = RequestContext(auth_token=None,
project_id=None,
overwrite=False)
return context
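# Hedged usage sketch (not part of the module): shows how the is_public_api flag
# set in __init__ surfaces through to_policy_values() for policy enforcement.
def _example_policy_values():
    ctx = RequestContext(is_public_api=True)
    return ctx.to_policy_values()   # includes 'is_public_api': True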
|
apache-2.0
| -8,362,864,803,926,115,000
| 33.051724
| 79
| 0.65519
| false
| 4.519451
| false
| false
| false
|
openstack/os-win
|
os_win/tests/unit/test_utils.py
|
1
|
14238
|
# Copyright 2015 Cloudbase Solutions SRL
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the os_win._utils module.
"""
from unittest import mock
import ddt
from os_win import _utils
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
@ddt.ddt
class UtilsTestCase(test_base.BaseTestCase):
@mock.patch('oslo_concurrency.processutils.execute')
def test_execute(self, mock_execute):
_utils.execute(mock.sentinel.cmd, kwarg=mock.sentinel.kwarg)
mock_execute.assert_called_once_with(mock.sentinel.cmd,
kwarg=mock.sentinel.kwarg)
def test_parse_server_string(self):
result = _utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = _utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = _utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = _utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = _utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = _utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = _utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = _utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = _utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = _utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
result = _utils.parse_server_string('')
self.assertEqual(('', ''), result)
def _get_fake_func_with_retry_decorator(self, side_effect,
decorator=_utils.retry_decorator,
*args, **kwargs):
func_side_effect = mock.Mock(side_effect=side_effect)
@decorator(*args, **kwargs)
def fake_func(*_args, **_kwargs):
return func_side_effect(*_args, **_kwargs)
return fake_func, func_side_effect
@mock.patch.object(_utils, 'time')
def test_retry_decorator(self, mock_time):
err_code = 1
max_retry_count = 5
max_sleep_time = 2
timeout = max_retry_count + 1
mock_time.time.side_effect = range(timeout)
raised_exc = exceptions.Win32Exception(message='fake_exc',
error_code=err_code)
side_effect = [raised_exc] * max_retry_count
side_effect.append(mock.sentinel.ret_val)
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
error_codes=err_code,
exceptions=exceptions.Win32Exception,
max_retry_count=max_retry_count,
max_sleep_time=max_sleep_time,
timeout=timeout,
side_effect=side_effect)
ret_val = fake_func(mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
self.assertEqual(mock.sentinel.ret_val, ret_val)
fake_func_side_effect.assert_has_calls(
[mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] *
(max_retry_count + 1))
self.assertEqual(max_retry_count + 1, mock_time.time.call_count)
mock_time.sleep.assert_has_calls(
[mock.call(sleep_time)
for sleep_time in [1, 2, 2, 2, 1]])
@mock.patch.object(_utils, 'time')
def _test_retry_decorator_exceeded(self, mock_time, expected_try_count,
mock_time_side_eff=None,
timeout=None, max_retry_count=None):
raised_exc = exceptions.Win32Exception(message='fake_exc')
mock_time.time.side_effect = mock_time_side_eff
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
exceptions=exceptions.Win32Exception,
timeout=timeout,
side_effect=raised_exc)
self.assertRaises(exceptions.Win32Exception, fake_func)
fake_func_side_effect.assert_has_calls(
[mock.call()] * expected_try_count)
def test_retry_decorator_tries_exceeded(self):
self._test_retry_decorator_exceeded(
max_retry_count=2,
expected_try_count=3)
def test_retry_decorator_time_exceeded(self):
self._test_retry_decorator_exceeded(
mock_time_side_eff=[0, 1, 4],
timeout=3,
expected_try_count=1)
@mock.patch('time.sleep')
def _test_retry_decorator_no_retry(self, mock_sleep,
expected_exceptions=(),
expected_error_codes=()):
err_code = 1
raised_exc = exceptions.Win32Exception(message='fake_exc',
error_code=err_code)
fake_func, fake_func_side_effect = (
self._get_fake_func_with_retry_decorator(
error_codes=expected_error_codes,
exceptions=expected_exceptions,
side_effect=raised_exc))
self.assertRaises(exceptions.Win32Exception,
fake_func, mock.sentinel.arg,
fake_kwarg=mock.sentinel.kwarg)
self.assertFalse(mock_sleep.called)
fake_func_side_effect.assert_called_once_with(
mock.sentinel.arg, fake_kwarg=mock.sentinel.kwarg)
def test_retry_decorator_unexpected_err_code(self):
self._test_retry_decorator_no_retry(
expected_exceptions=exceptions.Win32Exception,
expected_error_codes=2)
def test_retry_decorator_unexpected_exc(self):
self._test_retry_decorator_no_retry(
expected_exceptions=(IOError, AttributeError))
@mock.patch('time.sleep')
def test_retry_decorator_explicitly_avoid_retry(self, mock_sleep):
# Tests the case when there is a function aware of the retry
# decorator and explicitly requests that no retry should be
# performed.
def func_side_effect(fake_arg, retry_context):
self.assertEqual(mock.sentinel.arg, fake_arg)
self.assertEqual(retry_context, dict(prevent_retry=False))
retry_context['prevent_retry'] = True
raise exceptions.Win32Exception(message='fake_exc',
error_code=1)
fake_func, mock_side_effect = (
self._get_fake_func_with_retry_decorator(
exceptions=exceptions.Win32Exception,
side_effect=func_side_effect,
pass_retry_context=True))
self.assertRaises(exceptions.Win32Exception,
fake_func, mock.sentinel.arg)
self.assertEqual(1, mock_side_effect.call_count)
self.assertFalse(mock_sleep.called)
@mock.patch.object(_utils.socket, 'getaddrinfo')
def test_get_ips(self, mock_getaddrinfo):
ips = ['1.2.3.4', '5.6.7.8']
mock_getaddrinfo.return_value = [
(None, None, None, None, (ip, 0)) for ip in ips]
resulted_ips = _utils.get_ips(mock.sentinel.addr)
self.assertEqual(ips, resulted_ips)
mock_getaddrinfo.assert_called_once_with(
mock.sentinel.addr, None, 0, 0, 0)
@mock.patch('eventlet.tpool.execute')
@mock.patch('eventlet.getcurrent')
@ddt.data(mock.Mock(), None)
def test_avoid_blocking_call(self, gt_parent, mock_get_current_gt,
mock_execute):
mock_get_current_gt.return_value.parent = gt_parent
mock_execute.return_value = mock.sentinel.ret_val
def fake_blocking_func(*args, **kwargs):
self.assertEqual((mock.sentinel.arg, ), args)
self.assertEqual(dict(kwarg=mock.sentinel.kwarg),
kwargs)
return mock.sentinel.ret_val
fake_blocking_func_decorated = (
_utils.avoid_blocking_call_decorator(fake_blocking_func))
ret_val = fake_blocking_func_decorated(mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
self.assertEqual(mock.sentinel.ret_val, ret_val)
if gt_parent:
mock_execute.assert_called_once_with(fake_blocking_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
else:
self.assertFalse(mock_execute.called)
@mock.patch.object(_utils, 'time')
@ddt.data(True, False)
def test_wmi_retry_decorator(self, expect_hres, mock_time):
expected_hres = 0x8007beef
expected_err_code = expected_hres if expect_hres else 0xbeef
other_hres = 0x80070001
max_retry_count = 5
# The second exception will contain an unexpected error code,
# in which case we expect the function to propagate the error.
expected_try_count = 2
side_effect = [test_base.FakeWMIExc(hresult=expected_hres),
test_base.FakeWMIExc(hresult=other_hres)]
decorator = (_utils.wmi_retry_decorator_hresult if expect_hres
else _utils.wmi_retry_decorator)
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
error_codes=expected_err_code,
max_retry_count=max_retry_count,
decorator=decorator,
side_effect=side_effect)
self.assertRaises(test_base.FakeWMIExc,
fake_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
fake_func_side_effect.assert_has_calls(
[mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] *
expected_try_count)
def test_get_com_error_hresult(self):
fake_hres = -5
expected_hres = (1 << 32) + fake_hres
mock_excepinfo = [None] * 5 + [fake_hres]
mock_com_err = mock.Mock(excepinfo=mock_excepinfo)
ret_val = _utils.get_com_error_hresult(mock_com_err)
self.assertEqual(expected_hres, ret_val)
def get_com_error_hresult_missing_excepinfo(self):
ret_val = _utils.get_com_error_hresult(None)
self.assertIsNone(ret_val)
def test_hresult_to_err_code(self):
# This could differ based on the error source.
# Only the last 2 bytes of the hresult contain the error code.
fake_file_exists_hres = -0x7ff8ffb0
file_exists_err_code = 0x50
ret_val = _utils.hresult_to_err_code(fake_file_exists_hres)
self.assertEqual(file_exists_err_code, ret_val)
@mock.patch.object(_utils, 'get_com_error_hresult')
@mock.patch.object(_utils, 'hresult_to_err_code')
def test_get_com_error_code(self, mock_hres_to_err_code, mock_get_hresult):
ret_val = _utils.get_com_error_code(mock.sentinel.com_err)
self.assertEqual(mock_hres_to_err_code.return_value, ret_val)
mock_get_hresult.assert_called_once_with(mock.sentinel.com_err)
mock_hres_to_err_code.assert_called_once_with(
mock_get_hresult.return_value)
@ddt.data(_utils._WBEM_E_NOT_FOUND, mock.sentinel.wbem_error)
def test_is_not_found_exc(self, hresult):
exc = test_base.FakeWMIExc(hresult=hresult)
result = _utils._is_not_found_exc(exc)
expected = hresult == _utils._WBEM_E_NOT_FOUND
self.assertEqual(expected, result)
@mock.patch.object(_utils, 'get_com_error_hresult')
def test_not_found_decorator(self, mock_get_com_error_hresult):
mock_get_com_error_hresult.side_effect = lambda x: x
translated_exc = exceptions.HyperVVMNotFoundException
@_utils.not_found_decorator(
translated_exc=translated_exc)
def f(to_call):
to_call()
to_call = mock.Mock()
to_call.side_effect = exceptions.x_wmi(
'expected error', com_error=_utils._WBEM_E_NOT_FOUND)
self.assertRaises(translated_exc, f, to_call)
to_call.side_effect = exceptions.x_wmi()
self.assertRaises(exceptions.x_wmi, f, to_call)
def test_hex_str_to_byte_array(self):
fake_hex_str = '0x0010A'
resulted_array = _utils.hex_str_to_byte_array(fake_hex_str)
expected_array = bytearray([0, 1, 10])
self.assertEqual(expected_array, resulted_array)
def test_byte_array_to_hex_str(self):
fake_byte_array = bytearray(range(3))
resulted_string = _utils.byte_array_to_hex_str(fake_byte_array)
expected_string = '000102'
self.assertEqual(expected_string, resulted_string)
def test_required_vm_version(self):
@_utils.required_vm_version()
def foo(bar, vmsettings):
pass
mock_vmsettings = mock.Mock()
for good_version in [constants.VM_VERSION_5_0,
constants.VM_VERSION_254_0]:
mock_vmsettings.Version = good_version
foo(mock.sentinel.bar, mock_vmsettings)
for bad_version in ['4.99', '254.1']:
mock_vmsettings.Version = bad_version
self.assertRaises(exceptions.InvalidVMVersion, foo,
mock.sentinel.bar, mock_vmsettings)
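# Hedged usage sketch (not part of the test module): based on the keyword
# arguments exercised above, a caller presumably wraps a flaky Win32 call like
# this. The wrapped function body and the error code value are illustrative.
@_utils.retry_decorator(exceptions=exceptions.Win32Exception,
                        error_codes=5,
                        max_retry_count=3,
                        max_sleep_time=2)
def _example_flaky_call():
    # Retried up to 3 times when a Win32Exception with error code 5 is raised.
    return 'ok'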
|
apache-2.0
| -7,459,312,965,330,679,000
| 38.994382
| 79
| 0.599874
| false
| 3.680021
| true
| false
| false
|
MVilstrup/mosquito_1
|
mosquito/networking/sink.py
|
1
|
1926
|
# Task sink
# Binds PULL socket to tcp://localhost:5558
# Collects results from workers via that socket
import zmq
class Sink(object):
def __init__(self,
result_port,
send_port,
break_port="tcp://127.0.0.1:9999"):
context = zmq.Context()
# Socket to receive messages on
self.receive_socket = context.socket(zmq.PULL)
self.receive_socket.bind(result_port)
# Socket to reschedule domains that timed out
self.send_socket = context.socket(zmq.PUSH)
self.send_socket.bind(send_port)
# Socket to reschedule domains that timed out
self.break_socket = context.socket(zmq.SUB)
self.break_socket.bind(break_port)
# Poller used to switch between the two sockets (kept on self so start() can use it)
self.poller = zmq.Poller()
self.poller.register(self.receive_socket, zmq.POLLIN)
self.poller.register(self.break_socket, zmq.POLLIN)
def start(self):
should_continue = True
while should_continue:
sockets = dict(self.poller.poll())
if self.receive_socket in sockets and sockets[
self.receive_socket] == zmq.POLLIN:
message = self.receive_socket.recv_json()
self._handle_messages(message)
if self.break_socket in sockets and sockets[
self.break_socket] == zmq.POLLIN:
signal = self.break_socket.recv_string()
if signal == "QUIT":
should_continue = False
return
def _handle_messages(self, message):
raise NotImplementedError("_work should be implemented")
def send_json(self, message):
return self.send_socket.send_json(message)
def send_string(self, message):
return self.send_socket.send_string(message)
def send(self, message):
return self.send_socket.send(message)
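# Hedged usage sketch (not part of the original module): Sink is effectively
# abstract, so a concrete worker overrides _handle_messages(). The send port is
# illustrative; the result port matches the default documented at the top.
class EchoSink(Sink):
    def _handle_messages(self, message):
        # Forward every collected result downstream unchanged.
        self.send_json(message)

if __name__ == '__main__':
    EchoSink(result_port="tcp://127.0.0.1:5558",
             send_port="tcp://127.0.0.1:5559").start()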
|
apache-2.0
| 3,985,627,639,154,338,300
| 31.644068
| 64
| 0.59865
| false
| 4.20524
| false
| false
| false
|
gouzongmei/t1
|
src/kimchi/model/vms.py
|
1
|
40069
|
#
# Project Kimchi
#
# Copyright IBM, Corp. 2014
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from lxml.builder import E
import lxml.etree as ET
from lxml import etree, objectify
import os
import random
import string
import time
import uuid
from xml.etree import ElementTree
import libvirt
from cherrypy.process.plugins import BackgroundTask
from kimchi import model, vnc
from kimchi.config import READONLY_POOL_TYPE, config
from kimchi.exception import InvalidOperation, InvalidParameter
from kimchi.exception import NotFoundError, OperationFailed
from kimchi.model.config import CapabilitiesModel
from kimchi.model.tasks import TaskModel
from kimchi.model.templates import TemplateModel
from kimchi.model.utils import get_vm_name
from kimchi.model.utils import get_metadata_node
from kimchi.model.utils import set_metadata_node
from kimchi.rollbackcontext import RollbackContext
from kimchi.screenshot import VMScreenshot
from kimchi.utils import add_task, get_next_clone_name, import_class
from kimchi.utils import kimchi_log, run_setfacl_set_attr
from kimchi.utils import template_name_from_uri
from kimchi.xmlutils.utils import xpath_get_text, xml_item_update
from kimchi.xmlutils.utils import dictize
DOM_STATE_MAP = {0: 'nostate',
1: 'running',
2: 'blocked',
3: 'paused',
4: 'shutdown',
5: 'shutoff',
6: 'crashed',
7: 'pmsuspended'}
GUESTS_STATS_INTERVAL = 5
VM_STATIC_UPDATE_PARAMS = {'name': './name',
'cpus': './vcpu',
'memory': './memory'}
VM_LIVE_UPDATE_PARAMS = {}
stats = {}
XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file"
XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']"
XPATH_DOMAIN_NAME = '/domain/name'
XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address"
XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\
"mac[@address='%s']"
XPATH_DOMAIN_UUID = '/domain/uuid'
class VMsModel(object):
def __init__(self, **kargs):
self.conn = kargs['conn']
self.objstore = kargs['objstore']
self.caps = CapabilitiesModel(**kargs)
self.guests_stats_thread = BackgroundTask(GUESTS_STATS_INTERVAL,
self._update_guests_stats)
self.guests_stats_thread.start()
def _update_guests_stats(self):
vm_list = self.get_list()
for name in vm_list:
try:
dom = VMModel.get_vm(name, self.conn)
vm_uuid = dom.UUIDString()
info = dom.info()
state = DOM_STATE_MAP[info[0]]
if state != 'running':
stats[vm_uuid] = {}
continue
if stats.get(vm_uuid, None) is None:
stats[vm_uuid] = {}
timestamp = time.time()
prevStats = stats.get(vm_uuid, {})
seconds = timestamp - prevStats.get('timestamp', 0)
stats[vm_uuid].update({'timestamp': timestamp})
self._get_percentage_cpu_usage(vm_uuid, info, seconds)
self._get_network_io_rate(vm_uuid, dom, seconds)
self._get_disk_io_rate(vm_uuid, dom, seconds)
except Exception as e:
# VM might be deleted just after we get the list.
# This is OK, just skip.
kimchi_log.debug('Error processing VM stats: %s', e.message)
continue
def _get_percentage_cpu_usage(self, vm_uuid, info, seconds):
prevCpuTime = stats[vm_uuid].get('cputime', 0)
cpus = info[3]
cpuTime = info[4] - prevCpuTime
base = (((cpuTime) * 100.0) / (seconds * 1000.0 * 1000.0 * 1000.0))
percentage = max(0.0, min(100.0, base / cpus))
stats[vm_uuid].update({'cputime': info[4], 'cpu': percentage})
def _get_network_io_rate(self, vm_uuid, dom, seconds):
prevNetRxKB = stats[vm_uuid].get('netRxKB', 0)
prevNetTxKB = stats[vm_uuid].get('netTxKB', 0)
currentMaxNetRate = stats[vm_uuid].get('max_net_io', 100)
rx_bytes = 0
tx_bytes = 0
tree = ElementTree.fromstring(dom.XMLDesc(0))
for target in tree.findall('devices/interface/target'):
dev = target.get('dev')
io = dom.interfaceStats(dev)
rx_bytes += io[0]
tx_bytes += io[4]
netRxKB = float(rx_bytes) / 1000
netTxKB = float(tx_bytes) / 1000
rx_stats = (netRxKB - prevNetRxKB) / seconds
tx_stats = (netTxKB - prevNetTxKB) / seconds
rate = rx_stats + tx_stats
max_net_io = round(max(currentMaxNetRate, int(rate)), 1)
stats[vm_uuid].update({'net_io': rate, 'max_net_io': max_net_io,
'netRxKB': netRxKB, 'netTxKB': netTxKB})
def _get_disk_io_rate(self, vm_uuid, dom, seconds):
prevDiskRdKB = stats[vm_uuid].get('diskRdKB', 0)
prevDiskWrKB = stats[vm_uuid].get('diskWrKB', 0)
currentMaxDiskRate = stats[vm_uuid].get('max_disk_io', 100)
rd_bytes = 0
wr_bytes = 0
tree = ElementTree.fromstring(dom.XMLDesc(0))
for target in tree.findall("devices/disk/target"):
dev = target.get("dev")
io = dom.blockStats(dev)
rd_bytes += io[1]
wr_bytes += io[3]
diskRdKB = float(rd_bytes) / 1024
diskWrKB = float(wr_bytes) / 1024
rd_stats = (diskRdKB - prevDiskRdKB) / seconds
wr_stats = (diskWrKB - prevDiskWrKB) / seconds
rate = rd_stats + wr_stats
max_disk_io = round(max(currentMaxDiskRate, int(rate)), 1)
stats[vm_uuid].update({'disk_io': rate,
'max_disk_io': max_disk_io,
'diskRdKB': diskRdKB,
'diskWrKB': diskWrKB})
def create(self, params):
conn = self.conn.get()
t_name = template_name_from_uri(params['template'])
vm_uuid = str(uuid.uuid4())
vm_list = self.get_list()
name = get_vm_name(params.get('name'), t_name, vm_list)
# incoming text, from js json, is unicode and does not need to be decoded
if name in vm_list:
raise InvalidOperation("KCHVM0001E", {'name': name})
vm_overrides = dict()
pool_uri = params.get('storagepool')
if pool_uri:
vm_overrides['storagepool'] = pool_uri
vm_overrides['fc_host_support'] = self.caps.fc_host_support
t = TemplateModel.get_template(t_name, self.objstore, self.conn,
vm_overrides)
if not self.caps.qemu_stream and t.info.get('iso_stream', False):
raise InvalidOperation("KCHVM0005E")
t.validate()
# Store the icon for displaying later
icon = t.info.get('icon')
if icon:
try:
with self.objstore as session:
session.store('vm', vm_uuid, {'icon': icon})
except Exception as e:
# It is possible to continue Kimchi executions without store
# vm icon info
kimchi_log.error('Error trying to update database with guest '
'icon information due error: %s', e.message)
# If storagepool is SCSI, volumes will be LUNs and must be passed by
# the user from UI or manually.
vol_list = []
if t._get_storage_type() not in ["iscsi", "scsi"]:
vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {})
stream_protocols = self.caps.libvirt_stream_protocols
xml = t.to_vm_xml(name, vm_uuid,
libvirt_stream_protocols=stream_protocols,
qemu_stream_dns=self.caps.qemu_stream_dns,
graphics=graphics,
volumes=vol_list)
try:
conn.defineXML(xml.encode('utf-8'))
except libvirt.libvirtError as e:
if t._get_storage_type() not in READONLY_POOL_TYPE:
for v in vol_list:
vol = conn.storageVolLookupByPath(v['path'])
vol.delete(0)
raise OperationFailed("KCHVM0007E", {'name': name,
'err': e.get_error_message()})
VMModel.vm_update_os_metadata(VMModel.get_vm(name, self.conn), t.info,
self.caps.metadata_support)
return name
def get_list(self):
return self.get_vms(self.conn)
@staticmethod
def get_vms(conn):
conn = conn.get()
names = [dom.name().decode('utf-8') for dom in conn.listAllDomains(0)]
return sorted(names, key=unicode.lower)
class VMModel(object):
def __init__(self, **kargs):
self.conn = kargs['conn']
self.objstore = kargs['objstore']
self.caps = CapabilitiesModel(**kargs)
self.vmscreenshot = VMScreenshotModel(**kargs)
self.users = import_class('kimchi.model.users.UsersModel')(**kargs)
self.groups = import_class('kimchi.model.groups.GroupsModel')(**kargs)
self.vms = VMsModel(**kargs)
self.task = TaskModel(**kargs)
self.storagepool = model.storagepools.StoragePoolModel(**kargs)
self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs)
self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs)
cls = import_class('kimchi.model.vmsnapshots.VMSnapshotModel')
self.vmsnapshot = cls(**kargs)
cls = import_class('kimchi.model.vmsnapshots.VMSnapshotsModel')
self.vmsnapshots = cls(**kargs)
def update(self, name, params):
dom = self.get_vm(name, self.conn)
dom = self._static_vm_update(dom, params)
self._live_vm_update(dom, params)
return dom.name().decode('utf-8')
def clone(self, name):
"""Clone a virtual machine based on an existing one.
The new virtual machine will have the exact same configuration as the
original VM, except for the name, UUID, MAC addresses and disks. The
name will have the form "<name>-clone-<number>", with <number> starting
at 1; the UUID will be generated randomly; the MAC addresses will be
generated randomly with no conflicts within the original and the new
VM; and the disks will be new volumes [mostly] on the same storage
pool, with the same content as the original disks. The storage pool
'default' will always be used when cloning SCSI and iSCSI disks and
when the original storage pool cannot hold the new volume.
An exception will be raised if the virtual machine <name> is not
shutoff, if there is no available space to copy a new volume to the
storage pool 'default' (when there was also no space to copy it to the
original storage pool) and if one of the virtual machine's disks belong
to a storage pool not supported by Kimchi.
Parameters:
name -- The name of the existing virtual machine to be cloned.
Return:
A Task running the clone operation.
"""
name = name.decode('utf-8')
# VM must be shutoff in order to clone it
info = self.lookup(name)
if info['state'] != u'shutoff':
raise InvalidParameter('KCHVM0033E', {'name': name})
# this name will be used as the Task's 'target_uri' so it needs to be
# defined now.
new_name = get_next_clone_name(self.vms.get_list(), name)
# create a task with the actual clone function
taskid = add_task(u'/vms/%s' % new_name, self._clone_task,
self.objstore,
{'name': name, 'new_name': new_name})
return self.task.lookup(taskid)
def _clone_task(self, cb, params):
"""Asynchronous function which performs the clone operation.
Parameters:
cb -- A callback function to signal the Task's progress.
params -- A dict with the following values:
"name": the name of the original VM.
"new_name": the name of the new VM.
"""
name = params['name']
new_name = params['new_name']
vir_conn = self.conn.get()
# fetch base XML
cb('reading source VM XML')
try:
vir_dom = vir_conn.lookupByName(name)
flags = libvirt.VIR_DOMAIN_XML_SECURE
xml = vir_dom.XMLDesc(flags).decode('utf-8')
except libvirt.libvirtError, e:
raise OperationFailed('KCHVM0035E', {'name': name,
'err': e.message})
# update UUID
cb('updating VM UUID')
old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0]
new_uuid = unicode(uuid.uuid4())
xml = xml_item_update(xml, './uuid', new_uuid)
# update MAC addresses
cb('updating VM MAC addresses')
xml = self._clone_update_mac_addresses(xml)
with RollbackContext() as rollback:
# copy disks
cb('copying VM disks')
xml = self._clone_update_disks(xml, rollback)
# update objstore entry
cb('updating object store')
self._clone_update_objstore(old_uuid, new_uuid, rollback)
# update name
cb('updating VM name')
xml = xml_item_update(xml, './name', new_name)
# create new guest
cb('defining new VM')
try:
vir_conn.defineXML(xml)
except libvirt.libvirtError, e:
raise OperationFailed('KCHVM0035E', {'name': name,
'err': e.message})
rollback.commitAll()
cb('OK', True)
@staticmethod
def _clone_update_mac_addresses(xml):
"""Update the MAC addresses with new values in the XML descriptor of a
cloning domain.
The new MAC addresses will be generated randomly, and their values are
guaranteed to be distinct from the ones in the original VM.
Arguments:
xml -- The XML descriptor of the original domain.
Return:
The XML descriptor <xml> with the new MAC addresses instead of the
old ones.
"""
old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC)
new_macs = []
for mac in old_macs:
while True:
new_mac = model.vmifaces.VMIfacesModel.random_mac()
# make sure the new MAC doesn't conflict with the original VM
# and with the new values on the new VM.
if new_mac not in (old_macs + new_macs):
new_macs.append(new_mac)
break
xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac,
new_mac, 'address')
return xml
def _clone_update_disks(self, xml, rollback):
"""Clone disks from a virtual machine. The disks are copied as new
volumes and the new VM's XML is updated accordingly.
Arguments:
xml -- The XML descriptor of the original VM + new value for
"/domain/uuid".
rollback -- A rollback context so the new volumes can be removed if an
error occurs during the cloning operation.
Return:
The XML descriptor <xml> with the new disk paths instead of the
old ones.
"""
# the UUID will be used to create the disk paths
uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0]
all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK)
vir_conn = self.conn.get()
for i, path in enumerate(all_paths):
try:
vir_orig_vol = vir_conn.storageVolLookupByPath(path)
vir_pool = vir_orig_vol.storagePoolLookupByVolume()
orig_pool_name = vir_pool.name().decode('utf-8')
orig_vol_name = vir_orig_vol.name().decode('utf-8')
except libvirt.libvirtError, e:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise OperationFailed('KCHVM0035E', {'name': domain_name,
'err': e.message})
orig_pool = self.storagepool.lookup(orig_pool_name)
orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name)
new_pool_name = orig_pool_name
new_pool = orig_pool
if orig_pool['type'] in ['dir', 'netfs', 'logical']:
# if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold
# a new volume with the same size, the pool 'default' should
# be used
if orig_vol['capacity'] > orig_pool['available']:
kimchi_log.warning('storage pool \'%s\' doesn\'t have '
'enough free space to store image '
'\'%s\'; falling back to \'default\'',
orig_pool_name, path)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
# ...and if even the pool 'default' cannot hold a new
# volume, raise an exception
if orig_vol['capacity'] > new_pool['available']:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise InvalidOperation('KCHVM0034E',
{'name': domain_name})
elif orig_pool['type'] in ['scsi', 'iscsi']:
# SCSI and iSCSI always fall back to the storage pool 'default'
kimchi_log.warning('cannot create new volume for clone in '
'storage pool \'%s\'; falling back to '
'\'default\'', orig_pool_name)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
# if the pool 'default' cannot hold a new volume, raise
# an exception
if orig_vol['capacity'] > new_pool['available']:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise InvalidOperation('KCHVM0034E', {'name': domain_name})
else:
# unexpected storage pool type
raise InvalidOperation('KCHPOOL0014E',
{'type': orig_pool['type']})
# new volume name: <UUID>-<loop-index>.<original extension>
# e.g. 1234-5678-9012-3456-0.img
ext = os.path.splitext(path)[1]
new_vol_name = u'%s-%d%s' % (uuid, i, ext)
task = self.storagevolume.clone(orig_pool_name, orig_vol_name,
new_name=new_vol_name)
self.task.wait(task['id'], 3600) # 1 h
# get the new volume path and update the XML descriptor
new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name)
xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path,
new_vol['path'], 'file')
# remove the new volume should an error occur later
rollback.prependDefer(self.storagevolume.delete, new_pool_name,
new_vol_name)
return xml
def _clone_update_objstore(self, old_uuid, new_uuid, rollback):
"""Update Kimchi's object store with the cloning VM.
Arguments:
old_uuid -- The UUID of the original VM.
        new_uuid -- The UUID of the new, cloned VM.
rollback -- A rollback context so the object store entry can be removed
if an error occurs during the cloning operation.
"""
with self.objstore as session:
try:
vm = session.get('vm', old_uuid)
icon = vm['icon']
session.store('vm', new_uuid, {'icon': icon})
except NotFoundError:
# if we cannot find an object store entry for the original VM,
# don't store one with an empty value.
pass
else:
# we need to define a custom function to prepend to the
# rollback context because the object store session needs to be
# opened and closed correctly (i.e. "prependDefer" only
# accepts one command at a time but we need more than one to
# handle an object store).
def _rollback_objstore():
with self.objstore as session_rb:
session_rb.delete('vm', new_uuid, ignore_missing=True)
# remove the new object store entry should an error occur later
rollback.prependDefer(_rollback_objstore)
def _build_access_elem(self, users, groups):
auth = config.get("authentication", "method")
auth_elem = E.auth(type=auth)
for user in users:
auth_elem.append(E.user(user))
for group in groups:
auth_elem.append(E.group(group))
access = E.access()
access.append(auth_elem)
return access
def _vm_update_access_metadata(self, dom, params):
users = groups = None
if "users" in params:
users = params["users"]
for user in users:
if not self.users.validate(user):
raise InvalidParameter("KCHVM0027E",
{'users': user})
if "groups" in params:
groups = params["groups"]
for group in groups:
if not self.groups.validate(group):
raise InvalidParameter("KCHVM0028E",
{'groups': group})
if users is None and groups is None:
return
old_users, old_groups = self._get_access_info(dom)
users = old_users if users is None else users
groups = old_groups if groups is None else groups
node = self._build_access_elem(users, groups)
set_metadata_node(dom, node, self.caps.metadata_support)
def _get_access_info(self, dom):
users = groups = list()
access_xml = (get_metadata_node(dom, "access",
self.caps.metadata_support) or
"""<access></access>""")
access_info = dictize(access_xml)
auth = config.get("authentication", "method")
if ('auth' in access_info['access'] and
('type' in access_info['access']['auth'] or
len(access_info['access']['auth']) > 1)):
users = xpath_get_text(access_xml,
"/access/auth[@type='%s']/user" % auth)
groups = xpath_get_text(access_xml,
"/access/auth[@type='%s']/group" % auth)
elif auth == 'pam':
# Compatible to old permission tagging
users = xpath_get_text(access_xml, "/access/user")
groups = xpath_get_text(access_xml, "/access/group")
return users, groups
@staticmethod
def vm_get_os_metadata(dom, metadata_support):
os_xml = (get_metadata_node(dom, "os", metadata_support) or
"""<os></os>""")
os_elem = ET.fromstring(os_xml)
return (os_elem.attrib.get("version"), os_elem.attrib.get("distro"))
@staticmethod
def vm_update_os_metadata(dom, params, metadata_support):
distro = params.get("os_distro")
version = params.get("os_version")
if distro is None:
return
os_elem = E.os({"distro": distro, "version": version})
set_metadata_node(dom, os_elem, metadata_support)
def _update_graphics(self, dom, xml, params):
root = objectify.fromstring(xml)
graphics = root.devices.find("graphics")
if graphics is None:
return xml
password = params['graphics'].get("passwd")
if password is not None and len(password.strip()) == 0:
password = "".join(random.sample(string.ascii_letters +
string.digits, 8))
if password is not None:
graphics.attrib['passwd'] = password
expire = params['graphics'].get("passwdValidTo")
to = graphics.attrib.get('passwdValidTo')
if to is not None:
if (time.mktime(time.strptime(to, '%Y-%m-%dT%H:%M:%S'))
- time.time() <= 0):
expire = expire if expire is not None else 30
if expire is not None:
expire_time = time.gmtime(time.time() + float(expire))
valid_to = time.strftime('%Y-%m-%dT%H:%M:%S', expire_time)
graphics.attrib['passwdValidTo'] = valid_to
if not dom.isActive():
return ET.tostring(root, encoding="utf-8")
xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
dom.updateDeviceFlags(etree.tostring(graphics),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
return xml
def _static_vm_update(self, dom, params):
state = DOM_STATE_MAP[dom.info()[0]]
old_xml = new_xml = dom.XMLDesc(0)
for key, val in params.items():
if key in VM_STATIC_UPDATE_PARAMS:
if key == 'memory':
# Libvirt saves memory in KiB. Retrieved xml has memory
                    # in KiB too, so the new value must be in KiB here
val = val * 1024
if type(val) == int:
val = str(val)
xpath = VM_STATIC_UPDATE_PARAMS[key]
new_xml = xml_item_update(new_xml, xpath, val)
if 'graphics' in params:
new_xml = self._update_graphics(dom, new_xml, params)
conn = self.conn.get()
try:
if 'name' in params:
if state == 'running':
msg_args = {'name': dom.name(), 'new_name': params['name']}
raise InvalidParameter("KCHVM0003E", msg_args)
# Undefine old vm, only if name is going to change
dom.undefine()
root = ET.fromstring(new_xml)
currentMem = root.find('.currentMemory')
if currentMem is not None:
root.remove(currentMem)
dom = conn.defineXML(ET.tostring(root, encoding="utf-8"))
except libvirt.libvirtError as e:
dom = conn.defineXML(old_xml)
raise OperationFailed("KCHVM0008E", {'name': dom.name(),
'err': e.get_error_message()})
return dom
def _live_vm_update(self, dom, params):
self._vm_update_access_metadata(dom, params)
def _has_video(self, dom):
dom = ElementTree.fromstring(dom.XMLDesc(0))
return dom.find('devices/video') is not None
def lookup(self, name):
dom = self.get_vm(name, self.conn)
info = dom.info()
state = DOM_STATE_MAP[info[0]]
screenshot = None
# (type, listen, port, passwd, passwdValidTo)
graphics = self._vm_get_graphics(name)
graphics_port = graphics[2]
graphics_port = graphics_port if state == 'running' else None
try:
if state == 'running' and self._has_video(dom):
screenshot = self.vmscreenshot.lookup(name)
elif state == 'shutoff':
# reset vm stats when it is powered off to avoid sending
# incorrect (old) data
stats[dom.UUIDString()] = {}
except NotFoundError:
pass
with self.objstore as session:
try:
extra_info = session.get('vm', dom.UUIDString())
except NotFoundError:
extra_info = {}
icon = extra_info.get('icon')
vm_stats = stats.get(dom.UUIDString(), {})
res = {}
res['cpu_utilization'] = vm_stats.get('cpu', 0)
res['net_throughput'] = vm_stats.get('net_io', 0)
res['net_throughput_peak'] = vm_stats.get('max_net_io', 100)
res['io_throughput'] = vm_stats.get('disk_io', 0)
res['io_throughput_peak'] = vm_stats.get('max_disk_io', 100)
users, groups = self._get_access_info(dom)
return {'name': name,
'state': state,
'stats': res,
'uuid': dom.UUIDString(),
'memory': info[2] >> 10,
'cpus': info[3],
'screenshot': screenshot,
'icon': icon,
# (type, listen, port, passwd, passwdValidTo)
'graphics': {"type": graphics[0],
"listen": graphics[1],
"port": graphics_port,
"passwd": graphics[3],
"passwdValidTo": graphics[4]},
'users': users,
'groups': groups,
'access': 'full',
'persistent': True if dom.isPersistent() else False
}
def _vm_get_disk_paths(self, dom):
xml = dom.XMLDesc(0)
xpath = "/domain/devices/disk[@device='disk']/source/@file"
return xpath_get_text(xml, xpath)
@staticmethod
def get_vm(name, conn):
conn = conn.get()
try:
# outgoing text to libvirt, encode('utf-8')
return conn.lookupByName(name.encode("utf-8"))
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
raise NotFoundError("KCHVM0002E", {'name': name})
else:
raise OperationFailed("KCHVM0009E", {'name': name,
'err': e.message})
def delete(self, name):
conn = self.conn.get()
dom = self.get_vm(name, self.conn)
self._vmscreenshot_delete(dom.UUIDString())
paths = self._vm_get_disk_paths(dom)
info = self.lookup(name)
if info['state'] == 'running':
self.poweroff(name)
# delete existing snapshots before deleting VM
# libvirt's Test driver does not support the function
# "virDomainListAllSnapshots", so "VMSnapshots.get_list" will raise
# "OperationFailed" in that case.
try:
snapshot_names = self.vmsnapshots.get_list(name)
except OperationFailed, e:
kimchi_log.error('cannot list snapshots: %s; '
'skipping snapshot deleting...' % e.message)
else:
for s in snapshot_names:
self.vmsnapshot.delete(name, s)
try:
dom.undefine()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0021E",
{'name': name, 'err': e.get_error_message()})
for path in paths:
try:
vol = conn.storageVolLookupByPath(path)
pool = vol.storagePoolLookupByVolume()
xml = pool.XMLDesc(0)
pool_type = xpath_get_text(xml, "/pool/@type")[0]
if pool_type not in READONLY_POOL_TYPE:
vol.delete(0)
# Update objstore to remove the volume
with self.objstore as session:
session.delete('storagevolume', path,
ignore_missing=True)
except libvirt.libvirtError as e:
kimchi_log.error('Unable to get storage volume by path: %s' %
e.message)
except Exception as e:
raise OperationFailed('KCHVOL0017E', {'err': e.message})
try:
with self.objstore as session:
if path in session.get_list('storagevolume'):
n = session.get('storagevolume', path)['ref_cnt']
session.store('storagevolume', path, {'ref_cnt': n-1})
except Exception as e:
raise OperationFailed('KCHVOL0017E', {'err': e.message})
try:
with self.objstore as session:
session.delete('vm', dom.UUIDString(), ignore_missing=True)
except Exception as e:
            # It is possible to delete a vm without deleting its database info
kimchi_log.error('Error deleting vm information from database: '
'%s', e.message)
vnc.remove_proxy_token(name)
def start(self, name):
# make sure the ISO file has read permission
dom = self.get_vm(name, self.conn)
xml = dom.XMLDesc(0)
xpath = "/domain/devices/disk[@device='cdrom']/source/@file"
isofiles = xpath_get_text(xml, xpath)
for iso in isofiles:
run_setfacl_set_attr(iso)
dom = self.get_vm(name, self.conn)
try:
dom.create()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0019E",
{'name': name, 'err': e.get_error_message()})
def poweroff(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.destroy()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0020E",
{'name': name, 'err': e.get_error_message()})
def shutdown(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.shutdown()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0029E",
{'name': name, 'err': e.get_error_message()})
def reset(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.reset(flags=0)
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0022E",
{'name': name, 'err': e.get_error_message()})
def _vm_get_graphics(self, name):
dom = self.get_vm(name, self.conn)
xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
expr = "/domain/devices/graphics/@type"
res = xpath_get_text(xml, expr)
graphics_type = res[0] if res else None
expr = "/domain/devices/graphics/@listen"
res = xpath_get_text(xml, expr)
graphics_listen = res[0] if res else None
graphics_port = graphics_passwd = graphics_passwdValidTo = None
if graphics_type:
expr = "/domain/devices/graphics[@type='%s']/@port"
res = xpath_get_text(xml, expr % graphics_type)
graphics_port = int(res[0]) if res else None
expr = "/domain/devices/graphics[@type='%s']/@passwd"
res = xpath_get_text(xml, expr % graphics_type)
graphics_passwd = res[0] if res else None
expr = "/domain/devices/graphics[@type='%s']/@passwdValidTo"
res = xpath_get_text(xml, expr % graphics_type)
if res:
to = time.mktime(time.strptime(res[0], '%Y-%m-%dT%H:%M:%S'))
graphics_passwdValidTo = to - time.mktime(time.gmtime())
return (graphics_type, graphics_listen, graphics_port,
graphics_passwd, graphics_passwdValidTo)
def connect(self, name):
# (type, listen, port, passwd, passwdValidTo)
graphics_port = self._vm_get_graphics(name)[2]
if graphics_port is not None:
vnc.add_proxy_token(name, graphics_port)
else:
raise OperationFailed("KCHVM0010E", {'name': name})
def _vmscreenshot_delete(self, vm_uuid):
screenshot = VMScreenshotModel.get_screenshot(vm_uuid, self.objstore,
self.conn)
screenshot.delete()
try:
with self.objstore as session:
session.delete('screenshot', vm_uuid)
except Exception as e:
            # It is possible to continue Kimchi executions without deleting
            # screenshots
kimchi_log.error('Error trying to delete vm screenshot from '
'database due error: %s', e.message)
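# Illustrative sketch (not part of the original module): how a caller might
# drive the asynchronous clone operation defined in VMModel above. 'vm_model',
# the VM name and the one-hour timeout are hypothetical; only VMModel.clone()
# and the TaskModel.wait()/lookup() calls already used in this file are assumed.
def _example_clone_vm(vm_model, name):
    task = vm_model.clone(name)             # returns the Task lookup dict
    vm_model.task.wait(task['id'], 3600)    # block until the clone task ends
    return vm_model.task.lookup(task['id'])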
class VMScreenshotModel(object):
def __init__(self, **kargs):
self.objstore = kargs['objstore']
self.conn = kargs['conn']
def lookup(self, name):
dom = VMModel.get_vm(name, self.conn)
d_info = dom.info()
vm_uuid = dom.UUIDString()
if DOM_STATE_MAP[d_info[0]] != 'running':
raise NotFoundError("KCHVM0004E", {'name': name})
screenshot = self.get_screenshot(vm_uuid, self.objstore, self.conn)
img_path = screenshot.lookup()
# screenshot info changed after scratch generation
try:
with self.objstore as session:
session.store('screenshot', vm_uuid, screenshot.info)
except Exception as e:
            # It is possible to continue Kimchi executions without storing
            # screenshots
kimchi_log.error('Error trying to update database with guest '
'screenshot information due error: %s', e.message)
return img_path
@staticmethod
def get_screenshot(vm_uuid, objstore, conn):
try:
with objstore as session:
try:
params = session.get('screenshot', vm_uuid)
except NotFoundError:
params = {'uuid': vm_uuid}
session.store('screenshot', vm_uuid, params)
except Exception as e:
# The 'except' outside of 'with' is necessary to catch possible
# exception from '__exit__' when calling 'session.store'
# It is possible to continue Kimchi vm executions without
# screenshots
kimchi_log.error('Error trying to update database with guest '
'screenshot information due error: %s', e.message)
return LibvirtVMScreenshot(params, conn)
class LibvirtVMScreenshot(VMScreenshot):
def __init__(self, vm_uuid, conn):
VMScreenshot.__init__(self, vm_uuid)
self.conn = conn
def _generate_scratch(self, thumbnail):
def handler(stream, buf, opaque):
fd = opaque
os.write(fd, buf)
fd = os.open(thumbnail, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0644)
try:
conn = self.conn.get()
dom = conn.lookupByUUIDString(self.vm_uuid)
vm_name = dom.name()
stream = conn.newStream(0)
dom.screenshot(stream, 0, 0)
stream.recvAll(handler, fd)
except libvirt.libvirtError:
try:
stream.abort()
except:
pass
raise NotFoundError("KCHVM0006E", {'name': vm_name})
else:
stream.finish()
finally:
os.close(fd)
|
lgpl-3.0
| -8,180,474,466,544,877,000
| 39.028971
| 79
| 0.549827
| false
| 4.091178
| false
| false
| false
|
eppye-bots/bots
|
bots/botsinit.py
|
1
|
15231
|
from __future__ import unicode_literals
import sys
if sys.version_info[0] > 2:
basestring = unicode = str
import configparser as ConfigParser
else:
import ConfigParser
import os
import encodings
import codecs
import logging
import logging.handlers
#bots-modules
from . import botsglobal
from . import botslib
from . import node
class BotsConfig(ConfigParser.RawConfigParser):
''' As ConfigParser, but with defaults.
'''
def get(self,section, option, default='', **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.get(self,section, option)
elif default == '':
raise botslib.BotsError('No entry "%(option)s" in section "%(section)s" in "bots.ini".',{'option':option,'section':section})
else:
return default
def getint(self,section, option, default, **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.getint(self,section, option)
else:
return default
def getboolean(self,section, option, default, **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.getboolean(self,section, option)
else:
return default
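# Illustrative sketch (not part of the original module): how the defaults in
# BotsConfig behave. The instance, section and option names below are only
# examples; the real sections/options come from bots.ini.
def _example_botsconfig_defaults(ini):
    # returns 'botssys' when the option is missing, instead of raising
    botssys = ini.get('directories', 'botssys', 'botssys')
    # raises botslib.BotsError when the option is missing and no default is passed
    configdir = ini.get('directories', 'config')
    return botssys, configdir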
def generalinit(configdir):
##########################################################################
#Configdir: settings.py & bots.ini#########################################
#Configdir MUST be importable. So configdir is relative to PYTHONPATH. Try several options for this import.
    try: #first check if configdir is outside the bots-directory: import configdir.settings.py
importnameforsettings = os.path.normpath(os.path.join(configdir,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
except ImportError: #normal: configdir is in bots directory: import bots.configdir.settings.py
try:
importnameforsettings = os.path.normpath(os.path.join('bots',configdir,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
except ImportError: #set pythonpath to config directory first
if not os.path.exists(configdir): #check if configdir exists.
                raise botslib.PanicError('In initialisation: path to configuration does not exist: "%(configdir)s".',{'configdir':configdir})
addtopythonpath = os.path.abspath(os.path.dirname(configdir))
moduletoimport = os.path.basename(configdir)
sys.path.append(addtopythonpath)
importnameforsettings = os.path.normpath(os.path.join(moduletoimport,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
#settings is imported, so now we know where to find settings.py: importnameforsettings
    #note: the imported settings.py itself is NOT used, this is done via django.conf.settings
configdirectory = os.path.abspath(os.path.dirname(settings.__file__))
#Read configuration-file bots.ini.
botsglobal.ini = BotsConfig()
botsglobal.ini.read(os.path.join(configdirectory,'bots.ini'))
# 'directories','botspath': absolute path for bots directory
botsglobal.ini.set('directories','botspath',os.path.abspath(os.path.dirname(__file__)))
# 'directories','config': absolute path for config directory
botsglobal.ini.set('directories','config',configdirectory)
#set config as originally received; used in starting engine via bots-monitor
botsglobal.ini.set('directories','config_org',configdir)
############################################################################
#Usersys####################################################################
#usersys MUST be importable. So usersys is relative to PYTHONPATH. Try several options for this import.
usersys = botsglobal.ini.get('directories','usersys','usersys')
try: #usersys outside bots-directory: import usersys
importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
except ImportError: #usersys is in bots directory: import bots.usersys
try:
importnameforusersys = os.path.normpath(os.path.join('bots',usersys)).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
except ImportError: #set pythonpath to usersys directory first
            if not os.path.exists(usersys): #check if usersys exists.
                raise botslib.PanicError('In initialisation: path to usersys does not exist: "%(usersys)s".',{'usersys':usersys})
addtopythonpath = os.path.abspath(os.path.dirname(usersys)) #????
moduletoimport = os.path.basename(usersys)
sys.path.append(addtopythonpath)
importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
# 'directories','usersysabs': absolute path for config usersysabs
botsglobal.ini.set('directories','usersysabs',os.path.abspath(os.path.dirname(importedusersys.__file__))) #???Find pathname usersys using imported usersys
# botsglobal.usersysimportpath: used for imports from usersys
botsglobal.usersysimportpath = importnameforusersys
botsglobal.ini.set('directories','templatehtml',botslib.join(botsglobal.ini.get('directories','usersysabs'),'grammars/templatehtml/templates'))
############################################################################
#Botssys####################################################################
# 'directories','botssys': absolute path for config botssys
botssys = botsglobal.ini.get('directories','botssys','botssys')
botsglobal.ini.set('directories','botssys_org',botssys) #store original botssys setting
botsglobal.ini.set('directories','botssys',botslib.join(botssys)) #use absolute path
botsglobal.ini.set('directories','data',botslib.join(botssys,'data'))
botsglobal.ini.set('directories','logging',botslib.join(botssys,'logging'))
############################################################################
#other inits##############################################################
if botsglobal.ini.get('webserver','environment','development') != 'development': #values in bots.ini are also used in setting up cherrypy
        logging.raiseExceptions = 0 # during production: if errors occur in writing to log: ignore error. (leads to a missing log line, better than error;-).
botslib.dirshouldbethere(botsglobal.ini.get('directories','data'))
botslib.dirshouldbethere(botsglobal.ini.get('directories','logging'))
initbotscharsets() #initialise bots charsets
node.Node.checklevel = botsglobal.ini.getint('settings','get_checklevel',1)
botslib.settimeout(botsglobal.ini.getint('settings','globaltimeout',10))
############################################################################
#Init django#################################################################################
os.environ['DJANGO_SETTINGS_MODULE'] = importnameforsettings
import django
if hasattr(django,'setup'):
django.setup()
from django.conf import settings
botsglobal.settings = settings #settings are accessed using botsglobal
#**********************************************************************************
#*** bots specific handling of character-sets (eg UNOA charset) *******************
def initbotscharsets():
'''set up right charset handling for specific charsets (UNOA, UNOB, UNOC, etc).'''
#tell python how to search a codec defined by bots. Bots searches for this in usersys/charset
codecs.register(codec_search_function)
#syntax has parameters checkcharsetin or checkcharsetout. These can have value 'botsreplace'
#eg: 'checkcharsetin':'botsreplace', #strict, ignore or botsreplace
#in case of errors: the 'wrong' character is replaced with char as set in bots.ini. Default value in bots.ini is ' ' (space)
botsglobal.botsreplacechar = unicode(botsglobal.ini.get('settings','botsreplacechar',' '))
codecs.register_error('botsreplace', botsreplacechar_handler) #need to register the handler for botsreplacechar
#set aliases for the charsets in bots.ini
for key, value in botsglobal.ini.items('charsets'):
encodings.aliases.aliases[key] = value
def codec_search_function(encoding):
try:
module,filename = botslib.botsimport('charsets',encoding)
    except botslib.BotsImportError: #charsetscript not there; other errors like syntax errors are not caught
return None
else:
if hasattr(module,'getregentry'):
return module.getregentry()
else:
return None
def botsreplacechar_handler(info):
    '''replaces a char outside a charset with a user-defined char. Useful eg for fixed records: record length does not change.'''
return (botsglobal.botsreplacechar, info.start+1)
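# Illustrative sketch (not part of the original module): effect of the
# 'botsreplace' error handler registered in initbotscharsets(). Assumes
# generalinit() has already run, so bots.ini is loaded and the handler is
# registered; the sample string is hypothetical.
def _example_botsreplace_encode():
    # u'caf\xe9' contains a char outside ascii; with errors='botsreplace' that
    # char becomes botsglobal.botsreplacechar (a space by default) instead of
    # raising UnicodeEncodeError.
    return 'caf\xe9'.encode('ascii', 'botsreplace')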
#*** end of bots specific handling of character-sets ******************************
#**********************************************************************************
def connect():
''' connect to database for non-django modules eg engine '''
if botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
#sqlite has some more fiddling; in separate file. Mainly because of some other method of parameter passing.
if not os.path.isfile(botsglobal.settings.DATABASES['default']['NAME']):
raise botslib.PanicError('Could not find database file for SQLite')
from . import botssqlite
botsglobal.db = botssqlite.connect(database = botsglobal.settings.DATABASES['default']['NAME'])
elif botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
import MySQLdb
from MySQLdb import cursors
botsglobal.db = MySQLdb.connect(host=botsglobal.settings.DATABASES['default']['HOST'],
port=int(botsglobal.settings.DATABASES['default']['PORT']),
db=botsglobal.settings.DATABASES['default']['NAME'],
user=botsglobal.settings.DATABASES['default']['USER'],
passwd=botsglobal.settings.DATABASES['default']['PASSWORD'],
cursorclass=cursors.DictCursor,
**botsglobal.settings.DATABASES['default']['OPTIONS'])
elif botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
import psycopg2
import psycopg2.extensions
import psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
botsglobal.db = psycopg2.connect(host=botsglobal.settings.DATABASES['default']['HOST'],
port=botsglobal.settings.DATABASES['default']['PORT'],
database=botsglobal.settings.DATABASES['default']['NAME'],
user=botsglobal.settings.DATABASES['default']['USER'],
password=botsglobal.settings.DATABASES['default']['PASSWORD'],
connection_factory=psycopg2.extras.DictConnection)
botsglobal.db.set_client_encoding('UNICODE')
else:
raise botslib.PanicError('Unknown database engine "%(engine)s".',{'engine':botsglobal.settings.DATABASES['default']['ENGINE']})
#*******************************************************************
#*** init logging **************************************************
#*******************************************************************
logging.addLevelName(25, 'STARTINFO')
convertini2logger = {'DEBUG':logging.DEBUG,'INFO':logging.INFO,'WARNING':logging.WARNING,'ERROR':logging.ERROR,'CRITICAL':logging.CRITICAL,'STARTINFO':25}
def initenginelogging(logname):
#initialise file logging: create main logger 'bots'
logger = logging.getLogger(logname)
logger.setLevel(convertini2logger[botsglobal.ini.get('settings','log_file_level','INFO')])
if botsglobal.ini.get('settings','log_file_number',None) == 'daily':
handler = logging.handlers.TimedRotatingFileHandler(os.path.join(botsglobal.ini.get('directories','logging'),logname+'.log'),when='midnight',backupCount=10)
else:
handler = logging.handlers.RotatingFileHandler(botslib.join(botsglobal.ini.get('directories','logging'),logname+'.log'),backupCount=botsglobal.ini.getint('settings','log_file_number',10))
handler.doRollover() #each run a new log file is used; old one is rotated
fileformat = logging.Formatter('%(asctime)s %(levelname)-8s %(name)s : %(message)s','%Y%m%d %H:%M:%S')
handler.setFormatter(fileformat)
logger.addHandler(handler)
    #initialise file logging: logger for trace of mapping; tried to use filters but could not get this to work
botsglobal.logmap = logging.getLogger('engine.map')
if not botsglobal.ini.getboolean('settings','mappingdebug',False):
botsglobal.logmap.setLevel(logging.CRITICAL)
    #logger for reading edifile; now used in only one place, and done with a plain 'if'
#~ botsglobal.ini.getboolean('settings','readrecorddebug',False)
# initialise console/screen logging
if botsglobal.ini.getboolean('settings','log_console',True):
console = logging.StreamHandler()
console.setLevel(logging.INFO)
        consoleformat = logging.Formatter('%(levelname)-8s %(message)s')
        console.setFormatter(consoleformat)    # add formatter to console
logger.addHandler(console) # add console to logger
return logger
def initserverlogging(logname):
# initialise file logging
logger = logging.getLogger(logname)
logger.setLevel(convertini2logger[botsglobal.ini.get(logname,'log_file_level','INFO')])
handler = logging.handlers.TimedRotatingFileHandler(os.path.join(botsglobal.ini.get('directories','logging'),logname+'.log'),when='midnight',backupCount=10)
fileformat = logging.Formatter('%(asctime)s %(levelname)-9s: %(message)s','%Y%m%d %H:%M:%S')
handler.setFormatter(fileformat)
logger.addHandler(handler)
# initialise console/screen logging
if botsglobal.ini.getboolean(logname,'log_console',True):
console = logging.StreamHandler()
console.setLevel(convertini2logger[botsglobal.ini.get(logname,'log_console_level','STARTINFO')])
consoleformat = logging.Formatter('%(asctime)s %(levelname)-9s: %(message)s','%Y%m%d %H:%M:%S')
console.setFormatter(consoleformat) # add formatter to console
logger.addHandler(console) # add console to logger
return logger
|
gpl-3.0
| -7,892,572,289,152,995,000
| 62.812766
| 195
| 0.627142
| false
| 4.320851
| true
| false
| false
|
westernx/sgfs
|
sgfs/cache.py
|
1
|
5707
|
from subprocess import call
import collections
import errno
import logging
import os
import sqlite3
from sgsession import Entity
log = logging.getLogger(__name__)
class PathCache(collections.MutableMapping):
def __init__(self, sgfs, project_root):
self.sgfs = sgfs
self.project_root = os.path.abspath(project_root)
        # We are in the middle of a transition of where the SQLite file
# is located, and for now we prioritize the old location.
for name in ('.sgfs-cache.sqlite', '.sgfs/cache.sqlite'):
db_path = os.path.join(project_root, name)
if os.path.exists(db_path):
break
else:
# If it doesn't exist then touch it with read/write permissions for all.
db_dir = os.path.dirname(db_path)
umask = os.umask(0)
try:
try:
os.makedirs(db_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.umask(0111)
call(['touch', db_path])
finally:
os.umask(umask)
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
with self.conn:
self.conn.execute('CREATE TABLE IF NOT EXISTS entity_paths (entity_type TEXT, entity_id INTEGER, path TEXT)')
self.conn.execute('CREATE UNIQUE INDEX IF NOT EXISTS entity_paths_entity ON entity_paths(entity_type, entity_id)')
def __repr__(self):
return '<%s for %r at 0x%x>' % (self.__class__.__name__, self.project_root, id(self))
def __setitem__(self, entity, path):
if not isinstance(entity, Entity):
raise TypeError('path cache keys must be entities; got %r %r' % (type(entity), entity))
if not isinstance(path, basestring):
raise TypeError('path cache values must be basestring; got %r %r' % (type(path), path))
path = os.path.relpath(os.path.abspath(path), self.project_root)
with self.conn:
self.conn.execute('INSERT OR REPLACE into entity_paths values (?, ?, ?)', (entity['type'], entity['id'], path))
def get(self, entity, default=None, check_tags=True):
"""Get a path for an entity.
:param Entity entity: The entity to look up in the path cache.
:param default: What to return if the entity is not in the cache;
defaults to ``None``.
:param bool check_tags: Should we check for the entity in the directory
tags at the cached path before returning it?
:returns: The cached path.
"""
if not isinstance(entity, Entity):
raise TypeError('path cache keys are entities; got %r %r' % (type(entity), entity))
with self.conn:
c = self.conn.cursor()
c.execute('SELECT path FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))
row = c.fetchone()
if row is None:
return default
path = os.path.abspath(os.path.join(self.project_root, row[0]))
# Make sure that the entity is actually tagged in the given directory.
# This guards against moving tagged directories. This does NOT
# effectively guard against copied directories.
if check_tags:
if not any(tag['entity'] is entity for tag in self.sgfs.get_directory_entity_tags(path)):
log.warning('%s %d is not tagged at %s' % (
entity['type'], entity['id'], path,
))
return default
return path
def __getitem__(self, entity):
path = self.get(entity)
if path is None:
raise KeyError(entity)
else:
return path
def __delitem__(self, entity):
if not isinstance(entity, Entity):
raise TypeError('path cache keys must be entities; got %r %r' % (type(entity), entity))
with self.conn:
self.conn.execute('DELETE FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))
def __len__(self):
with self.conn:
c = self.conn.cursor()
return c.execute('SELECT COUNT(1) FROM entity_paths').fetchone()[0]
def __iter__(self):
with self.conn:
c = self.conn.cursor()
for row in c.execute('SELECT entity_type, entity_id FROM entity_paths'):
yield self.sgfs.session.merge(dict(type=row[0], id=row[1]))
def walk_directory(self, path, entity_type=None, must_exist=True):
relative = os.path.relpath(path, self.project_root)
# Special case the Projects.
if relative == '.':
relative = ''
if relative.startswith('.'):
raise ValueError('path not in project; %r' % path)
with self.conn:
c = self.conn.cursor()
if entity_type is not None:
c.execute('SELECT entity_type, entity_id, path FROM entity_paths WHERE entity_type = ? AND path LIKE ?', (entity_type, relative + '%'))
else:
c.execute('SELECT entity_type, entity_id, path FROM entity_paths WHERE path LIKE ?', (relative + '%', ))
for row in c:
entity = self.sgfs.session.merge(dict(type=row[0], id=row[1]))
path = os.path.join(self.project_root, row[2])
if must_exist and not os.path.exists(path):
continue
yield path, entity
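# Illustrative sketch (not part of the original module): a typical round trip
# through the cache. 'sgfs' and 'shot' (an Entity) are hypothetical stand-ins,
# as is the project path.
def _example_path_cache(sgfs, shot):
    cache = PathCache(sgfs, '/projects/example_project')
    cache[shot] = '/projects/example_project/SEQ/AA/AA_001'
    # returns None (the default) if the path is no longer tagged with the entity
    return cache.get(shot, check_tags=True)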
|
bsd-3-clause
| -2,746,726,976,391,183,000
| 37.823129
| 151
| 0.557736
| false
| 4.218034
| false
| false
| false
|
rammstein/0install
|
zeroinstall/injector/driver.py
|
1
|
7097
|
"""
This class brings together a L{solve.Solver} to choose a set of implementations, a
L{fetch.Fetcher} to download additional components, and the user's configuration
settings.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, logging
from zeroinstall.injector import arch, reader
from zeroinstall.injector.model import network_offline
from zeroinstall.support import tasks
class Driver(object):
"""Chooses a set of implementations based on a policy.
Typical use:
1. Create a Driver object, giving it the requirements about the program to be run.
2. Call L{solve_with_downloads}. If more information is needed, a L{fetch.Fetcher} will be used to download it.
3. When all downloads are complete, the L{solver} contains the chosen versions.
4. Use L{get_uncached_implementations} to find where to get these versions and download them
using L{download_uncached_implementations}.
@ivar target_arch: target architecture for binaries (deprecated)
@type target_arch: L{arch.Architecture}
@ivar solver: solver used to choose a set of implementations
@type solver: L{solve.Solver}
@ivar watchers: callbacks to invoke after solving
"""
__slots__ = ['watchers', 'requirements', 'config', 'target_arch', 'solver']
def __init__(self, config, requirements):
"""@param config: The configuration settings to use
@type config: L{config.Config}
@param requirements: Details about the program we want to run
@type requirements: L{requirements.Requirements}
@since: 0.53"""
self.watchers = []
assert config
self.config = config
assert requirements
self.requirements = requirements
self.target_arch = arch.get_architecture(requirements.os, requirements.cpu)
from zeroinstall.injector.solver import DefaultSolver
self.solver = DefaultSolver(self.config)
logger.debug(_("Supported systems: '%s'"), arch.os_ranks)
logger.debug(_("Supported processors: '%s'"), arch.machine_ranks)
self.solver.extra_restrictions = requirements.get_extra_restrictions(self.config.iface_cache)
def get_uncached_implementations(self):
"""List all chosen implementations which aren't yet available locally.
@rtype: [(L{model.Interface}, L{model.Implementation})]"""
iface_cache = self.config.iface_cache
stores = self.config.stores
uncached = []
for uri, selection in self.solver.selections.selections.items():
impl = selection.impl
assert impl, self.solver.selections
if not impl.is_available(stores):
uncached.append((iface_cache.get_interface(uri), impl))
return uncached
@tasks.async
def solve_with_downloads(self, force = False, update_local = False):
"""Run the solver, then download any feeds that are missing or
that need to be updated. Each time a new feed is imported into
the cache, the solver is run again, possibly adding new downloads.
@param force: whether to download even if we're already ready to run.
@type force: bool
@param update_local: fetch PackageKit feeds even if we're ready to run.
@type update_local: bool"""
downloads_finished = set() # Successful or otherwise
downloads_in_progress = {} # URL -> Download
# There are three cases:
# 1. We want to run immediately if possible. If not, download all the information we can.
# (force = False, update_local = False)
# 2. We're in no hurry, but don't want to use the network unnecessarily.
# We should still update local information (from PackageKit).
# (force = False, update_local = True)
# 3. The user explicitly asked us to refresh everything.
# (force = True)
try_quick_exit = not (force or update_local)
while True:
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if try_quick_exit and self.solver.ready:
break
try_quick_exit = False
if not self.solver.ready:
force = True
for f in self.solver.feeds_used:
if f in downloads_finished or f in downloads_in_progress:
continue
if os.path.isabs(f):
if force:
try:
self.config.iface_cache.get_feed(f, force = True)
except reader.MissingLocalFeed as ex:
logger.warning("Reloading %s: %s", f, ex,
exc_info = True if logger.isEnabledFor(logging.INFO) else None)
downloads_in_progress[f] = tasks.IdleBlocker('Refresh local feed')
continue
elif f.startswith('distribution:'):
if force or update_local:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
elif force and self.config.network_use != network_offline:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
# Once we've starting downloading some things,
# we might as well get them all.
force = True
if not downloads_in_progress:
if self.config.network_use == network_offline:
logger.info(_("Can't choose versions and in off-line mode, so aborting"))
break
# Wait for at least one download to finish
blockers = downloads_in_progress.values()
yield blockers
tasks.check(blockers, self.config.handler.report_error)
for f in list(downloads_in_progress.keys()):
if f in downloads_in_progress and downloads_in_progress[f].happened:
del downloads_in_progress[f]
downloads_finished.add(f)
# Need to refetch any "distribution" feed that
# depends on this one
distro_feed_url = 'distribution:' + f
if distro_feed_url in downloads_finished:
downloads_finished.remove(distro_feed_url)
if distro_feed_url in downloads_in_progress:
del downloads_in_progress[distro_feed_url]
@tasks.async
def solve_and_download_impls(self, refresh = False, select_only = False):
"""Run L{solve_with_downloads} and then get the selected implementations too.
@type refresh: bool
@type select_only: bool
@raise SafeException: if we couldn't select a set of implementations
@since: 0.40"""
refreshed = self.solve_with_downloads(refresh)
if refreshed:
yield refreshed
tasks.check(refreshed)
if not self.solver.ready:
raise self.solver.get_failure_reason()
if not select_only:
downloaded = self.download_uncached_implementations()
if downloaded:
yield downloaded
tasks.check(downloaded)
def need_download(self):
"""Decide whether we need to download anything (but don't do it!)
@return: true if we MUST download something (feeds or implementations)
@rtype: bool"""
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if not self.solver.ready:
return True # Maybe a newer version will work?
if self.get_uncached_implementations():
return True
return False
def download_uncached_implementations(self):
"""Download all implementations chosen by the solver that are missing from the cache.
@rtype: L{zeroinstall.support.tasks.Blocker}"""
assert self.solver.ready, "Solver is not ready!\n%s" % self.solver.selections
return self.solver.selections.download_missing(self.config, include_packages = True)
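# Illustrative sketch (not part of the original module): a synchronous check a
# caller could build on the Driver above. 'config' and 'requirements' are
# assumed to be the usual Config and Requirements objects; no downloads are
# started here.
def _example_check_downloads(config, requirements):
    driver = Driver(config, requirements)
    if not driver.need_download():
        return []  # everything needed is already cached locally
    if driver.solver.ready:
        return driver.get_uncached_implementations()
    raise driver.solver.get_failure_reason()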
|
lgpl-2.1
| -4,867,127,432,715,335,000
| 35.963542
| 113
| 0.722418
| false
| 3.496059
| true
| false
| false
|
gleicon/RedisLive
|
src/dataprovider/redisprovider.py
|
1
|
11475
|
from api.util import settings
from datetime import datetime, timedelta
import redis
import json
import ast
class RedisStatsProvider(object):
"""A Redis based persistance to store and fetch stats"""
def __init__(self):
# redis server to use to store stats
stats_server = settings.get_redis_stats_server()
self.server = stats_server["server"]
self.port = stats_server["port"]
self.conn = redis.StrictRedis(host=self.server, port=self.port, db=0)
def save_memory_info(self, server, timestamp, used, peak):
"""Saves used and peak memory stats,
Args:
server (str): The server ID
timestamp (datetime): The time of the info.
used (int): Used memory value.
peak (int): Peak memory value.
"""
data = {"timestamp": timestamp.strftime('%s'),
"used": used,
"peak": peak}
self.conn.zadd(server + ":memory", timestamp.strftime('%s'), data)
def save_info_command(self, server, timestamp, info):
"""Save Redis info command dump
Args:
server (str): id of server
timestamp (datetime): Timestamp.
info (dict): The result of a Redis INFO command.
"""
self.conn.set(server + ":Info", json.dumps(info))
def save_monitor_command(self, server, timestamp, command, keyname,
argument):
"""save information about every command
Args:
server (str): Server ID
timestamp (datetime): Timestamp.
command (str): The Redis command used.
keyname (str): The key the command acted on.
argument (str): The args sent to the command.
"""
epoch = timestamp.strftime('%s')
current_date = timestamp.strftime('%y%m%d')
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store top command and key counts in sorted set for every second
# top N are easily available from sorted set in redis
# also keep a sorted set for every day
        # switch to daily stats when the stats requested are for a longer time period
command_count_key = server + ":CommandCount:" + epoch
pipeline.zincrby(command_count_key, command, 1)
command_count_key = server + ":DailyCommandCount:" + current_date
pipeline.zincrby(command_count_key, command, 1)
key_count_key = server + ":KeyCount:" + epoch
pipeline.zincrby(key_count_key, keyname, 1)
key_count_key = server + ":DailyKeyCount:" + current_date
pipeline.zincrby(key_count_key, command, 1)
# keep aggregate command in a hash
command_count_key = server + ":CommandCountBySecond"
pipeline.hincrby(command_count_key, epoch, 1)
command_count_key = server + ":CommandCountByMinute"
field_name = current_date + ":" + str(timestamp.hour) + ":"
field_name += str(timestamp.minute)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByHour"
field_name = current_date + ":" + str(timestamp.hour)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByDay"
field_name = current_date
pipeline.hincrby(command_count_key, field_name, 1)
# commit transaction to redis
pipeline.execute()
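    # Illustrative sketch (not part of the original class): for a hypothetical
    # server id 'server1', logging a GET on key 'foo' at 2012-01-15 10:20:30
    # touches keys shaped like these (the epoch value is only an example):
    #   server1:CommandCount:1326622830      sorted set, member 'GET'
    #   server1:DailyCommandCount:120115     sorted set, member 'GET'
    #   server1:KeyCount:1326622830          sorted set, member 'foo'
    #   server1:CommandCountBySecond         hash, field '1326622830'
    #   server1:CommandCountByMinute         hash, field '120115:10:20'
    #   server1:CommandCountByHour           hash, field '120115:10'
    #   server1:CommandCountByDay            hash, field '120115'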
def get_info(self, server):
"""Get info about the server
Args:
server (str): The server ID
"""
info = self.conn.get(server + ":Info")
# FIXME: If the collector has never been run we get a 500 here. `None`
# is not a valid type to pass to json.loads.
info = json.loads(info)
return info
def get_memory_info(self, server, from_date, to_date):
"""Get stats for Memory Consumption between a range of dates
Args:
server (str): The server ID
from_date (datetime): Get memory info from this date onwards.
to_date (datetime): Get memory info up to this date.
"""
memory_data = []
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
rows = self.conn.zrangebyscore(server + ":memory", start, end)
for row in rows:
# TODO: Check to see if there's not a better way to do this. Using
# eval feels like it could be wrong/dangerous... but that's just a
# feeling.
row = ast.literal_eval(row)
parts = []
# convert the timestamp
timestamp = datetime.fromtimestamp(int(row['timestamp']))
timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
memory_data.append([timestamp, row['peak'], row['used']])
return memory_data
def get_command_stats(self, server, from_date, to_date, group_by):
"""Get total commands processed in the given time period
Args:
server (str): The server ID
from_date (datetime): Get data from this date.
to_date (datetime): Get data to this date.
group_by (str): How to group the stats.
"""
s = []
time_stamps = []
key_name = ""
if group_by == "day":
key_name = server + ":CommandCountByDay"
t = from_date.date()
while t <= to_date.date():
s.append(t.strftime('%y%m%d'))
time_stamps.append(t.strftime('%s'))
t = t + timedelta(days=1)
elif group_by == "hour":
key_name = server + ":CommandCountByHour"
t = from_date
while t<= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
s.append(field_name)
time_stamps.append(t.strftime('%s'))
t = t + timedelta(seconds=3600)
elif group_by == "minute":
key_name = server + ":CommandCountByMinute"
t = from_date
while t <= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
field_name += ":" + str(t.minute)
s.append(field_name)
time_stamps.append(t.strftime('%s'))
t = t + timedelta(seconds=60)
else:
key_name = server + ":CommandCountBySecond"
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
for x in range(start, end + 1):
s.append(str(x))
time_stamps.append(x)
data = []
counts = self.conn.hmget(key_name, s)
for x in xrange(0,len(counts)):
# the default time format string
time_fmt = '%Y-%m-%d %H:%M:%S'
if group_by == "day":
time_fmt = '%Y-%m-%d'
elif group_by == "hour":
time_fmt = '%Y-%m-%d %H:00:00'
elif group_by == "minute":
time_fmt = '%Y-%m-%d %H:%M:00'
# get the count.
try:
if counts[x] is not None:
count = int(counts[x])
else:
count = 0
except Exception as e:
count = 0
# convert the timestamp
timestamp = int(time_stamps[x])
timestamp = datetime.fromtimestamp(timestamp)
timestamp = timestamp.strftime(time_fmt)
# add to the data
data.append([count, timestamp])
return reversed(data)
def get_top_commands_stats(self, server, from_date, to_date):
"""Get top commands processed in the given time period
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
counts = self.get_top_counts(server, from_date, to_date, "CommandCount",
"DailyCommandCount")
return reversed(counts)
def get_top_keys_stats(self, server, from_date, to_date):
"""Gets top comm processed
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
return self.get_top_counts(server, from_date, to_date, "KeyCount",
"DailyKeyCount")
# Helper methods
def get_top_counts(self, server, from_date, to_date, seconds_key_name,
day_key_name, result_count=None):
"""Top counts are stored in a sorted set for every second and for every
        day. ZUNIONSTORE across the time periods generates the results.
Args:
server (str): The server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
seconds_key_name (str): The key for stats at second resolution.
day_key_name (str): The key for stats at daily resolution.
Kwargs:
result_count (int): The number of results to return. Default: 10
"""
if result_count is None:
result_count = 10
# get epoch
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
diff = to_date - from_date
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store the set names to use in ZUNIONSTORE in a list
s = []
if diff.days > 2 :
# when difference is over 2 days, no need to check counts for every second
# Calculate:
# counts of every second on the start day
# counts of every day in between
# counts of every second on the end day
next_day = from_date.date() + timedelta(days=1)
prev_day = to_date.date() - timedelta(days=1)
from_date_end_epoch = int(next_day.strftime("%s")) - 1
to_date_begin_epoch = int(to_date.date().strftime("%s"))
# add counts of every second on the start day
for x in range(start, from_date_end_epoch + 1):
s.append(":".join([server, seconds_key_name, str(x)]))
# add counts of all days in between
t = next_day
while t <= prev_day:
s.append(":".join([server, day_key_name, t.strftime('%y%m%d')]))
t = t + timedelta(days=1)
# add counts of every second on the end day
for x in range(to_date_begin_epoch, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
else:
# add counts of all seconds between start and end date
for x in range(start, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
# store the union of all the sets in a temp set
temp_key_name = "_top_counts"
pipeline.zunionstore(temp_key_name, s)
pipeline.zrange(temp_key_name, 0, result_count - 1, True, True)
pipeline.delete(temp_key_name)
# commit transaction to redis
results = pipeline.execute()
result_data = []
for val, count in results[-2]:
result_data.append([val, count])
return result_data
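# Illustrative sketch (not part of the original module): saving a memory sample
# and reading it back. The server id and figures are hypothetical, and the
# provider assumes the RedisLive settings point at a reachable redis instance.
def _example_memory_roundtrip():
    provider = RedisStatsProvider()
    now = datetime.now()
    provider.save_memory_info('server1', now, used=1024, peak=2048)
    return provider.get_memory_info('server1', now - timedelta(minutes=5), now)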
|
mit
| -5,711,488,420,907,123,000
| 35.313291
| 88
| 0.546231
| false
| 4.080725
| false
| false
| false
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/verification_ip_flow_result_py3.py
|
1
|
1304
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowResult(Model):
"""Results of IP flow verification on the target resource.
:param access: Indicates whether the traffic is allowed or denied.
Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2017_10_01.models.Access
:param rule_name: Name of the rule. If input is not matched against any
security rule, it is not displayed.
:type rule_name: str
"""
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
}
def __init__(self, *, access=None, rule_name: str=None, **kwargs) -> None:
super(VerificationIPFlowResult, self).__init__(**kwargs)
self.access = access
self.rule_name = rule_name
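# Illustrative sketch (not part of the generated file): constructing the model
# directly; the values are hypothetical.
#
#   result = VerificationIPFlowResult(access='Allow',
#                                     rule_name='AllowInternetOutBound')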
|
mit
| -3,858,672,163,984,985,000
| 37.352941
| 78
| 0.597393
| false
| 4.30363
| false
| false
| false
|
CRFS/python3-ncplib
|
examples/time.py
|
1
|
1346
|
"""
NCP time data example.
Connects to a node and requests a single time capture to be performed. Prints the result to stdout.
"""
import asyncio
import ncplib
# The node to connect to. Can be a DNS name or an IP address.
NODE_HOST = "127.0.0.1"
# Frequency.
FREQ_HZ = 2400.12e6 # 2400.12 MHz
# Realtime bandwidth.
RTBW_HZ = 10e6 # 10 MHz
# Capture length.
DURATION_S = 1e-3 # 1 ms.
def split_milli(value):
n = int(value * 1e3)
return (n // 1000000000, n % 1000000000)
def split_nano(value):
n = int(value * 1e9)
return (n // 1000000000, n % 1000000000)
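# Worked example (not part of the original script): with the constants above,
#   split_milli(FREQ_HZ)   == split_milli(2400.12e6) == (2400, 120000000)
#   split_milli(RTBW_HZ)   == split_milli(10e6)      == (10, 0)
#   split_nano(DURATION_S) == split_nano(1e-3)       == (0, 1000000)
# i.e. each value is split into a whole part and a sub-unit remainder, which
# main() sends as the paired fields FCTR/FCTM, RBME/RBMI and LSEC/LNAN.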
async def main():
"""
The async main method.
Connects to a node and requests a single time capture to be performed. Prints the result to stdout.
"""
# Connect to the node.
async with await ncplib.connect(NODE_HOST) as connection:
# Send a single DSPC command to the node.
fctr, fctm = split_milli(FREQ_HZ)
rbme, rbmi = split_milli(RTBW_HZ)
lsec, lnan = split_nano(DURATION_S)
response = connection.send("DSPC", "TIME", FCTR=fctr, FCTM=fctm, RBME=rbme, RBMI=rbmi, LSEC=lsec, LNAN=lnan)
# Wait for the node to reply.
field = await response.recv()
print(field)
# Run the async main method if this file is run as a script.
if __name__ == "__main__":
asyncio.run(main())
|
mit
| -7,047,356,326,211,177,000
| 24.396226
| 116
| 0.643388
| false
| 3.017937
| false
| false
| false
|
cmput404wi16/metablog
|
account/models.py
|
1
|
1216
|
from django.db import models
from django.core import serializers
# Create your models here.
class User(models.Model):
uid = models.CharField(max_length=250, primary_key=True)
nameFirst = models.CharField(max_length=250)
nameLast = models.CharField(max_length=250)
githubId = models.CharField(max_length=25, default="abramhindle")
email = models.EmailField(max_length=254, default="")
# TODO check to see if this would be simpler
# friends = models.CharField(max_length=250)
# pending = models.CharField(max_length=250)
following = models.CharField(max_length=250, default="")
# TODO remove followers
followers = models.CharField(max_length=250, default="")
origin = models.CharField(max_length=250, default="local")
password = models.CharField(max_length=25, default="default")
profileImg = models.ImageField(upload_to="account/img/")
def getUserData(self):
return serializers.serialize('json', self.objects.defer("password"))
class AdminUser(models.Model):
#Still in dev, for now class exist
#methods need to be added
user = models.OneToOneField( User,
on_delete=models.CASCADE,
primary_key=True )
def whatDoesTheScouterSay():
print ("It's over 9000!")
|
mit
| 4,983,296,740,722,061,000
| 35.848485
| 70
| 0.73273
| false
| 3.286486
| false
| false
| false
|
zoltan-fedor/robo_car
|
nodes/drive_control.py
|
1
|
11947
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
#from std_msgs.msg import UInt16
from std_msgs.msg import Int32
import time
from pyfirmata import Arduino
on_hardware = True # whether we are running this node on the actual car (so it can access the IO board)
wheelpin = 5 # to which pin on the IO shield the wheel servo got connected
drivepin = 3 # to which pin on the IO shield the drive cable got connected
if on_hardware == True: # running on hardware -- we need to set the board connection
board = Arduino('/dev/ttyACM99', baudrate=57600)
board.servo_config(wheelpin, min_pulse=1, max_pulse=100, angle=90) # set initial direction to straight forward
board.servo_config(drivepin, min_pulse=1, max_pulse=20, angle=90) # set initial speed to natural
speed_natural = 90
speed_current_angle = speed_natural # this variable will carry the actual speed at any time and will be used to determine direction of change (in case of decay or full stop)
speed_min_angle_reverse = 75 # this is the angle below which the car start moving in reverse
speed_min_angle_forward = 107 # this is the angle above which the car start moving forward
speed_max_angle_reverse = 65 # maximum angle allowed in reverse (which is actually a minimum mathematically, as the angle goes 0-90)
speed_max_angle_forward = 115 # maximum angle allowed in forward
speed_decay_angle = 2 # how much we decrease the angle when there is a decay request
speed_change_angle = 1 # when we receive a request to change the speed, this is the angle change we will do
speed_change_angle_decrease = 2 # this is the speed slowdown (breaking)
speed_direction_change_delay = 2 # in sec - delay enforced between changing direction (forward-backward)
last_stop_timestamp = 0.0 # the last time we have reached the zero speed from a non-zero speed (used with the speed_direction_change_delay)
direction_natural = 90 # this is the natural (straight ahead) position of the wheel in angles
direction_current_angle = direction_natural # this variable will carry the actual direction angle at any time
direction_max_angle_left = 30 # maximum angle allowed when setting the direction to the left (which is actually a minimum mathematically, as the angle goes 0-90)
direction_max_angle_right = 145 # maximum angle allowed when setting the direction to the right
direction_decay_angle = 1 # how much we decrease the angle when there is a decay request
direction_change_angle = 7 # when we receive a request to change the direction, this is the angle change we will do
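# Worked example of the speed mapping above (derived from the constants and the
# handler logic below): starting from the neutral angle (90), a single 'U' request
# first jumps into the forward zone (speed_min_angle_forward = 107) and then adds
# speed_change_angle, so the drive servo ends up at angle 108; each further 'U'
# adds 1 degree up to speed_max_angle_forward (115). 'D' works the same way in the
# other direction, down towards speed_max_angle_reverse (65).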
####
# create the publishers to which this node will publish data to
pub_speed_angle = rospy.Publisher('drive_control_speed_angle', Int32, queue_size=10)
pub_direction_angle = rospy.Publisher('drive_control_direction_angle', Int32, queue_size=10)
def message_callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard speed/direction message %s", data.data)
speed_direction_instructions(data.data)
def decay_callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard decay message %s", data.data)
speed_direction_instructions(data.data)
def listener():
global pub_drive_angle
# In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('drive_control', anonymous=True)
if on_hardware == True:
rospy.loginfo("Running on hardware")
else:
rospy.loginfo("Not running on hardware (in simulation")
####
# initialization
# before starting we need to set the car to idle and wheels facing forward
set_speed_angle(speed_current_angle) # sets the speed to the current angle, which has a default of 90 at start
set_direction_angle(direction_current_angle) # sets the direction to the current angle, which has a default of 90 at start
rospy.loginfo("Started.")
####
# subscribe to the topics
rospy.Subscriber("drive_control_publish", String, message_callback)
rospy.Subscriber("drive_control_decay_publish", String, decay_callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
# this function will set the speed and direction of the car, based on the received instruction
def speed_direction_instructions(instruction):
global speed_current_angle # so we can change this global variable in this function
if(instruction == 'decay'): # this was a decay request
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(speed_decay_angle)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(-1*speed_decay_angle)
if(direction_current_angle < direction_natural + direction_decay_angle and direction_current_angle > direction_natural - direction_decay_angle):
# we need to set the direction to the natural direction, because the current angle is within the decay range of natural
change_direction(direction_natural)
else: # we are not within a decay range from the natural direction, so we will decay the direction angle by the decay
if(direction_current_angle < direction_natural): # current direction is to the left
change_direction(direction_decay_angle)
if(direction_current_angle > direction_natural): # current direction is to the right
change_direction(-1*direction_decay_angle)
if(instruction == 'U'): # this is a speed up request (~up button pressed)
if(speed_current_angle > speed_min_angle_reverse and speed_current_angle < speed_min_angle_forward): # currently we are in the neutral zone
speed_current_angle = speed_min_angle_forward
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(speed_change_angle_decrease)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(speed_change_angle)
if(instruction == 'D'): # this is a speed down request (~down button pressed)
if(speed_current_angle > speed_min_angle_reverse and speed_current_angle < speed_min_angle_forward): # currently we are in the neutral zone
speed_current_angle = speed_min_angle_reverse
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(-1*speed_change_angle)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(-1*speed_change_angle_decrease)
if(instruction == 'L'): # this is a turn left request (~left button pressed)
change_direction(direction_change_angle)
if(instruction == 'R'): # this is a turn right request (~right button pressed)
change_direction(-1*direction_change_angle)
# this function is called with the angle change request and will change the current angle with the amount requested
def change_speed(angle_change):
new_angle = speed_current_angle + angle_change
set_speed_angle(new_angle)
rospy.loginfo("Changed the speed by angle %i", angle_change)
# this function is called with the angle change request and will change the current angle with the amount requested
def change_direction(angle_change):
new_angle = direction_current_angle + angle_change
set_direction_angle(new_angle)
rospy.loginfo("Changed the direction by angle %i", angle_change)
# sets the speed to the angle requested
def set_speed_angle(angle):
global speed_current_angle # so we can change this global variable in this function
global last_stop_timestamp # so we can set this global variable in this function
movement_allowed = 'yes'
#rospy.loginfo("Value of speed_current_angle is %i", speed_current_angle)
#rospy.loginfo("Value of new angle to be set is %i", angle)
if(angle < speed_max_angle_reverse or angle > speed_max_angle_forward):
rospy.loginfo("Out of range angle was requested for speed: %i", angle)
else:
# the old (current) speed is NOT in the zero range but the new speed is in the zero range, then we need to set the last_stop_timestamp,
# which later we will use to determine whether the speed_direction_change_delay has passed yet
# but we only set this if hasn't been set already
if((speed_current_angle <= speed_min_angle_reverse or speed_current_angle >= speed_min_angle_forward)
and angle > speed_min_angle_reverse and angle < speed_min_angle_forward
and last_stop_timestamp == 0.0):
last_stop_timestamp = rospy.get_time() # populate the last_stop_timestamp with the unix timestamp (example: 1424637131.834309)
rospy.loginfo("Last stop timestamp set %f", last_stop_timestamp)
movement_allowed = 'yes'
else:
# the old (current) speed is in the zero range but the new speed is NOT in the zero range, then we need to check the last_stop_timestamp,
# whether the speed_direction_change_delay has passed already
if(speed_current_angle >= speed_min_angle_reverse and speed_current_angle <= speed_min_angle_forward
and (angle < speed_min_angle_reverse or angle > speed_min_angle_forward )):
                # if the speed_direction_change_delay has already passed (or there wasn't one) then we can start moving
if(rospy.get_time() > (last_stop_timestamp + speed_direction_change_delay)):
movement_allowed = 'yes'
last_stop_timestamp = 0.0
else:
movement_allowed = 'no'
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[drivepin].write(speed_natural) # we set the angle to the middle of the neutral zone, so we don't send power to the motors
rospy.loginfo("No movement allowed, because the speed_direction_change_delay hasn't passed yet!")
else:
movement_allowed = 'yes'
last_stop_timestamp = 0.0
if(movement_allowed == 'yes'):
if(angle > speed_min_angle_reverse and angle < speed_min_angle_forward): # if the request came to set the angle within the neutral range, then we set it to 90, so we don't send power to the motors
angle = speed_natural
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[drivepin].write(angle)
speed_current_angle = angle # overwrite the global variable with the new value
pub_speed_angle.publish(angle) # publish the angle we set to a topic so others can see it
rospy.loginfo("Set the speed to angle %i", angle)
# sets the direction to the angle requested
def set_direction_angle(angle):
global direction_current_angle # so we can change this global variable in this function
if(angle < direction_max_angle_left or angle > direction_max_angle_right):
rospy.loginfo("Out of range angle was requested for direction: %i", angle)
else:
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[wheelpin].write(angle)
direction_current_angle = angle # overwrite the global variable with the new value
pub_direction_angle.publish(angle) # publish the angle we set to a topic so others can see it
rospy.loginfo("Set the direction to angle %i", angle)
if __name__ == '__main__':
listener()
|
mit
| -8,419,473,920,683,098,000
| 59.035176
| 208
| 0.696493
| false
| 4.087239
| false
| false
| false
|
TetraAsh/baruwa2
|
baruwa/controllers/lists.py
|
1
|
12838
|
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Lists controller"
import socket
import struct
import logging
from urlparse import urlparse
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from webhelpers import paginate
from pylons.i18n.translation import _
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from repoze.what.predicates import not_anonymous
from sphinxapi import SphinxClient, SPH_MATCH_EXTENDED2
#from repoze.what.plugins.pylonshq import ActionProtector
from repoze.what.plugins.pylonshq import ControllerProtector
from baruwa.lib.dates import now
from baruwa.lib.base import BaseController, render
from baruwa.lib.helpers import flash, flash_alert
from baruwa.model.meta import Session
from baruwa.model.lists import List
from baruwa.lib.audit import audit_log
from baruwa.model.domains import Domain
from baruwa.lib.misc import check_num_param
from baruwa.lib.misc import ipaddr_is_valid, convert_list_to_json
from baruwa.forms.lists import list_forms
from baruwa.tasks.settings import update_serial
#from baruwa.lib.auth.predicates import CanAccessAccount
from baruwa.model.accounts import User, Address, domain_owners
from baruwa.lib.regex import EMAIL_RE, IPV4_NET_OR_RANGE_RE, DOM_RE
from baruwa.lib.audit.msgs.lists import *
log = logging.getLogger(__name__)
def make_item(form):
"Make a list item"
litem = List()
litem.user = c.user
litem.list_type = form.list_type.data
litem.from_address = form.from_address.data
return litem
def _set_type(obj):
"Set type of object"
if EMAIL_RE.match(obj.from_address):
obj.from_addr_type = 1
return
if DOM_RE.match(obj.from_address):
obj.from_addr_type = 2
return
if IPV4_NET_OR_RANGE_RE.match(obj.from_address):
obj.from_addr_type = 3
return
if ipaddr_is_valid(obj.from_address):
obj.from_addr_type = 4
return
@ControllerProtector(not_anonymous())
class ListsController(BaseController):
def __before__(self):
"set context"
BaseController.__before__(self)
if self.identity:
c.user = self.identity['user']
else:
c.user = None
c.selectedtab = 'lists'
c.is_ajax = request.is_xhr
def _user_addresses(self):
"Return user addresses"
userid = self.identity['user'].id
query1 = Session.query(User.email.label('email'))\
.filter_by(active=True, account_type=3, id=userid)
query2 = Session.query(Address.address.label('email'))\
.filter_by(enabled=True, user_id=userid)
return query1.union(query2)
def _get_listitem(self, itemid):
"Get a list item"
try:
item = Session.query(List).get(itemid)
except NoResultFound:
item = None
return item
def index(self, list_type=1, direction='dsc', order_by='id',
page=1, format=None):
"Page through lists"
total_found = 0
search_time = 0
num_items = session.get('lists_num_items', 10)
if direction == 'dsc':
sort = desc(order_by)
else:
sort = order_by
q = request.GET.get('q', None)
kwds = {}
if q:
kwds['presliced_list'] = True
conn = SphinxClient()
conn.SetMatchMode(SPH_MATCH_EXTENDED2)
conn.SetFilter('list_type', [int(list_type),])
if page == 1:
conn.SetLimits(0, num_items, 500)
else:
page = int(page)
offset = (page - 1) * num_items
conn.SetLimits(offset, num_items, 500)
try:
results = conn.Query(q, 'lists, lists_rt')
except (socket.timeout, struct.error):
redirect(request.path_qs)
if results and results['matches']:
ids = [hit['id'] for hit in results['matches']]
total_found = results['total_found']
search_time = results['time']
items = Session.query(List)\
.filter(List.list_type == list_type)\
.filter(List.id.in_(ids))\
.order_by(sort)\
.all()
listcount = total_found
else:
items = []
itemcount = 0
listcount = 0
else:
items = Session.query(List)\
.filter(List.list_type == list_type)\
.order_by(sort)
itemcount = Session.query(List.id)\
.filter(List.list_type == list_type)
if c.user.account_type != 1 and itemcount:
items = items.filter(List.user_id == c.user.id)
itemcount = itemcount.filter(List.user_id == c.user.id)
if not 'listcount' in locals():
listcount = itemcount.count()
records = paginate.Page(items,
page=int(page),
items_per_page=num_items,
item_count=listcount,
**kwds)
if format == 'json':
response.headers['Content-Type'] = 'application/json'
data = convert_list_to_json(records, list_type)
return data
c.list_type = list_type
c.page = records
c.direction = direction
c.order_by = order_by
c.q = q
c.total_found = total_found
c.search_time = search_time
return render('/lists/index.html')
def new(self):
"Add a new list item"
c.form = list_forms[c.user.account_type](request.POST,
csrf_context=session)
if c.user.is_domain_admin:
orgs = [group.id for group in c.user.organizations]
query = Session.query(Domain.name).join(domain_owners)\
.filter(domain_owners.c.organization_id.in_(orgs))
options = [(domain.name, domain.name) for domain in query]
c.form.to_domain.choices = options
if c.user.is_peleb:
query = self._user_addresses()
options = [(item.email, item.email) for item in query]
c.form.to_address.choices = options
if request.POST and c.form.validate():
# item = List()
# item.user = c.user
# item.list_type = c.form.list_type.data
# item.from_address = c.form.from_address.data
item = make_item(c.form)
_set_type(item)
aliases = []
if c.user.is_superadmin or c.user.is_peleb:
if c.form.to_address.data != '':
item.to_address = c.form.to_address.data
if ('add_to_alias' in c.form and c.form.add_to_alias.data
and c.user.is_peleb):
for new_addr in options:
if new_addr[0] == item.to_address:
continue
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = new_addr[0]
aliases.append(newitem)
else:
item.to_address = 'any'
if c.user.is_domain_admin:
if c.form.to_address.data in ['', 'any']:
item.to_address = c.form.to_domain.data
if c.form.add_to_alias.data:
for dom in options:
if dom[0] == item.to_address:
continue
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = dom[0]
aliases.append(newitem)
else:
item.to_address = "%s@%s" % (c.form.to_address.data,
c.form.to_domain.data)
if c.form.add_to_alias.data:
for dom in options:
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = "%s@%s" % \
(c.form.to_address.data, dom[0])
if newitem.to_address == item.to_address:
continue
aliases.append(newitem)
try:
Session.add(item)
Session.commit()
for alias in aliases:
try:
Session.add(alias)
Session.commit()
except IntegrityError:
pass
update_serial.delay()
if item.list_type == 1:
listname = _('Approved senders')
else:
listname = _('Banned senders')
info = LISTADD_MSG % dict(s=item.from_address, l=listname)
audit_log(c.user.username,
3, unicode(info), request.host,
request.remote_addr, now())
flash(_('The item has been added to the list'))
if not request.is_xhr:
redirect(url('lists-index',
list_type=c.form.list_type.data))
except IntegrityError:
Session.rollback()
flash_alert(_('The list item already exists'))
return render('/lists/add.html')
def list_delete(self, listid):
"Delete a list item"
item = self._get_listitem(listid)
if not item:
abort(404)
if c.user.account_type != 1 and c.user.id != item.user_id:
abort(403)
c.form = list_forms[c.user.account_type](request.POST,
item,
csrf_context=session)
if not c.user.is_superadmin:
del c.form.add_to_alias
if c.user.is_domain_admin:
orgs = [group.id for group in c.user.organizations]
query = Session.query(Domain.name).join(domain_owners)\
.filter(domain_owners.c.organization_id.in_(orgs))
options = [(domain.name, domain.name) for domain in query]
c.form.to_domain.choices = options
if c.user.is_peleb:
query = self._user_addresses()
options = [(addr.email, addr.email) for addr in query]
c.form.to_address.choices = options
c.id = item.id
if request.POST and c.form.validate():
if item.list_type == 1:
listname = _('Approved senders')
else:
listname = _('Banned senders')
name = item.from_address
Session.delete(item)
Session.commit()
update_serial.delay()
info = LISTDEL_MSG % dict(s=name, l=listname)
audit_log(c.user.username,
4, unicode(info), request.host,
request.remote_addr, now())
flash(_('The item has been deleted'))
if not request.is_xhr:
redirect(url(controller='lists'))
else:
c.delflag = True
return render('/lists/delete.html')
def setnum(self, format=None):
"Set number of items returned"
num = check_num_param(request)
if num and int(num) in [10, 20, 50, 100]:
num = int(num)
session['lists_num_items'] = num
session.save()
nextpage = request.headers.get('Referer', '/')
if '://' in nextpage:
from_url = urlparse(nextpage)
nextpage = from_url[2]
redirect(nextpage)
|
gpl-3.0
| 3,271,341,334,908,312,600
| 38.140244
| 80
| 0.531391
| false
| 4.030769
| false
| false
| false
|
luisza/dfva_client
|
src/client_fva/ui/requestauthentication.py
|
1
|
10627
|
import time
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt, pyqtSlot, QThread, pyqtSignal
from PyQt5.QtWidgets import QCompleter, QTableWidgetItem
from client_fva.models.ContactDropDown import ContactModel
from client_fva.models.MyRequest import MyRequestModel
from client_fva.session_storage import SessionStorage
from client_fva.ui.requestauthenticationui import Ui_RequestAuthentication
from client_fva.user_settings import UserSettings
class PersonAuthenticationOpers(QThread):
has_result = pyqtSignal(int)
has_changes = pyqtSignal(str, int, bool, str)
remove_check = pyqtSignal(str)
new_code = pyqtSignal(str, str)
def __init__(self, tid, person, identifications, user):
self.identifications = identifications
self.person = person
super(PersonAuthenticationOpers, self).__init__()
self.tid = tid
self.result = None
self.pending_check = {}
self.wait_time = UserSettings.getInstance().check_wait_time
self.session_storage = SessionStorage.getInstance()
self.myrequest = MyRequestModel(db=self.session_storage.db, user=user)
def log_transaction(self, identification, data):
self.has_changes.emit(identification, data['status'], False, data['status_text'])
self.myid = self.myrequest.add_myrequest(identification, 'autenticación', '', '', signed_document_path="",
transaction_status=data['status'], transaction_text=data['status_text'])
def log_check_transaction(self, identification, data):
self.has_changes.emit(identification, data['status'], data['received_notification'], data['status_text'])
self.myrequest.update_myrequest(self.myid, transaction_status=data['status'],
transaction_text=data['status_text'])
def run(self):
transactions = []
for identification in self.identifications:
result = self.person.authenticate(identification)
self.log_transaction(identification, result)
if result['status'] == 0:
self.pending_check[identification] = result['id']
self.session_storage.transactions[result['id_transaction']] = result['code']
transactions.append(result['id_transaction'])
self.new_code.emit(identification, result['code'])
else:
self.remove_check.emit(identification)
while self.pending_check:
for identification in list(self.pending_check.keys()):
result = self.person.check_authenticate(self.pending_check[identification])
self.log_check_transaction(identification, result)
if result['received_notification']:
del self.pending_check[identification]
self.remove_check.emit(identification)
time.sleep(self.wait_time)
for trans in transactions:
if trans in self.session_storage.transactions:
del self.session_storage.transactions[trans]
self.has_result.emit(self.tid)
class RequestAuthentication(Ui_RequestAuthentication):
CONNECTING = 0
CONNECTED = 1
REJECTED = 2
ERROR = 3
def __init__(self, widget, main_app, db, serial):
Ui_RequestAuthentication.__init__(self)
self.widget = widget
self.main_app = main_app
self.session_storage = SessionStorage.getInstance()
self.setupUi(self.widget)
self.person = self.session_storage.session_info[serial]['personclient']
self.user = self.session_storage.session_info[serial]['user']
self.contacts_model = ContactModel(user=self.user, db=db)
completer = QCompleter()
completer.setModel(self.contacts_model)
completer.setCaseSensitivity(Qt.CaseInsensitive)
completer.setFilterMode(Qt.MatchContains)
self.searchContact.setCompleter(completer)
self.add_contact.clicked.connect(lambda: self.add_contact_to_list())
self.requestAuthentication.clicked.connect(self.request_authentication)
self.cleanbtn.clicked.connect(self.cleantable)
self.auth_list = []
self.status_widgets = {}
self.code_widgets = {}
self.initialize()
def initialize(self):
self.contacts.setColumnCount(4)
self.contacts.setHorizontalHeaderItem(0, QTableWidgetItem("Estado"))
self.contacts.setHorizontalHeaderItem(1, QTableWidgetItem("Identificación"))
self.contacts.setHorizontalHeaderItem(2, QTableWidgetItem("Nombre"))
self.contacts.setHorizontalHeaderItem(3, QTableWidgetItem("Código"))
self.contacts.resizeColumnsToContents()
self.contacts_count = 0
self.contacts.contextMenuEvent = self.context_element_menu_event
def inactive_btn(self):
self.cleanbtn.setEnabled(False)
self.add_contact.setEnabled(False)
self.requestAuthentication.setEnabled(False)
def active_btn(self):
self.cleanbtn.setEnabled(True)
self.add_contact.setEnabled(True)
self.requestAuthentication.setEnabled(True)
def insert_item(self, identification, name):
status_widget = QTableWidgetItem()
status_widget.setIcon(QtGui.QIcon(":/images/autentication.png"))
code_widget = QTableWidgetItem("")
self.contacts.insertRow(self.contacts.rowCount())
self.contacts.setItem(self.contacts_count, 0, status_widget)
self.contacts.setItem(self.contacts_count, 1, QTableWidgetItem(identification))
self.contacts.setItem(self.contacts_count, 2, QTableWidgetItem(name))
self.contacts.setItem(self.contacts_count, 3, code_widget)
self.contacts_count += 1
self.status_widgets[identification] = status_widget
self.code_widgets[identification] = code_widget
self.contacts.resizeColumnsToContents()
def change_person_status(self,status_widget, status, error_text="Error o rechazo por parte del usuario"):
if status == self.CONNECTING:
status_widget.setIcon(QtGui.QIcon(":/images/connecting.png"))
status_widget.setToolTip('Conectando al servicio de firmado')
elif status == self.CONNECTED:
status_widget.setIcon(QtGui.QIcon(":/images/connected.png"))
status_widget.setToolTip('Persona autenticada satisfactoriamente')
elif status == self.REJECTED:
status_widget.setIcon(QtGui.QIcon(":/images/rejected.png"))
            status_widget.setToolTip('La persona rechazó la solicitud de autenticación')
elif status == self.ERROR:
status_widget.setIcon(QtGui.QIcon(":/images/error.png"))
status_widget.setToolTip(error_text)
def add_contact_to_list(self):
txt = self.searchContact.text()
id = self.contacts_model.deserialize_contact(txt)
if id:
if id not in self.auth_list:
if id != txt:
self.insert_item(id, txt)
else:
self.insert_item(id, '')
self.auth_list.append(id)
self.searchContact.setText('')
else:
QtWidgets.QMessageBox.warning(self.widget, 'Contacto ya existente',
"El contacto seleccionado fue agregado a la lista anteriormente.")
else:
QtWidgets.QMessageBox.warning(self.widget, 'Contacto no identificado',
"Lo ingresado no es un nombre de contacto o un número de identificación válido. Formato: 08-8888-8888 o 15 números para extranjeros")
def request_authentication(self):
self.inactive_btn()
self.requestAuthProgressBar.setRange(0, len(self.auth_list))
self.auth_pending = len(self.auth_list)
self.update_process_bar(0, "Enviando peticiones de autenticación")
self.pao = PersonAuthenticationOpers(1, self.person, self.auth_list, self.user)
self.pao.has_result.connect(self.end_authentication)
self.pao.has_changes.connect(self.check_transaction_change)
self.pao.remove_check.connect(self.check_transaction_end)
self.pao.new_code.connect(self.add_new_code)
self.pao.start()
def context_element_menu_event(self, pos):
if self.contacts.selectedIndexes():
selected = self.contacts.currentIndex()
if selected.isValid():
row, column = selected.row(), selected.column()
menu = QtWidgets.QMenu()
menu.setStyleSheet("QMenu::item{color:rgb(76, 118, 82);background-color:rgb(216, 230, 225);}")
delete_action = menu.addAction("Delete")
delete_action.setIcon(QtGui.QIcon(":images/delete.png"))
action = menu.exec_(self.contacts.mapToGlobal(pos.pos()))
if action == delete_action:
self.delete_element(row)
def delete_element(self, row):
self.contacts.removeRow(row)
self.auth_list.pop(row)
self.contacts_count -= 1
def cleantable(self):
for x in range(len(self.auth_list)):
self.contacts.removeRow(0)
self.auth_list.pop()
self.contacts.setRowCount(0)
self.contacts_count=0
def update_process_bar(self, value, text):
self.requestAuthProgressBar.setValue(value)
if text:
self.requestAuthProgressBar.setFormat(text)
def end_authentication(self, id):
self.update_process_bar(len(self.auth_list), 'Solicitud de autorizaciones completo')
self.active_btn()
def check_transaction_end(self, identification):
self.auth_pending -= 1
self.update_process_bar(len(self.auth_list) - self.auth_pending,
'Solicitudes faltantes %d'%self.auth_pending)
def check_transaction_change(self, identification, status, recieved, text):
# transaction_status
icon_status = 0
icon_tooltip = ''
if status == 0:
if recieved:
icon_status = self.CONNECTED
else:
icon_status = self.CONNECTING
elif status == 2:
icon_status = self.REJECTED
icon_tooltip = text
else:
icon_status = self.ERROR
icon_tooltip = text
self.change_person_status(self.status_widgets[identification], icon_status, icon_tooltip)
def add_new_code(self, identification, code):
self.code_widgets[identification].setText(code)
self.contacts.resizeColumnsToContents()
|
gpl-3.0
| -6,719,245,680,836,708,000
| 42.52459
| 150
| 0.647424
| false
| 4.026925
| false
| false
| false
|
gonesurfing/Quisk_rpi_remote
|
sdriqpkg/quisk_hardware.py
|
1
|
2872
|
# Please do not change this hardware control module.
# It provides support for the SDR-IQ by RfSpace.
from __future__ import print_function
import _quisk as QS
import sdriq
from quisk_hardware_model import Hardware as BaseHardware
class Hardware(BaseHardware):
decimations = [1250, 600, 500, 360]
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
self.use_sidetone = 1
self.clock = conf.sdriq_clock
self.rf_gain_labels = ('RF +30', 'RF +20', 'RF +10', 'RF 0 dB')
if conf.fft_size_multiplier == 0:
conf.fft_size_multiplier = 3 # Set size needed by VarDecim
def open(self):
return sdriq.open_samples() # Return a config message
def close(self):
sdriq.close_samples()
def OnButtonRfGain(self, event):
"""Set the SDR-IQ preamp gain and attenuator state.
sdriq.gain_sdriq(gstate, gain)
gstate == 0: Gain must be 0, -10, -20, or -30
gstate == 1: Attenuator is on and gain is 0 to 127 (7 bits)
gstate == 2: Attenuator is off and gain is 0 to 127 (7 bits)
gain for 34, 24, 14, 4 db is 127, 39, 12, 4.
"""
btn = event.GetEventObject()
n = btn.index
if n == 0:
sdriq.gain_sdriq(2, 127)
elif n == 1:
sdriq.gain_sdriq(2, 39)
elif n == 2:
sdriq.gain_sdriq(2, 12)
elif n == 3:
sdriq.gain_sdriq(1, 12)
else:
print ('Unknown RfGain')
def ChangeFrequency(self, tune, vfo, source='', band='', event=None):
if vfo:
sdriq.freq_sdriq(vfo)
return tune, vfo
def ChangeBand(self, band):
# band is a string: "60", "40", "WWV", etc.
btn = self.application.BtnRfGain
if btn:
if band in ('160', '80', '60', '40'):
btn.SetLabel('RF +10', True)
elif band in ('20',):
btn.SetLabel('RF +20', True)
else:
btn.SetLabel('RF +20', True)
def VarDecimGetChoices(self): # return text labels for the control
l = [] # a list of sample rates
for dec in self.decimations:
l.append(str(int(float(self.clock) / dec / 1e3 + 0.5)))
return l
def VarDecimGetLabel(self): # return a text label for the control
return "Sample rate ksps"
def VarDecimGetIndex(self): # return the current index
return self.index
def VarDecimSet(self, index=None): # set decimation, return sample rate
if index is None: # initial call to set decimation before the call to open()
rate = self.application.vardecim_set # May be None or from different hardware
try:
dec = int(float(self.clock / rate + 0.5))
self.index = self.decimations.index(dec)
except:
try:
self.index = self.decimations.index(self.conf.sdriq_decimation)
except:
self.index = 0
else:
self.index = index
dec = self.decimations[self.index]
sdriq.set_decimation(dec)
return int(float(self.clock) / dec + 0.5)
|
gpl-2.0
| -3,532,599,379,455,024,600
| 33.60241
| 84
| 0.625
| false
| 3.055319
| false
| false
| false
|
yuxng/Deep_ISM
|
ISM/lib/datasets/imdb.py
|
1
|
3612
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import os.path as osp
import PIL
import numpy as np
import scipy.sparse
import datasets
from ism.config import cfg
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def evaluate_proposals(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def append_flipped_images(self):
num_images = self.num_images
for i in xrange(num_images):
entry = {'image' : self.roidb[i]['image'],
'depth' : self.roidb[i]['depth'],
'label' : self.roidb[i]['label'],
'meta_data' : self.roidb[i]['meta_data'],
'class_colors' : self.roidb[i]['class_colors'],
'flipped' : True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
print 'finish appending flipped images'
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
def fast_hist(self, a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)
|
mit
| 5,910,891,603,174,442,000
| 28.365854
| 85
| 0.564784
| false
| 3.926087
| false
| false
| false
|
astonshane/davisputnamGo
|
davisputnam/graph.py
|
1
|
1255
|
import json
import sys
from pprint import pprint
from ete3 import Tree, TreeStyle, TextFace
from PIL import Image, ImageDraw
def parseTree(root):
tree = Tree()
tree.name = root['Name']
tree.add_face(TextFace(root['Split'], fgcolor="red"), column=0, position="branch-bottom")
if root['Children']:
for child in root['Children']:
tree.children.append(parseTree(child))
return tree
'''
with open('sample_tree.json', 'w') as outfile:
json.dump(obj, outfile, sort_keys=True, indent=4, separators=(',', ': '))
'''
if __name__ == '__main__':
ts = TreeStyle()
ts.show_leaf_name = False
root = json.loads(open(sys.argv[1]).read())
pprint(root)
tree_root = parseTree(root)
print tree_root
for child in tree_root.traverse():
# add a marker with the name of each node, at each node
child.add_face(TextFace(child.name), column=0, position="branch-top")
# render the file and save it
fname = sys.argv[1][:-4] + "png"
tree_root.render(fname, tree_style=ts, w=5000)
im = Image.open(fname)
(x, y) = im.size
draw = ImageDraw.Draw(im)
draw.rectangle((0, y*.45, x*.25, y), fill="white")
im.save(fname, "PNG")
# tree_root.show(tree_style=ts)
|
mit
| -8,743,040,261,105,409,000
| 25.702128
| 93
| 0.623108
| false
| 3.251295
| false
| false
| false
|
stanislavb/nagios-snmp-location
|
nagios-plugins/check_snmp_location.py
|
1
|
1637
|
#!/usr/bin/env python
# This script checks the standard SNMP location oid
# and saves it in a memcached database with hostname as key.
#
# FreeBSD requirements:
# Compile net-snmp with python bindings
# Install py-memcached
# Nagios exit codes:
# 0 OK
# 1 WARNING
# 2 CRITICAL
# 3 UNKNOWN
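# Example invocation (hostname and community below are placeholders):
# ./check_snmp_location.py -H switch01.example.com -C public
# prints "OK: <location>" and exits 0 when the location OID is readable, otherwise
# "UNKNOWN: ..." with exit code 3.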
import netsnmp
import memcache
from optparse import OptionParser
from sys import exit
# Config
# Either hostname/IP or UNIX socket
memcached_address = ['unix:/var/run/memcached/memcached.sock']
default_community = "public"
location_oid = '1.3.6.1.2.1.1.6'
snmp_version = 2
# Command line option parsing and help text (-h)
usage = "usage: %prog -H host_or_IP -C snmp_community"
parser = OptionParser(usage=usage)
parser.add_option("-H", "--host", dest="host", help="hostname or IP address")
parser.add_option("-C", "--community", dest="community", default=default_community, help="SNMP community")
(options, args) = parser.parse_args()
# We must have a host
if not options.host:
print("UNKNOWN: No hostname or IP to check")
exit(3) # UNKNOWN
# Let's get SNMP location
var = netsnmp.Varbind(location_oid, '0')
res = netsnmp.snmpget(var, Version=snmp_version, DestHost=options.host, Community=options.community, Retries=1)
location = res[0]
if location is not None:
print("OK: " + location)
# Memcached
try:
mc = memcache.Client(memcached_address, debug=0)
mc.set(options.host, location)
except Exception:
# We don't care if memcached doesn't work
pass
exit(0) # OK
print("UNKNOWN: error for host " + options.host + " and SNMP community " + options.community)
exit(3) # UNKNOWN
|
unlicense
| 1,020,803,993,351,950,000
| 27.224138
| 111
| 0.703115
| false
| 3.382231
| false
| false
| false
|
flavio/scsgate
|
scsgate/reactor.py
|
1
|
1869
|
""" This module contains the definition of the Reactor class.
This one is useful when dealing with concurrent access to the SCSGate
device """
import queue
import threading
from scsgate.tasks import MonitorTask, ExecutionError
class Reactor(threading.Thread):
""" Allows concurrent access to the SCSGate device """
def __init__(self, connection, handle_message, logger=None):
""" Initialize the instance
Arguments
connection: a scsgate.Connection object
handle_message: callback function to invoke whenever a new message
is received
logger: instance of logger
"""
threading.Thread.__init__(self)
self._connection = connection
self._handle_message = handle_message
self._terminate = False
self._logger = logger
self._request_queue = queue.Queue()
def run(self):
""" Starts the thread """
task = None
monitor_task = MonitorTask(
notification_endpoint=self._handle_message)
while True:
if self._terminate:
self._logger.info("scsgate.Reactor exiting")
self._connection.close()
break
try:
task = self._request_queue.get_nowait()
self._logger.debug("scsgate.Reactor: got task {}".format(task))
except queue.Empty:
task = monitor_task
try:
task.execute(connection=self._connection)
except ExecutionError as err:
self._logger.error(err)
def stop(self):
""" Blocks the thread, performs cleanup of the associated
connection """
self._terminate = True
def append_task(self, task):
""" Adds a tasks to the list of the jobs to execute """
self._request_queue.put(task)
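# Minimal usage sketch (illustrative only; `conn`, `log` and `task` are assumed to be
# a scsgate.Connection, a logging.Logger and a scsgate.tasks task created elsewhere):
#
#     reactor = Reactor(connection=conn, handle_message=print, logger=log)
#     reactor.start()            # monitor loop begins serving queued tasks
#     reactor.append_task(task)  # picked up on a later loop iteration
#     reactor.stop()             # asks the thread to close the connection and exit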
|
mit
| 3,353,941,284,899,801,000
| 29.639344
| 79
| 0.596041
| false
| 4.804627
| false
| false
| false
|
asedunov/intellij-community
|
python/helpers/pydev/_pydevd_bundle/pydevd_comm.py
|
1
|
60832
|
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                                     PYDB      id                              notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
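# Illustrative only: a single wire message built per the format described above,
# i.e. "id\tsequence-num\ttext\n". A CMD_STEP_INTO (107) request from the debugger
# for some thread could travel as the line
#
#     107\t5\t<thread_id>
#
# where 5 is an odd, debugger-originated sequence number and <thread_id> stands in
# for the real thread identifier; the payload of each command is defined by the
# table above.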
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, dict_contains, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, IS_PY36_OR_GREATER, \
STATE_RUN
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER, norm_file_to_client
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
ID_TO_MEANING = {
'101': 'CMD_RUN',
'102': 'CMD_LIST_THREADS',
'103': 'CMD_THREAD_CREATE',
'104': 'CMD_THREAD_KILL',
'105': 'CMD_THREAD_SUSPEND',
'106': 'CMD_THREAD_RUN',
'107': 'CMD_STEP_INTO',
'108': 'CMD_STEP_OVER',
'109': 'CMD_STEP_RETURN',
'110': 'CMD_GET_VARIABLE',
'111': 'CMD_SET_BREAK',
'112': 'CMD_REMOVE_BREAK',
'113': 'CMD_EVALUATE_EXPRESSION',
'114': 'CMD_GET_FRAME',
'115': 'CMD_EXEC_EXPRESSION',
'116': 'CMD_WRITE_TO_CONSOLE',
'117': 'CMD_CHANGE_VARIABLE',
'118': 'CMD_RUN_TO_LINE',
'119': 'CMD_RELOAD_CODE',
'120': 'CMD_GET_COMPLETIONS',
'121': 'CMD_CONSOLE_EXEC',
'122': 'CMD_ADD_EXCEPTION_BREAK',
'123': 'CMD_REMOVE_EXCEPTION_BREAK',
'124': 'CMD_LOAD_SOURCE',
'125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
'126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
'127': 'CMD_SET_NEXT_STATEMENT',
'128': 'CMD_SMART_STEP_INTO',
'129': 'CMD_EXIT',
'130': 'CMD_SIGNATURE_CALL_TRACE',
'131': 'CMD_SET_PY_EXCEPTION',
'132': 'CMD_GET_FILE_CONTENTS',
'133': 'CMD_SET_PROPERTY_TRACE',
'134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
'135': 'CMD_RUN_CUSTOM_OPERATION',
'136': 'CMD_GET_BREAKPOINT_EXCEPTION',
'137': 'CMD_STEP_CAUGHT_EXCEPTION',
'138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
'139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
'140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
'141': 'CMD_ENABLE_DONT_TRACE',
'142': 'CMD_SHOW_CONSOLE',
'143': 'CMD_GET_ARRAY',
'144': 'CMD_STEP_INTO_MY_CODE',
'145': 'CMD_GET_CONCURRENCY_EVENT',
'146': 'CMD_SHOW_RETURN_VALUES',
'147': 'CMD_INPUT_REQUESTED',
'148': 'CMD_GET_DESCRIPTION',
'149': 'CMD_PROCESS_CREATED',
'150': 'CMD_SHOW_CYTHON_WARNING',
'501': 'CMD_VERSION',
'502': 'CMD_RETURN',
'901': 'CMD_ERROR',
}
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
""" levels are:
0 most serious warnings/errors
1 warnings/significant events
2 informational trace
"""
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
#yes, we can have errors printing if the console of the program has been finished (and we're still trying to print something)
try:
sys.stderr.write('%s\n' % (args,))
except:
pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
'''
Holder for the global debugger.
'''
global_dbg = None # Note: don't rename (the name is used in our attach to process)
#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
return GlobalDebuggerHolder.global_dbg
GetGlobalDebugger = get_global_debugger # Backward-compatibility
#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
created_pydb_daemon_threads = {}
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.killReceived = False
self.pydev_do_not_trace = True
self.is_pydev_daemon_thread = True
def run(self):
created_pydb_daemon = self.created_pydb_daemon_threads
created_pydb_daemon[self] = 1
try:
try:
if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, because that leads to 'threading'
                    # being imported a second time and a new instance of the main thread being created
import org.python.core as PyCore #@UnresolvedImport
ss = PyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
PyCore.Py.setSystemState(ss)
self._on_run()
except:
if sys is not None and traceback is not None:
traceback.print_exc()
finally:
del created_pydb_daemon[self]
def _on_run(self):
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def do_kill_pydev_thread(self):
#that was not working very well because jython gave some socket errors
self.killReceived = True
def _stop_trace(self):
if self.pydev_do_not_trace:
disable_tracing = True
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Reader")
from _pydevd_bundle.pydevd_process_net_command import process_net_command
self.process_net_command = process_net_command
self.global_debugger_holder = GlobalDebuggerHolder
def do_kill_pydev_thread(self):
#We must close the socket so that it doesn't stay halted there.
self.killReceived = True
try:
self.sock.shutdown(SHUT_RD) #shutdown the socket for read
except:
#just ignore that
pass
def _on_run(self):
self._stop_trace()
read_buffer = ""
try:
while not self.killReceived:
try:
r = self.sock.recv(1024)
except:
if not self.killReceived:
traceback.print_exc()
self.handle_except()
return #Finished communication.
#Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
#internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
#on python 2 may need to be converted to the filesystem encoding).
if hasattr(r, 'decode'):
r = r.decode('utf-8')
read_buffer += r
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
sys.stderr.flush()
if len(read_buffer) == 0:
self.handle_except()
break
while read_buffer.find('\n') != -1:
command, read_buffer = read_buffer.split('\n', 1)
args = command.split('\t', 2)
try:
cmd_id = int(args[0])
pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
self.process_command(cmd_id, int(args[1]), args[2])
except:
traceback.print_exc()
sys.stderr.write("Can't process net command: %s\n" % command)
sys.stderr.flush()
except:
traceback.print_exc()
self.handle_except()
def handle_except(self):
self.global_debugger_holder.global_dbg.finish_debugging_session()
def process_command(self, cmd_id, seq, text):
self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Writer")
self.cmdQueue = _queue.Queue()
if pydevd_vm_type.get_vm_type() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def add_command(self, cmd):
""" cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody dies
self.cmdQueue.put(cmd)
def _on_run(self):
""" just loop and write responses """
self._stop_trace()
get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
try:
while True:
try:
try:
if get_has_timeout:
cmd = self.cmdQueue.get(1, 0.1)
else:
time.sleep(.01)
cmd = self.cmdQueue.get(0)
except _queue.Empty:
if self.killReceived:
try:
self.sock.shutdown(SHUT_WR)
self.sock.close()
except:
pass
return #break if queue is empty and killReceived
else:
continue
except:
#pydevd_log(0, 'Finishing debug communication...(1)')
#when liberating the thread here, we could have errors because we were shutting down
#but the thread was still not liberated
return
out = cmd.outgoing
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
out_message = 'sending cmd --> '
out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
out_message += ' '
out_message += unquote(unquote(out)).replace('\n', ' ')
try:
sys.stderr.write('%s\n' % (out_message,))
except:
pass
if IS_PY3K:
out = bytearray(out, 'utf-8')
                self.sock.send(out) #TODO: this does not guarantee that all messages are sent (and jython does not have a sendall)
if cmd.id == CMD_EXIT:
break
if time is None:
break #interpreter shutdown
time.sleep(self.timeout)
except Exception:
GlobalDebuggerHolder.global_dbg.finish_debugging_session()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
traceback.print_exc()
def empty(self):
return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
""" binds to a port, waits for the debugger to connect """
s = socket(AF_INET, SOCK_STREAM)
s.settimeout(None)
try:
from socket import SO_REUSEPORT
s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
except ImportError:
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('', port))
pydevd_log(1, "Bound to port ", str(port))
try:
s.listen(1)
newSock, _addr = s.accept()
pydevd_log(1, "Connection accepted")
# closing server socket is not necessary but we don't need it
s.shutdown(SHUT_RDWR)
s.close()
return newSock
except:
sys.stderr.write("Could not bind to port: %s\n" % (port,))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) #TODO: is it safe?
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
""" connects to a host/port """
pydevd_log(1, "Connecting to ", host, ":", str(port))
s = socket(AF_INET, SOCK_STREAM)
MAX_TRIES = 100
i = 0
while i<MAX_TRIES:
try:
s.connect((host, port))
except:
i+=1
time.sleep(0.2)
continue
pydevd_log(1, "Connected.")
return s
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
""" Commands received/sent over the network.
Command can represent command received from the debugger,
or one to be sent by daemon.
"""
next_seq = 0 # sequence numbers
def __init__(self, id, seq, text):
""" smart handling of parameters
if sequence is 0, new sequence will be generated
if text has carriage returns they'll be replaced"""
self.id = id
if seq == 0:
NetCommand.next_seq += 2
seq = NetCommand.next_seq
self.seq = seq
self.text = text
encoded = quote(to_string(text), '/<>_=" \t')
self.outgoing = '%s\t%s\t%s\n' % (id, seq, encoded)
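    # Illustrative note (not in the original source): the outgoing payload built
    # above is a single line of the form "<id>\t<seq>\t<quoted text>\n"; the text
    # is URL-quoted except for the characters /<>_=" space and tab, and sequence
    # numbers generated here are assigned two apart (2, 4, 6, ...), presumably so
    # they never collide with the sequence numbers chosen by the IDE side.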
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_process_created_message(self):
cmdText = '<process/>'
return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
def make_show_cython_warning_message(self):
try:
return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_custom_frame_created_message(self, frameId, frameDescription):
frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_list_threads_message(self, seq):
""" returns thread listing as XML """
try:
t = threading.enumerate()
cmd_text = ["<xml>"]
append = cmd_text.append
for i in t:
                if i.isAlive():
append(self._thread_to_xml(i))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_variable_changed_message(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def make_io_message(self, v, ctx, dbg=None):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdio 2 for stderr
@param dbg: If not none, add to the writer
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= \t'))
net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_version_message(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_thread_killed_message(self, id):
try:
return NetCommand(CMD_THREAD_KILL, 0, str(id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_suspend_str(self, thread_id, frame, stop_reason, message):
""" <xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
<var variable stuffff....
</frame>
</thread>
"""
cmd_text_list = ["<xml>"]
append = cmd_text_list.append
make_valid_xml_value = pydevd_xml.make_valid_xml_value
if message:
message = make_valid_xml_value(message)
append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))
curr_frame = frame
try:
while curr_frame:
#print cmdText
my_id = id(curr_frame)
#print "id is ", my_id
if curr_frame.f_code is None:
break #Iron Python sometimes does not have it!
my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
if my_name is None:
break #Iron Python sometimes does not have it!
#print "name is ", my_name
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
myFile = norm_file_to_client(abs_path_real_path_and_base[0])
if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
# myFile is a byte string encoded using the file system encoding
# convert it to utf8
myFile = myFile.decode(file_system_encoding).encode("utf-8")
#print "file is ", myFile
#myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
myLine = str(curr_frame.f_lineno)
#print "line is ", myLine
#the variables are all gotten 'on-demand'
#variables = pydevd_xml.frame_vars_to_xml(curr_frame.f_locals)
variables = ''
append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name)))
append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
append(variables)
append("</frame>")
curr_frame = curr_frame.f_back
except :
traceback.print_exc()
append("</thread></xml>")
return ''.join(cmd_text_list)
def make_thread_suspend_message(self, thread_id, frame, stop_reason, message):
try:
return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_description_message(self, seq, payload):
try:
return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
try:
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_custom_operation_message(self, seq, payload):
try:
return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_load_source_message(self, seq, source, dbg=None):
try:
net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_show_console_message(self, thread_id, frame):
try:
return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, ''))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_input_requested_message(self, started):
try:
return NetCommand(CMD_INPUT_REQUESTED, 0, started)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_exit_message(self):
try:
net = NetCommand(CMD_EXIT, 0, '')
except:
net = self.make_error_message(0, get_exception_traceback_str())
return net
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
""" internal commands are generated/executed by the debugger.
The reason for their existence is that some commands have to be executed
        on specific threads. These are the InternalThreadCommands that get
        posted to PyDB.cmdQueue.
"""
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
def do_it(self, dbg):
raise NotImplementedError("you have to override do_it")
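# Minimal sketch of a custom internal command (illustrative comment only; how a
# command gets queued onto PyDB.cmdQueue is handled elsewhere in pydevd):
#
#   class InternalLogMessage(InternalThreadCommand):
#       def __init__(self, thread_id, text):
#           self.thread_id = thread_id
#           self.text = text
#       def do_it(self, dbg):
#           cmd = dbg.cmd_factory.make_io_message(self.text, 1)
#           dbg.writer.add_command(cmd)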
class ReloadCodeCommand(InternalThreadCommand):
def __init__(self, module_name, thread_id):
self.thread_id = thread_id
self.module_name = module_name
self.executed = False
self.lock = thread.allocate_lock()
def can_be_executed_by(self, thread_id):
if self.thread_id == '*':
return True #Any thread can execute it!
return InternalThreadCommand.can_be_executed_by(self, thread_id)
def do_it(self, dbg):
self.lock.acquire()
try:
if self.executed:
return
self.executed = True
finally:
self.lock.release()
module_name = self.module_name
if not dict_contains(sys.modules, module_name):
if '.' in module_name:
new_module_name = module_name.split('.')[-1]
if dict_contains(sys.modules, new_module_name):
module_name = new_module_name
if not dict_contains(sys.modules, module_name):
sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
# Too much info...
# sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
else:
sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
from _pydevd_bundle import pydevd_reload
if pydevd_reload.xreload(sys.modules[module_name]):
sys.stderr.write('pydev debugger: reload finished\n')
else:
sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def do_it(self, dbg):
pydevd_log(1, "killing ", str(self.thread_id))
cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = -1
t.additional_info.pydev_step_stop = None
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id):
self.thread_id = thread_id
self.cmd_id = cmd_id
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id, line, func_name):
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
if IS_PY2:
if isinstance(func_name, unicode):
# On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
func_name = func_name.encode('utf-8')
self.func_name = func_name
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_next_line = int(self.line)
t.additional_info.pydev_func_name = self.func_name
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attributes = attrs
def do_it(self, dbg):
""" Converts request into python variable """
try:
xml = "<xml>"
_typeName, valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
if valDict is None:
valDict = {}
keys = valDict.keys()
if _typeName != "OrderedDict" and not IS_PY36_OR_GREATER:
if hasattr(keys, 'sort'):
keys.sort(compare_object_attrs) #Python 3.0 does not have it
else:
if IS_PY3K:
keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
else:
keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
for k in keys:
xml += pydevd_xml.var_to_xml(valDict[k], to_string(k))
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.name = attrs.split("\t")[-1]
self.attrs = attrs
self.roffset = int(roffset)
self.coffset = int(coffset)
self.rows = int(rows)
self.cols = int(cols)
self.format = format
def do_it(self, dbg):
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format )
cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
""" changes the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attr = attr
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_variable_changed_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
def do_it(self, dbg):
""" Converts request into python variable """
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
hidden_ns = pydevconsole.get_ipython_hidden_vars()
xml = "<xml>"
xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
dbg.writer.add_command(cmd)
else:
#pydevd_vars.dump_frames(self.thread_id)
#don't print this error: frame not found: means that the client is not synchronized (but that's ok)
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
self.doExec = doExec
self.doTrim = doTrim
self.temp_name = temp_name
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
if self.temp_name != "":
pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
""" Gets the completions in a given scope """
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Converts request into completions """
try:
remove_path = None
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
""" Fetch the variable description stub from the debug console
"""
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
description = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(self.sequence, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
""" Send details of exception raised while evaluating conditional breakpoint """
def __init__(self, thread_id, exc_type, stacktrace):
self.sequence = 0
self.thread_id = thread_id
self.stacktrace = stacktrace
self.exc_type = exc_type
def do_it(self, dbg):
try:
callstack = "<xml>"
makeValid = pydevd_xml.make_valid_xml_value
for filename, line, methodname, methodobj in self.stacktrace:
if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
# filename is a byte string encoded using the file system encoding
# convert it to utf8
filename = filename.decode(file_system_encoding).encode("utf-8")
callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
% (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
callstack += "</xml>"
cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
""" Send details of the exception that was caught and where we've broken in.
"""
def __init__(self, thread_id, arg, curr_frame_id):
'''
:param arg: exception type, description, traceback object
'''
self.sequence = 0
self.thread_id = thread_id
self.curr_frame_id = curr_frame_id
self.arg = arg
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
del self.arg
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
""" Send details of the exception that was caught and where we've broken in.
"""
def __init__(self, thread_id):
self.sequence = 0
self.thread_id = thread_id
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
""" Execute the given command in the debug console """
def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.line = line
self.buffer_output = buffer_output
def do_it(self, dbg):
""" Create an XML for console output, error and more (true/false)
<xml>
<output message=output_message></output>
<error message=error_message></error>
<more>true/false</more>
</xml>
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
console_message = pydevd_console.execute_console_command(
frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
else:
from _pydevd_bundle.pydevd_console import ConsoleMessage
console_message = ConsoleMessage()
console_message.add_console_message(
pydevd_console.CONSOLE_ERROR,
"Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
)
cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
""" Run a custom command on an expression
"""
def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attrs = attrs
self.style = style
self.code_or_file = unquote_plus(encoded_code_or_file)
self.fnname = fnname
def do_it(self, dbg):
try:
res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
self.style, self.code_or_file, self.fnname)
resEncoded = quote_plus(res)
cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
""" Fetch the completions in the debug console
"""
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
completions_xml = pydevd_console.get_completions(frame, self.act_tok)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
try:
#don't trace new threads created by console command
disable_trace_thread_modules()
result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.add_command(cmd)
finally:
enable_trace_thread_modules()
sys.stderr.flush()
sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
try:
# there was a deadlock here when I did not remove the tracing function when thread was dead
threads = threading.enumerate()
for i in threads:
tid = get_thread_id(i)
if thread_id == tid or thread_id.endswith('|' + tid):
return i
sys.stderr.write("Could not find thread %s\n" % thread_id)
sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in threads])
sys.stderr.flush()
except:
traceback.print_exc()
return None
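# Illustrative example (comment only): pydevd_find_thread_by_id(get_thread_id(threading.currentThread()))
# returns the current thread object, while an unknown id prints the available
# thread ids to stderr and returns None.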
|
apache-2.0
| 896,342,124,411,002,400
| 40.837689
| 191
| 0.513513
| false
| 4.328755
| false
| false
| false
|
suutari-ai/shoop
|
shuup/core/api/users.py
|
3
|
1754
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend, FilterSet
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from shuup.api.mixins import PermissionHelperMixin
class UserSerializer(ModelSerializer):
class Meta:
        model = get_user_model()
        fields = "__all__"
class UserFilter(FilterSet):
class Meta:
model = get_user_model()
fields = ['email']
class UserViewSet(PermissionHelperMixin, ModelViewSet):
"""
retrieve: Fetches a user by its ID.
list: Lists all users.
delete: Deletes an user.
If the object is related to another one and the relationship is protected, an error will be returned.
create: Creates a new user.
update: Fully updates an existing user.
You must specify all parameters to make it possible to overwrite all attributes.
partial_update: Updates an existing user.
You can update only a set of attributes.
"""
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = UserFilter
def get_view_name(self):
return _("Users")
@classmethod
def get_help_text(cls):
return _("Users can be listed, fetched, created, updated and deleted.")
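# Illustrative wiring (not part of this file): the viewset above is typically
# exposed through a DRF router; the "users" prefix below is an assumption.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register("users", UserViewSet)
#   urlpatterns = router.urls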
|
agpl-3.0
| 8,015,017,041,828,015,000
| 27.754098
| 105
| 0.706956
| false
| 4.146572
| false
| false
| false
|
m0mik/gr-osmosdr
|
grc/gen_osmosdr_blocks.py
|
1
|
11905
|
"""
Copyright 2012 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
MAIN_TMPL = """\
<?xml version="1.0"?>
<block>
<name>$(title) $sourk.title()</name>
<key>$(prefix)_$(sourk)</key>
<category>$($sourk.title())s</category>
<throttle>1</throttle>
<import>import osmosdr</import>
<make>osmosdr.$(sourk)( args="numchan=" + str(\$nchan) + " " + \$args )
self.\$(id).set_sample_rate(\$sample_rate)
#for $n in range($max_nchan)
\#if \$nchan() > $n
self.\$(id).set_center_freq(\$freq$(n), $n)
self.\$(id).set_freq_corr(\$corr$(n), $n)
#if $sourk == 'source':
self.\$(id).set_dc_offset_mode(\$dc_offset_mode$(n), $n)
self.\$(id).set_iq_balance_mode(\$iq_balance_mode$(n), $n)
self.\$(id).set_gain_mode(\$gain_mode$(n), $n)
#end if
self.\$(id).set_gain(\$gain$(n), $n)
self.\$(id).set_if_gain(\$if_gain$(n), $n)
self.\$(id).set_bb_gain(\$bb_gain$(n), $n)
self.\$(id).set_antenna(\$ant$(n), $n)
self.\$(id).set_bandwidth(\$bw$(n), $n)
\#end if
#end for
</make>
<callback>set_sample_rate(\$sample_rate)</callback>
#for $n in range($max_nchan)
<callback>set_center_freq(\$freq$(n), $n)</callback>
<callback>set_freq_corr(\$corr$(n), $n)</callback>
#if $sourk == 'source':
<callback>set_dc_offset_mode(\$dc_offset_mode$(n), $n)</callback>
<callback>set_iq_balance_mode(\$iq_balance_mode$(n), $n)</callback>
<callback>set_gain_mode(\$gain_mode$(n), $n)</callback>
#end if
<callback>set_gain(\$gain$(n), $n)</callback>
<callback>set_if_gain(\$if_gain$(n), $n)</callback>
<callback>set_bb_gain(\$bb_gain$(n), $n)</callback>
<callback>set_antenna(\$ant$(n), $n)</callback>
<callback>set_bandwidth(\$bw$(n), $n)</callback>
#end for
<param>
<name>$(dir.title())put Type</name>
<key>type</key>
<type>enum</type>
<option>
<name>Complex float32</name>
<key>fc32</key>
<opt>type:fc32</opt>
</option>
</param>
<param>
<name>Device Arguments</name>
<key>args</key>
<value></value>
<type>string</type>
<hide>
\#if \$args()
none
\#else
part
\#end if
</hide>
</param>
<param>
<name>Num Channels</name>
<key>nchan</key>
<value>1</value>
<type>int</type>
#for $n in range(1, $max_nchan+1)
<option>
<name>$(n)</name>
<key>$n</key>
</option>
#end for
</param>
<param>
<name>Sample Rate (sps)</name>
<key>sample_rate</key>
<value>samp_rate</value>
<type>real</type>
</param>
$params
<check>$max_nchan >= \$nchan</check>
<check>\$nchan > 0</check>
<$sourk>
<name>$dir</name>
<type>\$type.type</type>
<nports>\$nchan</nports>
</$sourk>
<doc>
The osmocom $sourk block:
While primarily being developed for the OsmoSDR hardware, this block as well supports:
#if $sourk == 'source':
* FUNcube Dongle through libgnuradio-fcd
* FUNcube Dongle Pro+ through gr-fcdproplus
* sysmocom OsmoSDR Devices through libosmosdr
* RTL2832U based DVB-T dongles through librtlsdr
* RTL-TCP spectrum server (see librtlsdr project)
* MSi2500 based DVB-T dongles through libmirisdr
* gnuradio .cfile input through libgnuradio-blocks
* RFSPACE SDR-IQ, SDR-IP, NetSDR (incl. X2 option)
* AirSpy Wideband Receiver through libairspy
#end if
* Great Scott Gadgets HackRF through libhackrf
* Nuand LLC bladeRF through libbladeRF library
* Ettus USRP Devices through Ettus UHD library
* Fairwaves UmTRX through Fairwaves' fork of UHD
By using the osmocom $sourk block you can take advantage of a common software api in your application(s) independent of the underlying radio hardware.
Output Type:
This parameter controls the data type of the stream in gnuradio. Only complex float32 samples are supported at the moment.
Device Arguments:
The device argument is a comma delimited string used to locate devices on your system. Device arguments for multiple devices may be given by separating them with a space.
Use the device id or name/serial (if applicable) to specify a certain device or list of devices. If left blank, the first device found will be used.
Examples:
Optional arguments are placed into [] brackets, remove the brackets before using them! Specific variable values are separated with a |, choose one of them. Variable values containing spaces shall be enclosed in '' as demonstrated in examples section below.
Lines ending with ... mean it's possible to bind devices together by specifying multiple device arguments separated with a space.
#if $sourk == 'source':
fcd=0[,device=hw:2][,type=2]
miri=0[,buffers=32] ...
rtl=serial_number ...
rtl=0[,rtl_xtal=28.8e6][,tuner_xtal=28.8e6] ...
rtl=1[,buffers=32][,buflen=N*512] ...
rtl=2[,direct_samp=0|1|2][,offset_tune=0|1] ...
rtl_tcp=127.0.0.1:1234[,psize=16384][,direct_samp=0|1|2][,offset_tune=0|1] ...
osmosdr=0[,buffers=32][,buflen=N*512] ...
file='/path/to/your file',rate=1e6[,freq=100e6][,repeat=true][,throttle=true] ...
netsdr=127.0.0.1[:50000][,nchan=2]
sdr-ip=127.0.0.1[:50000]
sdr-iq=/dev/ttyUSB0
airspy=0
#end if
hackrf=0[,buffers=32]
bladerf=0[,fpga='/path/to/the/bitstream.rbf']
uhd[,serial=...][,lo_offset=0][,mcr=52e6][,nchan=2][,subdev='\\\\'B:0 A:0\\\\''] ...
Num Channels:
Selects the total number of channels in this multi-device configuration. Required when specifying multiple device arguments.
Sample Rate:
The sample rate is the number of samples per second output by this block on each channel.
Frequency:
The center frequency is the frequency the RF chain is tuned to.
Freq. Corr.:
The frequency correction factor in parts per million (ppm). Set to 0 if unknown.
#if $sourk == 'source':
DC Offset Mode:
Controls the behavior of hardware DC offset correction.
Off: Disable correction algorithm (pass through).
Manual: Keep last estimated correction when switched from Automatic to Manual.
Automatic: Periodically find the best solution to compensate for DC offset.
This functionality is available for USRP devices only.
IQ Balance Mode:
Controls the behavior of software IQ imbalance correction.
Off: Disable correction algorithm (pass through).
Manual: Keep last estimated correction when switched from Automatic to Manual.
Automatic: Periodically find the best solution to compensate for image signals.
This functionality depends on http://cgit.osmocom.org/cgit/gr-iqbal/
Gain Mode:
Chooses between the manual (default) and automatic gain mode where appropriate.
To allow manual control of RF/IF/BB gain stages, manual gain mode must be configured.
Currently, only RTL-SDR devices support automatic gain mode.
#end if
RF Gain:
Overall RF gain of the device.
IF Gain:
Overall intermediate frequency gain of the device.
This setting is available for RTL-SDR and OsmoSDR devices with E4000 tuners and HackRF Jawbreaker in receive and transmit mode. Observations lead to a reasonable gain range from 15 to 30dB.
BB Gain:
Overall baseband gain of the device.
This setting is available for HackRF Jawbreaker in receive mode. Observations lead to a reasonable gain range from 15 to 30dB.
Antenna:
For devices with only one antenna, this may be left blank.
Otherwise, the user should specify one of the possible antenna choices.
Bandwidth:
Set the bandpass filter on the radio frontend. To use the default (automatic) bandwidth filter setting, this should be zero.
See the OsmoSDR project page for more detailed documentation:
http://sdr.osmocom.org/trac/wiki/GrOsmoSDR
http://sdr.osmocom.org/trac/wiki/rtl-sdr
http://sdr.osmocom.org/trac/
</doc>
</block>
"""
PARAMS_TMPL = """
<param>
<name>Ch$(n): Frequency (Hz)</name>
<key>freq$(n)</key>
<value>100e6</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): Freq. Corr. (ppm)</name>
<key>corr$(n)</key>
<value>0</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
#if $sourk == 'source':
<param>
<name>Ch$(n): DC Offset Mode</name>
<key>dc_offset_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Off</name>
<key>0</key>
</option>
<option>
<name>Manual</name>
<key>1</key>
</option>
<option>
<name>Automatic</name>
<key>2</key>
</option>
</param>
<param>
<name>Ch$(n): IQ Balance Mode</name>
<key>iq_balance_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Off</name>
<key>0</key>
</option>
<option>
<name>Manual</name>
<key>1</key>
</option>
<option>
<name>Automatic</name>
<key>2</key>
</option>
</param>
<param>
<name>Ch$(n): Gain Mode</name>
<key>gain_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Manual</name>
<key>0</key>
</option>
<option>
<name>Automatic</name>
<key>1</key>
</option>
</param>
#end if
<param>
<name>Ch$(n): RF Gain (dB)</name>
<key>gain$(n)</key>
<value>10</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): IF Gain (dB)</name>
<key>if_gain$(n)</key>
<value>20</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): BB Gain (dB)</name>
<key>bb_gain$(n)</key>
<value>20</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): Antenna</name>
<key>ant$(n)</key>
<value></value>
<type>string</type>
<hide>
\#if not \$nchan() > $n
all
\#elif \$ant$(n)()
none
\#else
part
\#end if
</hide>
</param>
<param>
<name>Ch$(n): Bandwidth (Hz)</name>
<key>bw$(n)</key>
<value>0</value>
<type>real</type>
<hide>
\#if not \$nchan() > $n
all
\#elif \$bw$(n)()
none
\#else
part
\#end if
</hide>
</param>
"""
def parse_tmpl(_tmpl, **kwargs):
from Cheetah import Template
return str(Template.Template(_tmpl, kwargs))
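# Quick illustration (comment only): parse_tmpl fills the Cheetah placeholders
# from keyword arguments, so
#   parse_tmpl('<name>$(title) $sourk.title()</name>', title='osmocom', sourk='source')
# should render roughly as '<name>osmocom Source</name>'.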
max_num_channels = 5
import os.path
if __name__ == '__main__':
import sys
for file in sys.argv[1:]:
head, tail = os.path.split(file)
if tail.startswith('rtlsdr'):
title = 'RTL-SDR'
prefix = 'rtlsdr'
elif tail.startswith('osmosdr'):
title = 'osmocom'
prefix = 'osmosdr'
else: raise Exception, 'file %s has wrong syntax!'%tail
if tail.endswith ('source.xml'):
sourk = 'source'
dir = 'out'
elif tail.endswith ('sink.xml'):
sourk = 'sink'
dir = 'in'
else: raise Exception, 'is %s a source or sink?'%file
params = ''.join([parse_tmpl(PARAMS_TMPL, n=n, sourk=sourk) for n in range(max_num_channels)])
open(file, 'w').write(parse_tmpl(MAIN_TMPL,
max_nchan=max_num_channels,
params=params,
title=title,
prefix=prefix,
sourk=sourk,
dir=dir,
))
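# Example invocation (illustrative; the real file names come from the build system):
#   python gen_osmosdr_blocks.py rtlsdr_source.xml osmosdr_source.xml osmosdr_sink.xml
# File names must start with 'rtlsdr' or 'osmosdr' and end with 'source.xml' or
# 'sink.xml', otherwise the checks above raise an exception.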
|
gpl-3.0
| -936,261,014,248,224,000
| 29.762274
| 256
| 0.646031
| false
| 2.972534
| false
| false
| false
|
ptonini/Stratus
|
lib/classes.py
|
1
|
7366
|
from time import sleep
__author__ = 'ptonini'
import re
import os
import sys
import time
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
class Tracks:
def __init__(self, source):
if isinstance(source, dict):
self.__dict__.update(source)
elif isinstance(source, list):
full_filename = source[0] + source[1]
try:
os.path.isfile(full_filename)
audio = MP3(full_filename)
tag = EasyID3(full_filename)
except Exception:
print 'Invalid file', full_filename
else:
self.path = source[0]
self.filename = source[1]
self.full_filename = full_filename
self.timestamp = int(os.path.getmtime(self.full_filename))
self.length = audio.info.length
if 'genre' in tag:
self.genre = tag['genre'][0]
if 'artist' in tag:
self.artist = tag['artist'][0]
if 'performer' in tag:
self.album_artist = tag['performer'][0]
if 'album' in tag:
self.album = tag['album'][0]
if "date" in tag:
self.year = tag['date'][0]
if 'tracknumber' in tag:
self.track_num = tag['tracknumber'][0]
if 'title' in tag:
self.title = tag['title'][0]
if 'discnumber' in tag:
self.disc_num = tag['discnumber'][0]
else:
self.disc_num = "1"
def update_db(self, db):
if hasattr(self, '_id'):
db.tracks.update({'_id': self._id}, self.__dict__)
print 'Updated to DB:', self.filename
else:
track_count = db.tracks.find({'filename': self.filename}).count()
if track_count == 0:
db.tracks.insert(self.__dict__)
print 'Added to DB:', self.filename
elif track_count > 1:
print 'Error: duplicate tracks on database:', self.filename
def upload_to_gmusic(self, mm):
if not hasattr(self, 'gmusic_id'):
r = mm.upload(self.full_filename, enable_matching=True)
if not r[0] == {}:
self.gmusic_id = r[0][self.full_filename]
print 'Uploaded:', self.filename
elif not r[1] == {}:
self.gmusic_id = r[1][self.full_filename]
print 'Matched: ', self.filename
elif not r[2] == {}:
if 'TrackSampleResponse code 4' in r[2][self.full_filename]:
self.gmusic_id = re.search("\((.*)\)", str(r[2][self.full_filename])).group(1)
print 'Exists: ', self.filename
else:
print 'Error: could no upload or match', self.filename
class Playlists:
def __init__(self, source, db=None, playlists_home=None):
if isinstance(source, dict):
if 'id' in source:
self.full_filename = playlists_home + '/' + source['name'].encode('utf-8') + '.m3u'
self.name = source['name']
self.timestamp = int(int(source['lastModifiedTimestamp'])/1000000)
self.tracks = list()
print self.name
for track in source['tracks']:
self.tracks.append(db.tracks.find_one({'gmusic_id': track['trackId']})['_id'])
self.gmusic_id = source['id']
else:
self.__dict__.update(source)
elif isinstance(source, list):
self.full_filename = os.path.join(source[0], source[1])
self.name = source[1][:-4]
self.timestamp = int(os.path.getmtime(self.full_filename))
with open(self.full_filename, 'r+') as file:
self.tracks = list()
for line in file.readlines():
if line != '\n':
self.tracks.append(db.tracks.find_one({'filename': line[:-1]})['_id'])
def update_db(self, db):
if hasattr(self, '_id'):
print 'Updating playlist "' + self.name + '" on database'
self.__find_one_and_update_db(db, {'_id': self._id})
else:
count = db.playlists.find({'name': self.name}).count()
if count == 0:
print 'Adding playlist "' + self.name + '" to database.'
db.playlists.insert(self.__dict__)
elif count == 1:
print 'Updating playlist "' + self.name + '" on database'
self.__find_one_and_update_db(db, {'name': self.name})
else:
print 'Error: duplicate playlists on database:', self.name
def update_gmusic(self, db, mc, gm_playlists):
        if hasattr(self, 'gmusic_id'):
            matched_gmusic_id = False
            for gm_playlist in gm_playlists:
                if self.gmusic_id == gm_playlist['id']:
                    self.__find_most_recent_and_update_gmusic(db, mc, gm_playlist)
                    matched_gmusic_id = True
                    break
            if not matched_gmusic_id:
                print 'Error - could not match gmusic_id:', self.name
else:
matched_lists = list()
for gm_playlist in gm_playlists:
if self.name == gm_playlist['name']:
matched_lists.append(gm_playlist)
if len(matched_lists) == 0:
self.gmusic_id = mc.create_playlist(self.name)
self.__build_list_and_update_gmusic(db, mc)
elif len(matched_lists) == 1:
self.gmusic_id = matched_lists[0]['id']
self.__find_most_recent_and_update_gmusic(db, mc, matched_lists[0])
else:
print 'Error - duplicate playlists on gmusic:', matched_lists[0]['name']
def __find_one_and_update_db(self, db, criteria):
playlist = db.playlists.find_one(criteria)
if self.timestamp < playlist['timestamp']:
self.tracks = playlist['tracks']
db.playlists.update(criteria, self.__dict__)
def __build_list_and_update_gmusic(self, db, mc):
new_list = list()
for track_id in self.tracks:
new_list.append(db.tracks.find_one({'_id': track_id})['gmusic_id'])
try:
mc.add_songs_to_playlist(self.gmusic_id, new_list)
except:
print 'Error'
sys.exit(1)
def __find_most_recent_and_update_gmusic(self, db, mc, gm_playlist):
gm_timestamp = int(gm_playlist['lastModifiedTimestamp'])/1000000
if self.timestamp > gm_timestamp:
old_list = list()
for entry in gm_playlist['tracks']:
old_list.append(entry['id'])
print 'Updating playlist "' + self.name + '"',
mc.remove_entries_from_playlist(old_list)
time.sleep(len(old_list)/90 )
self.__build_list_and_update_gmusic(db, mc)
print ' finished'
else:
self.timestamp = gm_timestamp
track_list = list()
for track in gm_playlist['tracks']:
track_list.append(db.tracks.find_one({'gmusic_id': track['trackId']})['_id'])
self.tracks = track_list
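# Rough usage sketch (illustrative comment only; the MongoDB handle `db` and the
# gmusicapi Musicmanager/Mobileclient instances `mm`/`mc` are created elsewhere
# in this project):
#
#   track = Tracks(['/music/album/', 'song.mp3'])   # path must end with a separator
#   track.update_db(db)
#   track.upload_to_gmusic(mm)
#
#   playlist = Playlists(['/music/playlists', 'favourites.m3u'], db=db)
#   playlist.update_db(db)
#   playlist.update_gmusic(db, mc, mc.get_all_user_playlist_contents())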
|
unlicense
| -336,839,685,327,072,450
| 40.150838
| 99
| 0.509503
| false
| 3.994577
| false
| false
| false
|
2gis/vmmaster
|
vmpool/app.py
|
1
|
1295
|
# coding: utf-8
import logging
from flask import Flask
from core.config import config
from core.utils import JSONEncoder
log = logging.getLogger(__name__)
class Provider(Flask):
def __init__(self, *args, **kwargs):
from core.db import Database
from core.sessions import Sessions
from vmpool.virtual_machines_pool import VirtualMachinesPool
super(Provider, self).__init__(*args, **kwargs)
self.running = True
self.json_encoder = JSONEncoder
self.database = Database()
self.sessions = Sessions(self.database, self.app_context)
self.pool = VirtualMachinesPool(app=self, name=config.PROVIDER_NAME)
self.pool.start_workers()
def cleanup(self):
try:
log.info("Cleanup...")
self.pool.stop_workers()
log.info("Cleanup was done")
except:
log.exception("Cleanup was finished with errors")
def stop(self):
self.running = False
def register_blueprints(app):
from vmpool.api import api
app.register_blueprint(api, url_prefix='/api')
def create_app():
if config is None:
raise Exception("Need to setup config.py in application directory")
app = Provider(__name__)
register_blueprints(app)
return app
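# Illustrative startup sketch (not part of this module; host and port are placeholders):
#
#   app = create_app()
#   try:
#       app.run(host='0.0.0.0', port=9001)
#   finally:
#       app.cleanup()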
|
mit
| -5,654,950,707,962,474,000
| 25.428571
| 76
| 0.644015
| false
| 4.098101
| false
| false
| false
|
bitforks/drawbot
|
drawBot/ui/codeEditor.py
|
1
|
49280
|
import AppKit
import objc
from keyword import kwlist
import re
from pygments.lexers import PythonLexer, get_lexer_by_name
from pygments.token import *
from pygments.style import Style
from pygments.styles.default import DefaultStyle
try:
import jedi
hasJedi = True
except:
hasJedi = False
from vanilla import *
from lineNumberRulerView import NSLineNumberRuler
from drawBot.misc import getDefault, getFontDefault, getColorDefault, DrawBotError
from drawBot.drawBotDrawingTools import _drawBotDrawingTool
variableChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
fallbackTextColor = AppKit.NSColor.blackColor()
fallbackBackgroundColor = AppKit.NSColor.whiteColor()
fallbackHightLightColor = AppKit.NSColor.selectedTextBackgroundColor()
fallbackFont = AppKit.NSFont.fontWithName_size_("Menlo", 10)
if not fallbackFont:
fallbackFont = AppKit.NSFont.fontWithName_size_("Monaco", 10)
basicLineHeightMultiple = 1.2
basicParagraph = AppKit.NSMutableParagraphStyle.alloc().init()
basicParagraph.setDefaultTabInterval_(28.0)
basicParagraph.setTabStops_(AppKit.NSArray.array())
basicParagraph.setLineHeightMultiple_(basicLineHeightMultiple)
fallbackTypeAttributes = {
AppKit.NSFontAttributeName: fallbackFont,
AppKit.NSLigatureAttributeName: 0,
AppKit.NSParagraphStyleAttributeName: basicParagraph
}
fallbackTracebackAttributes = dict(fallbackTypeAttributes)
fallbackTracebackAttributes[AppKit.NSForegroundColorAttributeName] = AppKit.NSColor.redColor()
fallbackStyles = [
(Token, '#000000'),
(Text, ''),
(Error, '#FF0000'),
(Punctuation, '#4C4C4C'),
(Keyword, '#4978FC'),
(Keyword.Namespace, '#1950FD'),
(Number, '#CC5858'),
(Number.Float, ''),
(Number.Oct, ''),
(Number.Hex, ''),
(Name, ''),
(Name.Tag, '#fb660a'),
(Name.Variable, '#fb660a'),
(Name.Attribute, '#ff0086'),
(Name.Function, '#ff0086'),
(Name.Class, '#ff0086'),
(Name.Constant, '#0086d2'),
(Name.Namespace, ''),
(Name.Builtin, '#31A73E'),
(Name.Builtin.Pseudo, '#FF8700'),
(Name.Exception, '#FF1400'),
(Name.Decorator, ''),
(Operator, '#6D37C9'),
(Operator.Word, '#6D37C9'),
(Comment, '#A3A3A3'),
(String, '#FC00E7'),
(String.Doc, '#FC00E7'),
]
fallbackStyleDict = {}
for key, value in fallbackStyles:
fallbackStyleDict[str(key)] = value
def styleFromDefault():
styles = dict()
tokens = getDefault("PyDETokenColors", fallbackStyleDict)
for key, value in tokens.items():
token = string_to_tokentype(key)
if value and not value.startswith("#"):
value = "#%s" % value
styles[token] = value
style = type('DrawBotStyle', (Style,), dict(styles=styles))
style.background_color = _NSColorToHexString(getColorDefault("PyDEBackgroundColor", fallbackBackgroundColor))
style.highlight_color = _NSColorToHexString(getColorDefault("PyDEHightLightColor", fallbackHightLightColor))
return style
def outputTextAttributesForStyles(styles=None, isError=False):
if styles is None:
styles = styleFromDefault()
if isError:
style = styles.style_for_token(Error)
else:
style = styles.style_for_token(Token)
attr = _textAttributesForStyle(style)
for key in (AppKit.NSForegroundColorAttributeName, AppKit.NSUnderlineColorAttributeName):
if key in attr:
attr[key] = _hexToNSColor(attr[key])
return attr
class _JumpToLineSheet(object):
def __init__(self, callback, parentWindow):
self._callback = callback
self.w = Sheet((210, 80), parentWindow=parentWindow)
self.w.text = TextBox((15, 15, 200, 22), "Jump to line number:")
self.w.lineNumber = EditText((-55, 17, -15, 18), sizeStyle="small")
self.w.cancelButton = Button((-170, -30, -80, 20), "Cancel", callback=self.cancelCallback, sizeStyle="small")
self.w.cancelButton.bind(".", ["command"])
self.w.cancelButton.bind(unichr(27), [])
self.w.okButton = Button((-70, -30, -10, 20), "OK", callback=self.okCallback, sizeStyle="small")
self.w.setDefaultButton(self.w.okButton)
self.w.open()
def okCallback(self, sender):
value = self.w.lineNumber.get()
try:
value = int(value.strip())
except:
value = None
self._callback(value)
self.closeCallback(sender)
def cancelCallback(self, sender):
self._callback(None)
self.closeCallback(sender)
def closeCallback(self, sender):
self.w.close()
def _hexToNSColor(color, default=AppKit.NSColor.blackColor()):
if color is None:
return default
if len(color) != 6:
return default
r = int(color[0:2], 16) / 255.
g = int(color[2:4], 16) / 255.
b = int(color[4:6], 16) / 255.
return AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, 1)
def _hexStringToNSColor(txt, default=AppKit.NSColor.blackColor()):
if not txt.startswith("#"):
raise DrawBotError("Not a hex color, should start with '#'")
return _hexToNSColor(txt[1:], default)
def _NSColorToHexString(color):
color = color.colorUsingColorSpaceName_(AppKit.NSCalibratedRGBColorSpace)
r = color.redComponent() * 255
g = color.greenComponent() * 255
b = color.blueComponent() * 255
return "#%02X%02X%02X" % (r, g, b)
def _reverseMap(sourceMap):
destMap = dict()
for key, item in sourceMap.items():
destMap[item] = key
return destMap
_textAttributesForStyleCache = {}
def _clearTextAttributesForStyleCache():
_textAttributesForStyleCache.clear()
def _textAttributesForStyle(style, font=None, token=None):
if font is None:
font = getFontDefault("PyDEFont", fallbackFont)
if token and token in _textAttributesForStyleCache:
return _textAttributesForStyleCache[token]
attr = {
AppKit.NSLigatureAttributeName: 0,
AppKit.NSParagraphStyleAttributeName: basicParagraph,
}
if style.get("italic", False) and style.get("bold", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
boldItalic = fontManager.convertFont_toHaveTrait_(font, AppKit.NSBoldFontMask | AppKit.NSItalicFontMask)
if boldItalic is not None:
font = boldItalic
elif style.get("italic", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
italic = fontManager.convertFont_toHaveTrait_(font, AppKit.NSItalicFontMask)
if italic is not None:
font = italic
elif style.get("bold", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
bold = fontManager.convertFont_toHaveTrait_(font, AppKit.NSBoldFontMask)
if bold is not None:
font = bold
attr[AppKit.NSFontAttributeName] = font
if style.get("color", False):
attr[AppKit.NSForegroundColorAttributeName] = style["color"]
if style.get("bgcolor", False):
attr[AppKit.NSBackgroundColorAttributeName] = style["bgcolor"]
if style.get("underline", False):
attr[AppKit.NSUnderlineStyleAttributeName] = AppKit.NSUnderlineStyleSingle
if style["color"]:
attr[AppKit.NSUnderlineColorAttributeName] = style["color"]
if token:
_textAttributesForStyleCache[token] = attr
return attr
_multiLineRE = re.compile(
r"(\'\'\'|\"\"\"|/\*|<!--)"
r".*?"
r"(\'\'\'|\"\"\"|\*/|--!>)"
, re.DOTALL
)
_whiteSpaceRE = re.compile(r"[ \t]+")
def _findWhitespace(s, pos=0):
m = _whiteSpaceRE.match(s, pos)
if m is None:
return pos
return m.end()
def _pythonWordCompletions(text, charRange):
if not hasJedi:
return [], 0
partialString = text.substringWithRange_(charRange)
keyWords = list(_drawBotDrawingTool.__all__)
try:
lines = text[:charRange.location].count("\n") + 1
if len(text) == charRange.location:
columns = None
else:
columns = 0
if text:
while text[charRange.location-columns] != "\n":
columns += 1
script = jedi.api.Script(source=text, line=lines, column=columns)
keyWords += [c.word for c in script.complete()]
except:
pass
keyWords = [word for word in sorted(keyWords) if word.startswith(partialString)]
return keyWords, 0
languagesIDEBehavior = {
"Python": {
"openToCloseMap": {"(": ")", "[": "]", "{": "}", "<": ">"},
"indentWithEndOfLine": [":", "(", "[", "{"],
"comment": "#",
"keywords": kwlist,
"wordCompletions": _pythonWordCompletions,
"dropPathFormatting": 'u"%s"',
"dropPathsFormatting": '[%s]',
"dropPathsSeperator": ", "
},
}
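# Sketch of how a second language could be registered (hypothetical entry, not
# part of the defaults above) -- the keys mirror the "Python" entry:
#
#     languagesIDEBehavior["JavaScript"] = {
#         "openToCloseMap": {"(": ")", "[": "]", "{": "}"},
#         "indentWithEndOfLine": ["{", "(", "["],
#         "comment": "//",
#         "keywords": ["function", "var", "return"],
#         "dropPathFormatting": '"%s"',
#         "dropPathsFormatting": "[%s]",
#         "dropPathsSeperator": ", ",
#     }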
downArrowSelectionDirection = 0
upArrowSelectionDirection = 1
class CodeNSTextView(AppKit.NSTextView):
jumpToLineWindowClass = _JumpToLineSheet
def init(self):
self = super(CodeNSTextView, self).init()
self._highlightStyle = DefaultStyle
self._languagesIDEBehavior = dict()
self._fallbackTextColor = fallbackTextColor
self._fallbackBackgroundColor = fallbackBackgroundColor
self._fallbackHightLightColor = fallbackHightLightColor
self._fallbackFont = fallbackFont
self.setTypingAttributes_(_textAttributesForStyle(dict(color=self._fallbackTextColor)))
self.setUsesFontPanel_(False)
self.setRichText_(False)
self.setAllowsUndo_(True)
try:
self.setUsesFindBar_(True)
except:
self.setUsesFindPanel_(True)
self._usesTabs = False
self._indentSize = 4
self._ignoreProcessEditing = False
self._lexer = None
self.highlightStyleMap = dict()
nc = AppKit.NSNotificationCenter.defaultCenter()
nc.addObserver_selector_name_object_(self, "userDefaultChanged:", "drawBotUserDefaultChanged", None)
self._arrowSelectionDirection = None
self._canDrag = False
self._liveCoding = False
return self
def __del__(self):
nc = AppKit.NSNotificationCenter.defaultCenter()
nc.removeObserver_(self)
def setLexer_(self, lexer):
if lexer is None:
raise "Cannot set a None type for lexer, must be a subclass of pygment Lexer."
self._lexer = lexer
if self.window():
self.resetHighLightSyntax()
def lexer(self):
return self._lexer
def setHighlightStyle_(self, style):
self._highlightStyle = style
self._buildhighlightStyleMap()
if self.window():
self.resetHighLightSyntax()
def highlightStyle(self):
return self._highlightStyle
def setLanguagesIDEBehavior_(self, languagesIDEBehavior):
self._languagesIDEBehavior = languagesIDEBehavior
def languagesIDEBehavior(self):
return self._languagesIDEBehavior
def languagesIDEBehaviorForLanguage_(self, language):
return self._languagesIDEBehavior.get(language)
def _buildhighlightStyleMap(self):
# cache all tokens with nscolors
styles = self.highlightStyle()
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
self.setBackgroundColor_(backgroundColor)
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
self.setSelectedTextAttributes_({AppKit.NSBackgroundColorAttributeName: selectionColor})
self.highlightStyleMap = dict()
for token, style in styles:
for key in "color", "bgcolor", "border":
style[key] = _hexToNSColor(style[key], None)
self.highlightStyleMap[token] = style
def setUsesTabs_(self, usesTabs):
oldIndent = self.indent()
self._usesTabs = usesTabs
newIndent = self.indent()
string = self.string()
string = string.replace(oldIndent, newIndent)
self.setString_(string)
def usesTabs(self):
return self._usesTabs
def setIndentSize_(self, size):
        oldIndent = self.indent()
self._indentSize = size
newIndent = self.indent()
if not self.usesTabs():
string = self.string()
string = string.replace(oldIndent, newIndent)
self.setString_(string)
def indentSize(self):
return self._indentSize
def indent(self):
if self.usesTabs():
return "\t"
else:
return " " * self.indentSize()
# overwritting NSTextView methods
def setBackgroundColor_(self, color):
        # invert the insertion pointer color
# and the fallback text color and background color
color = color.colorUsingColorSpaceName_(AppKit.NSCalibratedRGBColorSpace)
r = color.redComponent()
g = color.greenComponent()
b = color.blueComponent()
s = sum([r, g, b]) / 3.
inverseColor = s < .6
if inverseColor:
self._fallbackBackgroundColor = AppKit.NSColor.blackColor()
self._fallbackTextColor = AppKit.NSColor.whiteColor()
self.setInsertionPointColor_(AppKit.NSColor.whiteColor())
else:
self._fallbackBackgroundColor = AppKit.NSColor.whiteColor()
self._fallbackTextColor = AppKit.NSColor.blackColor()
self.setInsertionPointColor_(AppKit.NSColor.blackColor())
if self.enclosingScrollView():
self.enclosingScrollView().setBackgroundColor_(color)
self._updateRulersColors()
super(CodeNSTextView, self).setBackgroundColor_(color)
def changeColor_(self, color):
# prevent external color overwrite,
pass
def changeAttributes_(self, attr):
# prevent external attributes overwrite
pass
def smartInsertDeleteEnabled(self):
return False
def isAutomaticTextReplacementEnabled(self):
return True
def isAutomaticQuoteSubstitutionEnabled(self):
return False
def isAutomaticLinkDetectionEnabled(self):
return False
def isAutomaticDataDetectionEnabled(self):
return False
def isAutomaticDashSubstitutionEnabled(self):
return False
def isAutomaticSpellingCorrectionEnabled(self):
return False
# hightlighting
def resetHighLightSyntax(self):
self._ignoreProcessEditing = True
self._highlightSyntax(0, self.string())
self._ignoreProcessEditing = False
def _highlightSyntax(self, location, text):
if self.lexer() is None:
return
font = getFontDefault("PyDEFont", self._fallbackFont)
length = len(self.string())
setAttrs = self.textStorage().setAttributes_range_
if text.endswith("\n"):
text = text[:-1]
# setAttrs = self.layoutManager().addTemporaryAttributes_forCharacterRange_
self.textStorage().beginEditing()
totLenValue = 0
for pos, token, value in self.lexer().get_tokens_unprocessed(text):
style = self.highlightStyleMap.get(token)
lenValue = len(value)
if location + pos + lenValue > length:
lenValue = length - (location + pos)
if lenValue > 0:
setAttrs(_textAttributesForStyle(style, font), (location + pos, lenValue))
totLenValue += lenValue
self.textStorage().fixFontAttributeInRange_((location, totLenValue))
self.textStorage().endEditing()
# key down
def keyDown_(self, event):
char = event.characters()
selectedRange = self.selectedRange()
if AppKit.NSEvent.modifierFlags() & AppKit.NSCommandKeyMask and selectedRange and char in (AppKit.NSUpArrowFunctionKey, AppKit.NSDownArrowFunctionKey, AppKit.NSLeftArrowFunctionKey, AppKit.NSRightArrowFunctionKey):
value = self._getSelectedValueForRange(selectedRange)
if value is not None:
altDown = AppKit.NSEvent.modifierFlags() & AppKit.NSAlternateKeyMask
shiftDown = AppKit.NSEvent.modifierFlags() & AppKit.NSShiftKeyMask
add = 1
if altDown:
add = .1
if char == AppKit.NSDownArrowFunctionKey:
add *= -1
elif char == AppKit.NSLeftArrowFunctionKey:
add *= -1
if shiftDown and altDown:
add /= 10
elif shiftDown:
add *= 10
if isinstance(value, tuple):
valueX, valueY = value
if char in [AppKit.NSUpArrowFunctionKey, AppKit.NSDownArrowFunctionKey]:
valueY += add
else:
valueX += add
value = "%s, %s" % (valueX, valueY)
else:
value += add
self._insertTextAndRun("%s" % value, selectedRange)
return
txt = self.string().substringWithRange_(selectedRange)
if txt == "True":
self._insertTextAndRun("False", selectedRange)
return
if txt == "False":
self._insertTextAndRun("True", selectedRange)
return
super(CodeNSTextView, self).keyDown_(event)
selectedRange = self.selectedRange()
self._balanceParenForChar(char, selectedRange.location)
if self.isLiveCoding():
self.performSelectorInBackground_withObject_("_runInternalCode", None)
def flagsChanged_(self, event):
self._arrowSelectionDirection = None
super(CodeNSTextView, self).flagsChanged_(event)
def mouseDown_(self, event):
self._canDrag = False
if AppKit.NSEvent.modifierFlags() & AppKit.NSCommandKeyMask and self.selectedRange():
self._canDrag = True
self.undoManager().beginUndoGrouping()
selRng = self.selectedRange()
txt = self.string().substringWithRange_(selRng)
if txt == "True":
self._insertTextAndRun("False", selRng)
elif txt == "False":
self._insertTextAndRun("True", selRng)
return
super(CodeNSTextView, self).mouseDown_(event)
def mouseDragged_(self, event):
if self._canDrag:
try:
selRng = self.selectedRange()
value = self._getSelectedValueForRange(selRng)
if value is not None:
altDown = event.modifierFlags() & AppKit.NSAlternateKeyMask
shiftDown = event.modifierFlags() & AppKit.NSShiftKeyMask
add = 1
if altDown and shiftDown:
add = .01
elif altDown:
add = .1
elif shiftDown:
add = 10
if isinstance(value, tuple):
valueX, valueY = value
valueX += int(event.deltaX()*2) * add
valueY -= int(event.deltaY()*2) * add
txtValue = "%s, %s" % (valueX, valueY)
else:
value += int(event.deltaX()*2) * add
txtValue = "%s" % value
self._insertTextAndRun(txtValue, selRng)
except:
pass
super(CodeNSTextView, self).mouseDragged_(event)
def mouseUp_(self, event):
if self._canDrag:
self.undoManager().endUndoGrouping()
super(CodeNSTextView, self).mouseUp_(event)
def insertTab_(self, sender):
string = self.string()
if string:
selectedRange = self.selectedRange()
try:
char = string[selectedRange.location-1]
except:
char = ""
if char == ".":
self.setSelectedRange_((selectedRange.location-1, 1))
self.insertText_("self.")
return
if self.usesTabs():
return super(CodeNSTextView, self).insertTab_(sender)
self.insertText_(self.indent())
def insertNewline_(self, sender):
selectedRange = self.selectedRange()
super(CodeNSTextView, self).insertNewline_(sender)
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData:
leadingSpace = ""
line, lineRange = self._getTextForRange(selectedRange)
m = _whiteSpaceRE.match(line)
if m is not None:
leadingSpace = m.group()
line = line.strip()
if line and line[-1] in languageData["indentWithEndOfLine"]:
leadingSpace += self.indent()
if leadingSpace:
self.insertText_(leadingSpace)
def deleteBackward_(self, sender):
self._deleteIndentation(sender, False, super(CodeNSTextView, self).deleteBackward_)
def deleteForward_(self, sender):
self._deleteIndentation(sender, True, super(CodeNSTextView, self).deleteForward_)
def moveLeft_(self, sender):
super(CodeNSTextView, self).moveLeft_(sender)
string = self.string()
if not string:
return
selectedRange = self.selectedRange()
char = string[selectedRange.location]
self._balanceParenForChar(char, selectedRange.location+1)
def moveRight_(self, sender):
super(CodeNSTextView, self).moveRight_(sender)
string = self.string()
if not string:
return
selectedRange = self.selectedRange()
char = string[selectedRange.location-1]
self._balanceParenForChar(char, selectedRange.location)
def moveWordLeft_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
location = self._getLeftWordRange(newRange)
self.setSelectedRange_((location, 0))
else:
super(CodeNSTextView, self).moveWordLeft_(sender)
def moveWordLeftAndModifySelection_(self, sender):
ranges = self.selectedRanges()
if self._arrowSelectionDirection is None:
self._arrowSelectionDirection = downArrowSelectionDirection
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
testLocation = -1
if newRange.length and self._arrowSelectionDirection != downArrowSelectionDirection:
testLocation = self._getLeftWordRange(AppKit.NSRange(newRange.location + newRange.length, 0))
if AppKit.NSLocationInRange(testLocation, newRange) or AppKit.NSMaxRange(newRange) == testLocation:
newRange = AppKit.NSRange(newRange.location, testLocation - newRange.location)
else:
location = self._getLeftWordRange(newRange)
newRange = AppKit.NSRange(location, newRange.location - location + newRange.length)
if newRange.length == 0:
self._arrowSelectionDirection = None
self.setSelectedRange_(newRange)
else:
super(CodeNSTextView, self).moveWordLeftAndModifySelection_(sender)
def moveWordRight_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
location = self._getRightWordRange(newRange)
self.setSelectedRange_((location, 0))
else:
super(CodeNSTextView, self).moveWordRight_(sender)
def moveWordRightAndModifySelection_(self, sender):
ranges = self.selectedRanges()
if self._arrowSelectionDirection is None:
self._arrowSelectionDirection = upArrowSelectionDirection
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
testLocation = -1
if newRange.length and self._arrowSelectionDirection != upArrowSelectionDirection:
testLocation = self._getRightWordRange(AppKit.NSRange(newRange.location, 0))
if AppKit.NSLocationInRange(testLocation, newRange) or AppKit.NSMaxRange(newRange) == testLocation:
newRange = AppKit.NSRange(testLocation, newRange.location - testLocation + newRange.length)
else:
location = self._getRightWordRange(newRange)
newRange = AppKit.NSRange(newRange.location, location - newRange.location)
if newRange.length == 0:
self._arrowSelectionDirection = None
self.setSelectedRange_(newRange)
else:
super(CodeNSTextView, self).moveWordRightAndModifySelection_(sender)
def deleteWordBackward_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
if newRange.length == 0:
self.moveWordLeftAndModifySelection_(sender)
super(CodeNSTextView, self).deleteWordForward_(sender)
def deleteWordForward_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
if newRange.length == 0:
self.moveWordRightAndModifySelection_(sender)
super(CodeNSTextView, self).deleteWordForward_(sender)
# text completion
def rangeForUserCompletion(self):
charRange = super(CodeNSTextView, self).rangeForUserCompletion()
text = self.string()
partialString = text.substringWithRange_(charRange)
if "." in partialString:
dotSplit = partialString.split(".")
partialString = dotSplit.pop()
move = len(".".join(dotSplit))
charRange.location += move + 1
charRange.length = len(partialString)
for c in partialString:
if c not in variableChars:
return (AppKit.NSNotFound, 0)
return charRange
def completionsForPartialWordRange_indexOfSelectedItem_(self, charRange, index):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return [], 0
text = self.string()
func = languageData.get("wordCompletions", self._genericCompletions)
return func(text, charRange)
def _genericCompletions(self, text, charRange):
partialString = text.substringWithRange_(charRange)
keyWords = list()
index = 0
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return keyWords, index
if partialString:
_reWords = re.compile(r"\b%s\w+\b" % partialString)
keyWords = _reWords.findall(text)
keyWords = list(set(keyWords + languageData.get("keywords", [])))
keyWords = [word for word in sorted(keyWords) if word.startswith(partialString)]
return keyWords, index
def selectionRangeForProposedRange_granularity_(self, proposedRange, granularity):
location = proposedRange.location
if granularity == AppKit.NSSelectByWord and proposedRange.length == 0 and location != 0:
text = self.string()
lenText = len(text)
length = 1
found = False
while not found:
location -= 1
length += 1
if location <= 0:
found = True
else:
c = text.substringWithRange_((location, 1))[0]
if c not in variableChars:
location += 1
found = True
found = False
while not found:
length += 1
if location + length >= lenText:
found = True
else:
c = text.substringWithRange_((location, length))[-1]
if c not in variableChars:
length -= 1
found = True
return location, length
else:
return super(CodeNSTextView, self).selectionRangeForProposedRange_granularity_(proposedRange, granularity)
# drop
def acceptableDragTypes(self):
acceptableDragTypes = super(CodeNSTextView, self).acceptableDragTypes()
return list(acceptableDragTypes) + [AppKit.NSFilenamesPboardType]
def draggingEntered_(self, dragInfo):
pboard = dragInfo.draggingPasteboard()
types = pboard.types()
if AppKit.NSFilenamesPboardType in types:
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is not None:
formatter = languageData.get("dropPathFormatting")
if formatter:
paths = pboard.propertyListForType_(AppKit.NSFilenamesPboardType)
dropText = ""
if len(paths) == 1:
dropText = formatter % paths[0]
else:
formattedPaths = []
for path in paths:
formattedPaths.append(formatter % path)
multiLineFormater = languageData.get("dropPathsFormatting", "%s")
seperator = languageData.get("dropPathsSeperator", "\n")
dropText = multiLineFormater % seperator.join(formattedPaths)
if dropText:
pboard.declareTypes_owner_([AppKit.NSPasteboardTypeString], self)
pboard.setString_forType_(dropText, AppKit.NSPasteboardTypeString)
return super(CodeNSTextView, self).draggingEntered_(dragInfo)
# menu
def indent_(self, sender):
def indentFilter(lines):
indent = self.indent()
indentedLines = []
for line in lines:
if line.strip():
indentedLines.append(indent + line)
else:
indentedLines.append(line)
return indentedLines
self._filterLines(indentFilter)
def dedent_(self, sender):
def dedentFilter(lines):
indent = self.indent()
dedentedLines = []
indentSize = len(indent)
for line in lines:
if line.startswith(indent):
line = line[indentSize:]
dedentedLines.append(line)
return dedentedLines
self._filterLines(dedentFilter)
def comment_(self, sender):
def commentFilter(lines):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return lines
commentTag = languageData.get("comment")
if commentTag is None:
return lines
commentTag += " "
commentEndTag = languageData.get("commentEnd", "")
if commentEndTag:
commentEndTag = " " + commentEndTag
commentedLines = []
pos = 100
for line in lines:
if not line.strip():
continue
pos = min(pos, _findWhitespace(line))
for line in lines:
if line.strip():
addEnd = ""
if line[-1] == "\n":
line = line.replace("\n", "")
addEnd = "\n"
commentedLines.append(line[:pos] + commentTag + line[pos:] + commentEndTag + addEnd)
else:
commentedLines.append(line)
return commentedLines
self._filterLines(commentFilter)
def uncomment_(self, sender):
def uncommentFilter(lines):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return lines
commentTag = languageData.get("comment", "")
commentEndTag = languageData.get("commentEnd", "")
_commentRE = re.compile(r"[ \t]*(%s)[ ]?" % commentTag)
commentedLines = []
commentMatch = _commentRE.match
for line in lines:
m = commentMatch(line)
if m is not None:
start = m.start(1)
end = m.end()
line = line[:start] + line[end:]
line = line.replace(commentEndTag, "")
commentedLines.append(line)
return commentedLines
self._filterLines(uncommentFilter)
def _jumpToLine(self, lineNumber):
lines = 1
string = self.string()
length = len(string)
tempRange = AppKit.NSMakeRange(0, length)
found = None
while tempRange.location < length:
tempRange = string.lineRangeForRange_(AppKit.NSMakeRange(tempRange.location, 0))
if lines == lineNumber:
found = tempRange
break
tempRange.location = AppKit.NSMaxRange(tempRange)
lines += 1
if found:
self.setSelectedRange_(found)
self.scrollRangeToVisible_(found)
def jumpToLine_(self, sender):
self.jumpToLineWindowClass(self._jumpToLine, self.window())
def jumpToLineNumber_(self, lineNumber):
self._jumpToLine(lineNumber)
def liveCoding_(self, sender):
self._liveCoding = not self._liveCoding
def isLiveCoding(self):
return self._liveCoding
def validateUserInterfaceItem_(self, item):
if item.action() == "liveCoding:":
item.setState_(self.isLiveCoding())
return super(CodeNSTextView, self).validateUserInterfaceItem_(item)
# notifications
def textStorageDidProcessEditing_(self, notification):
if self._ignoreProcessEditing:
return
string = self.string()
if not string:
# no text to color
return
length = len(string)
textStorage = self.textStorage()
lineStart, lineLength = textStorage.editedRange()
lineStart -= 200
lineLength += 200
if lineStart <= 0:
lineStart = 0
if lineStart > length:
lineStart = length
lineLength = 0
if lineStart + lineLength > length:
lineLength = length - lineStart
lineStart, lineLength = string.lineRangeForRange_((lineStart, lineLength))
for quoteMatch in _multiLineRE.finditer(string):
start, end = quoteMatch.start(), quoteMatch.end()
quoteRange = (start, end-start)
if AppKit.NSLocationInRange(lineStart, quoteRange) or AppKit.NSLocationInRange(lineStart+lineLength, quoteRange):
quoteStart, quoteLenght = string.lineRangeForRange_(quoteRange)
lineStart, lineLength = AppKit.NSUnionRange(quoteRange, (lineStart, lineLength))
break
text = string.substringWithRange_((lineStart, lineLength))
self._highlightSyntax(lineStart, text)
def viewDidMoveToWindow(self):
self._buildhighlightStyleMap()
self.resetHighLightSyntax()
notificationCenter = AppKit.NSNotificationCenter.defaultCenter()
notificationCenter.addObserver_selector_name_object_(self, "textStorageDidProcessEditing:", AppKit.NSTextStorageDidProcessEditingNotification, self.textStorage())
def dealloc(self):
notificationCenter = AppKit.NSNotificationCenter.defaultCenter()
notificationCenter.removeObserver_(self)
super(CodeNSTextView, self).dealloc()
def userDefaultChanged_(self, notification):
if self.window():
_clearTextAttributesForStyleCache()
style = styleFromDefault()
self.setTypingAttributes_(_textAttributesForStyle(dict(color=self._fallbackTextColor)))
self.setHighlightStyle_(style)
# helpers
def _updateRulersColors(self):
scrollView = self.enclosingScrollView()
if scrollView and scrollView.hasVerticalRuler():
ruler = scrollView.verticalRulerView()
if hasattr(ruler, "setTextColor_"):
numberStyle = self.highlightStyleMap.get(Comment)
if numberStyle:
ruler.setTextColor_(numberStyle["color"])
if hasattr(ruler, "setRulerBackgroundColor_"):
styles = self.highlightStyle()
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
ruler.setRulerBackgroundColor_(backgroundColor)
def _deleteIndentation(self, sender, isForward, superFunc):
selectedRange = self.selectedRange()
if self.usesTabs() or selectedRange.length:
return superFunc(sender)
string = self.string()
if not string:
return superFunc(sender)
possibleIndentStart = selectedRange.location - self.indentSize()
possibleIndentEnd = self.indentSize()
if isForward:
possibleIndentStart = selectedRange.location
if possibleIndentStart < 0:
return superFunc(sender)
possibleIndent = None
if possibleIndentStart + possibleIndentEnd > len(string):
return superFunc(sender)
possibleIndent = string.substringWithRange_((possibleIndentStart, possibleIndentEnd))
if possibleIndent == self.indent():
self.setSelectedRange_((possibleIndentStart, possibleIndentEnd))
self.insertText_("")
else:
superFunc(sender)
def _findMatchingParen(self, location, char, matchChar, end):
add = 1
if end:
add = -1
location -= 2
string = self.string()
found = None
stack = 0
while location >= 0 and location < len(string):
c = string[location]
if c == char:
stack += 1
elif stack != 0 and c == matchChar:
stack -= 1
elif c == matchChar:
found = location
break
location += add
return found
def _balanceParenForChar(self, char, location):
if self.lexer() is None:
return
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return
openToCloseMap = languageData["openToCloseMap"]
if char in openToCloseMap.keys():
self._balanceParens(location=location, char=char, matchChar=openToCloseMap[char], end=False)
elif char in openToCloseMap.values():
openToCloseMap = _reverseMap(openToCloseMap)
self._balanceParens(location=location, char=char, matchChar=openToCloseMap[char], end=True)
def _balanceParens(self, location, char, matchChar, end):
found = self._findMatchingParen(location, char, matchChar, end)
if found is not None:
oldAttrs, effRng = self.textStorage().attributesAtIndex_effectiveRange_(found, None)
styles = self.highlightStyle()
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
textColor = oldAttrs.get(AppKit.NSForegroundColorAttributeName, self._fallbackTextColor)
shadow = AppKit.NSShadow.alloc().init()
shadow.setShadowOffset_((0, 0))
shadow.setShadowColor_(textColor)
shadow.setShadowBlurRadius_(3)
balancingAttrs = {
AppKit.NSBackgroundColorAttributeName: selectionColor,
AppKit.NSShadowAttributeName: shadow
}
self.layoutManager().setTemporaryAttributes_forCharacterRange_(balancingAttrs, (found, 1))
self.performSelector_withObject_afterDelay_("_resetBalanceParens:", (oldAttrs, effRng), 0.2)
def _resetBalanceParens_(self, (attrs, rng)):
self.layoutManager().setTemporaryAttributes_forCharacterRange_(attrs, rng)
def _filterLines(self, filterFunc):
selectedRange = self.selectedRange()
lines, linesRange = self._getTextForRange(selectedRange)
filteredLines = filterFunc(lines.splitlines(True))
filteredLines = "".join(filteredLines)
if lines == filteredLines:
return
self.setSelectedRange_(linesRange)
self.insertText_(filteredLines)
newSelRng = linesRange.location, len(filteredLines)
self.setSelectedRange_(newSelRng)
def _getLeftWordRange(self, newRange):
if newRange.location == 0:
return 0
text = self.string()
location = newRange.location - 1
c = text.substringWithRange_((location, 1))[0]
isChar = foundChar = c in variableChars
count = 0
while isChar == foundChar:
count += 1
location -= 1
if location <= 0:
location = 0
foundChar = not isChar
else:
c = text.substringWithRange_((location, 1))[0]
foundChar = c in variableChars
if count == 1 and isChar != foundChar:
isChar = not isChar
if location != 0:
location += 1
return location
def _getRightWordRange(self, newRange):
text = self.string()
lenText = len(text)
location = newRange.location + newRange.length
if location >= lenText:
return lenText
count = 0
c = text.substringWithRange_((location, 1))[0]
isChar = foundChar = c in variableChars
while isChar == foundChar:
count += 1
location += 1
if location >= lenText:
location = lenText
foundChar = not isChar
else:
c = text.substringWithRange_((location, 1))[0]
foundChar = c in variableChars
if count == 1 and isChar != foundChar:
isChar = not isChar
return location
def _getTextForRange(self, lineRange):
string = self.string()
lineRange = string.lineRangeForRange_(lineRange)
return string.substringWithRange_(lineRange), lineRange
def _getSelectedValueForRange(self, selectedRange):
value = None
try:
txt = self.string().substringWithRange_(selectedRange)
for c in txt:
if c not in "0123456789.,- ":
raise DrawBotError("No dragging possible")
exec("value = %s" % txt)
except:
pass
return value
def _insertTextAndRun(self, txt, txtRange):
self.insertText_(txt)
newRange = AppKit.NSMakeRange(txtRange.location, len(txt))
self.setSelectedRange_(newRange)
return self._runInternalCode()
def _runInternalCode(self):
pool = AppKit.NSAutoreleasePool.alloc().init()
try:
window = self.window()
if window is not None:
doc = window.document()
if doc is not None:
doc.runCode_(self)
return True
except:
return False
class CodeEditor(TextEditor):
nsTextViewClass = CodeNSTextView
def __init__(self, *args, **kwargs):
codeAttr = dict()
for key in "lexer", "highlightStyle", "usesTabs", "indentSize", "languagesIDEBehavior", "showlineNumbers":
value = None
if key in kwargs:
value = kwargs.get(key)
del kwargs[key]
codeAttr[key] = value
super(CodeEditor, self).__init__(*args, **kwargs)
if isinstance(codeAttr["lexer"], str):
try:
codeAttr["lexer"] = get_lexer_by_name(codeAttr["lexer"])
except:
codeAttr["lexer"] = None
if codeAttr["lexer"] is None:
codeAttr["lexer"] = PythonLexer()
self.setLexer(codeAttr["lexer"])
if codeAttr["highlightStyle"] is None:
codeAttr["highlightStyle"] = styleFromDefault()
if codeAttr["highlightStyle"] is not None:
self.setHighlightStyle(codeAttr["highlightStyle"])
if codeAttr["usesTabs"] is not None:
self.setUsesTabs(codeAttr["usesTabs"])
if codeAttr["indentSize"] is not None:
self.setIndentSize(codeAttr["indentSize"])
if codeAttr["languagesIDEBehavior"] is not None:
            languagesIDEBehavior.update(codeAttr["languagesIDEBehavior"])
self.setLanguagesIDEBehavior(languagesIDEBehavior)
if codeAttr["showlineNumbers"] is None:
codeAttr["showlineNumbers"] = True
ruler = NSLineNumberRuler.alloc().init()
ruler.setClientView_(self.getNSTextView())
ruler.setRulerBackgroundColor_(AppKit.NSColor.colorWithCalibratedWhite_alpha_(.95, 1))
self.getNSScrollView().setVerticalRulerView_(ruler)
self.getNSScrollView().setHasHorizontalRuler_(False)
self.getNSScrollView().setHasVerticalRuler_(codeAttr["showlineNumbers"])
self.getNSScrollView().setRulersVisible_(True)
def setHighlightStyle(self, style):
self.getNSTextView().setHighlightStyle_(style)
def setLexer(self, lexer):
self.getNSTextView().setLexer_(lexer)
def setLanguagesIDEBehavior(self, languagesIDEBehavior):
self.getNSTextView().setLanguagesIDEBehavior_(languagesIDEBehavior)
def setUsesTabs(self, value):
self.getNSTextView().setUsesTabs_(value)
def usesTabs(self):
return self.getNSTextView().usesTabs()
def setIndentSize(self, value):
self.getNSTextView().setIndentSize_(value)
def indentSize(self):
return self.getNSTextView().indentSize()
def comment(self):
self.getNSTextView().comment_(self)
def uncomment(self):
self.getNSTextView().uncomment_(self)
def indent(self):
self.getNSTextView().indent_(self)
def dedent(self):
self.getNSTextView().dedent_(self)
def jumpToLine(self, lineNumber=None):
if lineNumber is None:
self.getNSTextView().jumpToLine_(self)
else:
self.getNSTextView().jumpToLineNumber_(lineNumber)
def toggleLineNumbers(self):
self.getNSScrollView().setHasVerticalRuler_(not self.getNSScrollView().hasVerticalRuler())
class OutPutCodeNSTextView(CodeNSTextView):
def init(self):
self = super(OutPutCodeNSTextView, self).init()
self._items = []
self.setTextAttributes()
return self
def clear(self):
self._items = []
self.setString_("")
def appendText_isError_(self, text, isError):
self._items.append((text, isError))
attrs = self.textAttributes
if isError:
attrs = self.tracebackAttributes
text = AppKit.NSAttributedString.alloc().initWithString_attributes_(text, attrs)
self.textStorage().appendAttributedString_(text)
def userDefaultChanged_(self, notification):
super(OutPutCodeNSTextView, self).userDefaultChanged_(notification)
self.setTextAttributes()
def setTextAttributes(self):
self.setString_("")
styles = styleFromDefault()
self.setHighlightStyle_(styles)
self.textAttributes = outputTextAttributesForStyles(styles)
self.tracebackAttributes = outputTextAttributesForStyles(styles, isError=True)
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
self.setBackgroundColor_(backgroundColor)
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
self.setSelectedTextAttributes_({AppKit.NSBackgroundColorAttributeName: selectionColor})
self.setFont_(getFontDefault("PyDEFont", self._fallbackFont))
items = self._items
self._items = []
for text, isError in items:
self.appendText_isError_(text, isError)
class OutPutEditor(TextEditor):
nsTextViewClass = OutPutCodeNSTextView
def append(self, text, isError=False):
self.getNSTextView().appendText_isError_(text, isError)
def clear(self):
self.getNSTextView().clear()
def forceUpdate(self):
self.getNSTextView().display()
def scrollToEnd(self):
self.getNSTextView().scrollRangeToVisible_((len(self.get()), 0))
|
bsd-2-clause
| -5,953,409,070,966,837,000
| 35.941529
| 222
| 0.605499
| false
| 4.238411
| false
| false
| false
|
hongzhouye/frankenstein
|
tools/mol_utils.py
|
1
|
8589
|
"""Utils functions for module MOL
"""
import os
import numpy as np
from frankenstein.tools.io_utils import zmat2xyz
from frankenstein.data.atom_data import get_atomic_number
def get_enuc(Zs, xyzs):
"""Compute nuclear repulsion for a give molecule
Note:
The coordinates must be in unit of Bohr. (1 Bohr = 1.88972612457 Ang)
"""
natom = len(Zs)
assert(len(xyzs) == 3*natom)
rs = np.asarray(xyzs).reshape(natom, 3)
enuc = 0
for i in range(natom):
for j in range(i+1, natom):
enuc += Zs[i]*Zs[j] / np.sum((rs[i]-rs[j])**2.)**0.5
return enuc
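# Worked example (illustrative): for H2 with the two nuclei 1.4 Bohr apart,
# get_enuc([1, 1], [0., 0., 0., 0., 0., 1.4]) returns 1*1/1.4 ~= 0.714 Hartree.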
# utils for geometry
def parse_gfile(gfile):
"""Parse input geometry file into standard geometry string
"""
if gfile[-4:] == ".xyz":
fname = gfile
elif gfile[-5:] == ".zmat":
fname = ".tmp.xyz"
zmat2xyz(gfile, fname)
else:
raise ValueError("Unknown format of input geometry file {:s}".format(gfile))
with open(fname, "r") as f:
natom = int(f.readline())
f.readline()
gstr = ";".join([" ".join(f.readline().split()[:4]) for i in range(natom)])
if fname == ".tmp.xyz":
os.system("rm .tmp.xyz")
return gstr
def standardize_gstr(gstr):
"""Put input geometry string into standard format
[NOTE] If input string is in Z-mat format, transformation to xyz will be performed first.
"""
atoms = [spg.strip() for spg in gstr.split(";")]
atom0 = atoms[0].split()
if len(atom0) == 1:
fzmat = ".tmp.zmat"
with open(fzmat, "w") as f:
f.write("{:d}\n\n".format(len(atoms)))
f.write("\n".join(atoms))
gstr = parse_gfile(fzmat)
os.system("rm {:s}".format(fzmat))
elif len(atom0) == 4:
gstr = ";".join([" ".join(atom.split()) for atom in atoms])
else:
raise ValueError("Unknown format of input geometry string\n{:s}".format(gstr))
return gstr
def parse_gstr(gstr, scale=1.88972612457):
"""Get atomic numbers and (scaled) atomic coordinates
Inp:
scale (float, optional, default: 1.88972612457):
Scaling factor for coordinates. The default assumes input coordinates are in angstrom and transform them into bohr.
"""
axyzs = [atom.split() for atom in gstr.split(";")]
natom = len(axyzs)
atoms = [None] * natom
xyzs = np.zeros(3*natom)
for ia in range(natom):
atoms[ia] = axyzs[ia][0]
xyzs[ia*3:(ia+1)*3] = list(map(float, axyzs[ia][1:]))
xyzs *= scale
xyzs = xyzs.tolist()
Zs = [get_atomic_number(atoms[ia]) for ia in range(natom)]
return atoms, Zs, xyzs
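# Example (illustrative, coordinates made up): for water given in Angstrom,
#   gstr = standardize_gstr("O 0 0 0; H 0 0 0.96; H 0.93 0 -0.24")
#   atoms, Zs, xyzs = parse_gstr(gstr)
# gives atoms = ['O', 'H', 'H'], Zs = [8, 1, 1], and xyzs scaled to Bohr
# (each coordinate multiplied by 1.88972612457).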
class GEOM:
"""Parse user-inpute geometry
"""
def __init__(self, gfs):
"""Constructor
Inp:
gfs (str):
Geometry file or string.
Geometry file must end with either ".xyz" or ".zmat" and follow format:
```
Natom
comment
Atom1 x y z
Atom2 x y z
...
```
for ".xyz", or
```
Natom
comment
Atom1
Atom2 1 dist(1,2)
...
```
for ".zmat". Geometry string follows the same format as either file format, but (1) without heading lines (Natom + comment), and (2) line separation is replaced by semicolon. For example, for xyz format,
gstr = "Atom1 x y z; Atom2 x y z; ..."
[NOTE] Input Z-mat format will be transformed into xyz format automatically! And only the latter will be stored.
Properties:
gtype : "file" or "str"
gstr : "Atom1 x y z;Atom2 x y z;..."
"""
self.parse_gfs(gfs)
self.lspace = 8 # number of spaces on the left (for printing)
def parse_gfs(self, gfs):
"""Parsing geometry string or file into standard form.
"""
if ".xyz" in gfs or ".zmat" in gfs:
self.gtype = "file"
self.gstr = parse_gfile(gfs)
else:
self.gtype = "str"
self.gstr = standardize_gstr(gfs)
def parse_gstr(self, scale=1.88972612457):
return parse_gstr(self.gstr, scale=scale)
def __str__(self):
gstr_out = " "*(self.lspace//2) + "Nuclear Coordinates:\n"
atoms = self.gstr.split(";")
for ia in range(len(atoms)):
axyz = atoms[ia].split()
axyz[0] = " " * self.lspace + axyz[0]
atoms[ia] = " ".join(axyz)
gstr_out += "\n".join(atoms)
return gstr_out
def get_Zxyz(geom, scale=1.88972612457, retSymb=False):
"""Get atom symbols and coordinates
Note:
The default of "scale" assumes input geometry uses unit "Angstrom" and
        transforms it into "Bohr". Use "scale = 1." to stay with "Angstrom".
"""
gstr_raw = parse_gfile(geom)
gstr = standardize_gstr(gstr_raw)
atoms, Zs, xyzs = parse_gstr(gstr, scale)
if retSymb:
return atoms, xyzs
else:
return Zs, xyzs
def get_noccs(Zs, charge, spin):
"""Determine # of alpha and beta electrons
Inp:
Zs (list of int):
A list of atomic numbers (i.e., nuclear charges) for each atom
charge (int):
Net charge (nelectron - sum(Zs))
spin (int):
Spin multiplicity (2S + 1)
Out:
noccs (list):
[nocca, noccb]
"""
nuc_charge = int(sum(Zs))
nocca = (nuc_charge - charge + spin - 1) // 2
noccb = nocca + 1 - spin
if nuc_charge - (nocca + noccb) != charge:
raise RuntimeError(("Bad combination of spin (={:d}) and "
"charge (={:d})").format(spin, charge))
return [nocca, noccb]
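# Worked example (illustrative): neutral singlet water has Zs = [8, 1, 1],
# charge = 0, spin = 1, so nuc_charge = 10, nocca = (10 - 0 + 1 - 1)//2 = 5 and
# noccb = 5 + 1 - 1 = 5, i.e. get_noccs returns [5, 5].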
def get_orth_mat(S, orth_method, ao_lindep_thresh):
"""Compute matrix X that is used for orthogonalizing basis functions
Inp:
S (np.ndarray, nao*nao):
AO overlap matrix
orth_method (str):
Either "symm" or "cano"
ao_lindep_thresh (int):
10**-ao_lindep_thresh is the threshold for "basically zero"
eigenvalues (only used and must be given in "cano"
orthogonalization)
Out:
X (np.ndarray, nao*nmo):
Meaning clear from eqn, h_orth = X.T @ h @ X.
nmo = nao for orth_method = "symm"
nmo = # of linearly dependent AOs for orth_method = "cano"
smin (float):
smallest eigenvalue of S
"""
e, u = np.linalg.eigh(S)
n_lindep = int(np.sum(e < 10**-ao_lindep_thresh))
smin = e[0]
if orth_method.upper() == "SYMM":
if n_lindep > 0:
raise RuntimeError("""orth_method = "symm" cannot handle linear dependency in AO basis. Please use a more tolerant ao_lindep_thresh (default: 6) or use orth_method = "cano".""")
X = u @ np.diag(e**-0.5) @ u.T
Xinv = u @ np.diag(e**0.5) @ u.T
elif orth_method.upper() == "CANO":
X = u[:,n_lindep:] @ np.diag(e[n_lindep:]**-0.5)
Xinv = np.diag(e[n_lindep:]**0.5) @ u[:,n_lindep:].T
else:
raise RuntimeError("Unknown orth_method {:s}.".format(orth_method))
return X, Xinv, smin
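# Sanity check (illustrative): with either scheme the returned X orthogonalizes
# the basis, i.e. X.T @ S @ X is the identity on the retained space and
# Xinv @ X is the identity, e.g.
#   X, Xinv, smin = get_orth_mat(S, "cano", 6)
#   assert np.allclose(X.T @ S @ X, np.eye(X.shape[1]))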
# utils for basis (TODO: move these functions to basis_utils.py)
def get_pure_by_l(ls, pures):
"""1. Check if same l has same purity; 2. return pures by l
"""
max_l = max(ls)
pure_by_l = [None] * (max_l+1)
for l, pure in zip(ls, pures):
if pure_by_l[l] is None:
pure_by_l[l] = pure
else:
if pure_by_l[l] != pure:
raise ValueError("Two shells with same angular momentum have different purity.")
return pure_by_l
def get_norb_l(l, pure):
"""Get number of orbitals for a given angular momentum
"""
if pure:
return 2 * l + 1
else:
return (l + 1) * (l + 2) // 2
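# e.g. a d shell (l=2) has 5 spherical ("pure") components but 6 cartesian
# ones: get_norb_l(2, True) -> 5, get_norb_l(2, False) -> 6.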
def get_idao_by_l(ls, pures):
"""Get starting index of each group of AO (grouped by angular momentum)
Inp:
ls ([int] * nbas):
A list of angular momentum
pures ([bool] * nbas):
            Indicates whether each l in ls is spherical (pure=True) or cartesian.
Output:
idao_by_l ([ [int] * nbas_this_l ] * max_l)
"""
max_l = max(ls)
idao_by_l = [[] for i in range(max_l+1)]
p0 = 0
for i in range(len(ls)):
l, pure = ls[i], pures[i]
p1 = p0 + get_norb_l(l, pure)
idao_by_l[l].append(p0)
p0 = p1
return idao_by_l
|
bsd-3-clause
| -3,732,571,148,976,621,600
| 29.031469
| 219
| 0.544301
| false
| 3.280749
| false
| false
| false
|
pwyliu/clancy
|
clancy/engage.py
|
1
|
1892
|
import getpass
from .utils import goodquit_json, read_file
from .redoctober import api_call
def engage(args, password):
"""
Construct payloads and POST to Red October
"""
if args['create']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('create', args, payload))
elif args['delegate']:
payload = {
'Name': args['--user'], 'Password': password,
'Time': args['--time'], 'Uses': args['--uses']
}
goodquit_json(api_call('delegate', args, payload))
elif args['encrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Minimum': args['--min'], 'Owners': args['--owners'].split(','),
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('encrypt', args, payload))
elif args['decrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('decrypt', args, payload))
elif args['summary']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('summary', args, payload))
elif args['change-password']:
args['newpass'] = getpass.getpass('New Password: ')
payload = {
'Name': args['--user'], 'Password': password,
'NewPassword': args['newpass']
}
goodquit_json(api_call('password', args, payload))
elif args['modify']:
payload = {
'Name': args['--user'], 'Password': password,
'Command': args['--action'], 'ToModify': args['--target']
}
goodquit_json(api_call('modify', args, payload))
|
mit
| 5,020,975,873,342,050,000
| 32.785714
| 76
| 0.523784
| false
| 3.958159
| false
| false
| false
|
tobykurien/MakerDroid
|
assetsrc/pycam.mp3/src/pycam/Exporters/EMCToolExporter.py
|
1
|
1454
|
# -*- coding: utf-8 -*-
"""
$Id: EMCToolExporter.py 629 2010-08-23 16:53:06Z sumpfralle $
Copyright 2010 Lars Kruse <devel@sumpfralle.de>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
import os
class EMCToolExporter:
def __init__(self, tools):
self.tools = tools
def get_tool_definition_string(self):
result = []
#result.append(self.HEADER_ROW)
for index in range(len(self.tools)):
tool = self.tools[index]
# use an arbitrary length
tool_length = tool["tool_radius"] * 10
line = "T%d P%d D%f Z-%f ;%s" % (index + 1, index + 1,
2 * tool["tool_radius"], tool_length, tool["name"])
result.append(line)
# add the dummy line for the "last" tool
result.append("T99999 P99999 Z+0.100000 ;dummy tool")
return os.linesep.join(result)
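# Illustrative output for a single (hypothetical) tool
# {"tool_radius": 1.5, "name": "3mm end mill"}:
#
#   T1 P1 D3.000000 Z-15.000000 ;3mm end mill
#   T99999 P99999 Z+0.100000 ;dummy tool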
|
gpl-3.0
| -6,490,145,569,136,249,000
| 32.813953
| 71
| 0.662999
| false
| 3.662469
| false
| false
| false
|
j-be/vj-aerome-scent-controller
|
aerome_scent_controller.py
|
1
|
1957
|
import logging
import serial
import array
class AeromeScentController (object):
BLOCK_BEGIN = [0x1b]
BLOCK_END = [0x0d]
ACTIVATE_CONTROLLER = [0xe0, 0xe1, 0xe2, 0xe3, 0x0d]
ALL_VALVES_HOLD = [0xee, 0xef]
FLUSH_VALVE_ON = [0x26]
FLUSH_VALVE_OFF = [0xa6]
SCENT_VALVE_ON = 0x40
SCENT_VALVE_OFF = 0xC0
def __init__(self, serial_port_name):
self.serial_port_name = serial_port_name
def _init_serial(self):
self.log = logging.getLogger("aeromeScentController")
try:
# Init Serial port
self.serial_port = serial.Serial(self.serial_port_name, timeout=1, baudrate=9600)
self.serial_port.flushInput()
self.serial_port.flushOutput()
except OSError as error:
self.serial_port = None
self.log.error("Cannot initialize. Reason: %s", error)
except serial.serialutil.SerialException as error:
self.serial_port = None
self.log.error("Cannot initialize. Reason: %s", error)
self.log.debug("Serial: %s", self.serial_port)
def initialize_controller(self):
self._init_serial()
self._send_block(self.ALL_VALVES_HOLD)
self._send_message(self.ACTIVATE_CONTROLLER)
self._send_block(self.ALL_VALVES_HOLD)
def open_valve(self, valve_id):
self._send_block(self.FLUSH_VALVE_ON + [self.SCENT_VALVE_ON + valve_id])
def close_valve(self, valve_id):
self._send_block(self.FLUSH_VALVE_OFF + [self.SCENT_VALVE_OFF + valve_id])
def _send_block(self, block_content):
block = []
block += self.BLOCK_BEGIN
block += block_content
block += self.BLOCK_END
self._send_message(block)
def _send_message(self, message):
msg_str = array.array('B', message).tostring()
self.log.debug("Sending: " + ''.join(format(x, '02x') for x in message))
#self.serial_port.write(msg_str)
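# Wire-format sketch (illustrative): every command is BLOCK_BEGIN + payload +
# BLOCK_END, so open_valve(3) sends the bytes 0x1b 0x26 0x43 0x0d
# (escape, flush valve on, scent valve 0x40+3 on, carriage return) and
# close_valve(3) sends 0x1b 0xa6 0xc3 0x0d.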
|
mit
| 5,824,470,294,240,700,000
| 31.081967
| 93
| 0.615738
| false
| 3.187296
| false
| false
| false
|
dmsurti/reynolds-blender
|
reynolds_blender/fvschemes.py
|
1
|
6197
|
#------------------------------------------------------------------------------
# Reynolds-Blender | The Blender add-on for Reynolds, an OpenFoam toolbox.
#------------------------------------------------------------------------------
# Copyright|
#------------------------------------------------------------------------------
# Deepak Surti (dmsurti@gmail.com)
# Prabhu R (IIT Bombay, prabhu@aero.iitb.ac.in)
# Shivasubramanian G (IIT Bombay, sgopalak@iitb.ac.in)
#------------------------------------------------------------------------------
# License
#
# This file is part of reynolds-blender.
#
# reynolds-blender is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# reynolds-blender is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with reynolds-blender. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------
# -----------
# bpy imports
# -----------
import bpy, bmesh
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
IntVectorProperty,
FloatVectorProperty,
CollectionProperty
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
UIList
)
from bpy.path import abspath
from mathutils import Matrix, Vector
# --------------
# python imports
# --------------
import operator
import os
import pathlib
# ------------------------
# reynolds blender imports
# ------------------------
from reynolds_blender.gui.register import register_classes, unregister_classes
from reynolds_blender.gui.attrs import set_scene_attrs, del_scene_attrs
from reynolds_blender.gui.custom_operator import create_custom_operators
from reynolds_blender.gui.renderer import ReynoldsGUIRenderer
# ----------------
# reynolds imports
# ----------------
from reynolds.dict.parser import ReynoldsFoamDict
from reynolds.foam.cmd_runner import FoamCmdRunner
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
def generate_laplacianFoam_fvschemes(fvschemes, scene):
fvschemes['ddtSchemes']['default'] = scene.ddt_schemes_default
fvschemes['gradSchemes']['default'] = scene.grad_schemes_default
fvschemes['gradSchemes']['grad(T)'] = scene.grad_schemes_grad_T
fvschemes['divSchemes']['default'] = scene.div_schemes_default
fvschemes['laplacianSchemes']['default'] = scene.lap_schemes_default
fvschemes['laplacianSchemes']['laplacian(DT,T)'] = scene.lap_schemes_dt_t
fvschemes['interpolationSchemes']['default'] = scene.interp_schemes_default
fvschemes['snGradSchemes']['default'] = scene.sngrad_schemes_default
fvschemes['fluxRequired']['default'] = scene.flux_required_default
fvschemes['fluxRequired']['T'] = scene.flux_required_t
def generate_icoFoam_fvschemes(fvschemes, scene):
fvschemes['ddtSchemes']['default'] = scene.ddt_schemes_default
fvschemes['gradSchemes']['default'] = scene.grad_schemes_default
fvschemes['gradSchemes']['grad(p)'] = scene.grad_schemes_grad_p
fvschemes['divSchemes']['default'] = scene.div_schemes_default
fvschemes['divSchemes']['div(phi,U)'] = scene.div_schemes_phi_U
fvschemes['laplacianSchemes']['default'] = scene.lap_schemes_default
fvschemes['interpolationSchemes']['default'] = scene.interp_schemes_default
fvschemes['snGradSchemes']['default'] = scene.sngrad_schemes_default
# ------------------------------------------------------------------------
# Panel
# ------------------------------------------------------------------------
class FVSchemesOperator(bpy.types.Operator):
bl_idname = "reynolds.of_fvschemes"
bl_label = "FVSchemes"
@classmethod
def poll(cls, context):
return True
def execute(self, context):
scene = context.scene
print('Generate fvschemes for solver: ' + scene.solver_name)
abs_case_dir_path = bpy.path.abspath(scene.case_dir_path)
fvschemes = ReynoldsFoamDict('fvSchemes.foam', solver_name=scene.solver_name)
if scene.solver_name == 'laplacianFoam':
generate_laplacianFoam_fvschemes(fvschemes, scene)
elif scene.solver_name == 'icoFoam':
generate_icoFoam_fvschemes(fvschemes, scene)
system_dir = os.path.join(abs_case_dir_path, "system")
if not os.path.exists(system_dir):
os.makedirs(system_dir)
fvschemes_file_path = os.path.join(system_dir, "fvSchemes")
with open(fvschemes_file_path, "w+") as f:
f.write(str(fvschemes))
return {'FINISHED'}
# Return True to force redraw
def check(self, context):
return True
def invoke(self, context, event):
scene = context.scene
return context.window_manager.invoke_props_dialog(self, width=1000)
def draw(self, context):
layout = self.layout
scene = context.scene
gui_renderer = ReynoldsGUIRenderer(scene, layout,
scene.solver_name + 'Schemes.yaml')
gui_renderer.render()
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
register_classes(__name__)
def unregister():
unregister_classes(__name__)
if __name__ == "__main__":
register()
|
gpl-3.0
| -8,835,433,186,925,909,000
| 39.503268
| 85
| 0.555591
| false
| 4.063607
| false
| false
| false
|
maximinus/SPQR
|
setup.py
|
1
|
2499
|
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# setup code for game
# for now, you can only set the game resolution
import sys, pygame
from pygame.locals import *
from scripts import spqr_defines as SPQR
from scripts import spqr_gui as SGFX
from scripts import spqr_window as SWINDOW
from scripts import spqr_widgets as SWIDGET
from scripts import spqr_ybuild as SYAML
SCREEN_WIDTH = 285
SCREEN_HEIGHT = 192
def setupWindow():
SYAML.createWindow("../data/layouts/setup_window.yml")
def setupWindow2():
# get a fullsize window, and add the options to it
window = SWINDOW.CWindow(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,
"", False, "main-window")
window.fillWindowImage()
# an optionmenu, a label, a seperator and 2 buttons
label1 = SWIDGET.buildLabel("Resolution")
label1.rect.x = 29
label1.rect.y = 26
label2 = SWIDGET.buildLabel("Play Music")
label2.rect.x = 32
label2.rect.y = 64
music = SWIDGET.CCheckBox(140, 64, True)
label3 = SWIDGET.buildLabel("Show intro")
label3.rect.x = 30
label3.rect.y = 100
intro = SWIDGET.CCheckBox(140, 100, True)
options = SWIDGET.COptionMenu(124, 20, ["800x600", "1024x768", "Fullscreen"])
options.describe = "opt-Resolution"
sepbar = SWIDGET.CSeperator(6 ,label1.rect.y + 106, SCREEN_WIDTH - 9)
ok_button = SWIDGET.CButton(165, 148, "OK")
ok_button.callbacks.mouse_lclk = okClick
cancel_button = SWIDGET.CButton(50, 148, "Cancel")
cancel_button.callbacks.mouse_lclk = cancelClick
for i in [options, label1, label2, music, label3, intro,
sepbar, ok_button, cancel_button]:
i.active = True
window.addWidget(i)
# only 1 window, set it modal
window.modal = True
SGFX.gui.addWindow(window)
if __name__ == "__main__":
SGFX.gui.mainInit(SCREEN_WIDTH, SCREEN_HEIGHT, False, False)
setupWindow()
SGFX.gui.updateGUI()
SGFX.gui.mainLoop()
|
gpl-3.0
| 7,500,770,195,348,641,000
| 30.632911
| 78
| 0.732693
| false
| 3.104348
| false
| false
| false
|
alexweav/Learny-McLearnface
|
LearnyMcLearnface/NeuralNetwork.py
|
1
|
5358
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 06 14:34:21 2016
@author: Alexander Weaver
"""
import numpy as np
from . import Layers as layers
from . import Utils as utils
class NeuralNetwork(object):
"""
Initializes a neural network.
Takes a dictionary of initialization options.
"""
def __init__(self, options):
self.input_dim = options['input_dim']
self.data_type = options.setdefault('data_type', np.float32)
self.init_scheme = options.setdefault('init_scheme', 'xavier')
self.layers = []
self.num_layers = 0
"""
Adds a layer to the neural network.
The layer must be of a valid type, and is associated with a dictionary.
If the layer has any special options or hyperparameters, these are indicated in the dictionary.
Otherwise, the dictionary is empty.
"""
def add_layer(self, layer_type, params):
if not self.layers:
in_dim = self.input_dim
else:
in_dim = self.layers[-1].out_dim
if 'weight_scale' in params:
weight_scale = params['weight_scale']
elif self.init_scheme == 'xavier':
weight_scale = 1./np.sqrt(in_dim)
if layer_type == 'SoftmaxLoss':
layer = layers.SoftmaxLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'SVMLoss':
layer = layers.SVMLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'Affine':
layer = layers.AffineLayer(in_dim, params['neurons'], weight_scale, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Batchnorm':
layer = layers.BatchnormLayer(in_dim, params['decay'], self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Dropout':
if 'seed' in params:
layer = layers.DropoutLayer(in_dim, params['dropout_param'], seed=params['seed'])
else:
layer = layers.DropoutLayer(in_dim, params['dropout_param'])
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'PReLU':
layer = layers.PReLULayer(in_dim, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'ReLU':
layer = layers.ReLULayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Sigmoid':
layer = layers.SigmoidLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Tanh':
layer = layers.TanhLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
else:
raise InvalidLayerException('Invalid layer: ' + layer_type)
"""
Performs forward propagation on the network, pushing a tensor through each layer in sequence.
Does not perform final layer classification.
"""
def forward(self, X, train=False):
X = X.astype(self.data_type)
forward_tensor = X
for layer in self.layers:
if layer == self.layers[-1]:
return forward_tensor
            if (isinstance(layer, layers.DropoutLayer) or isinstance(layer, layers.BatchnormLayer)) and train:
forward_tensor = layer.forward_train(forward_tensor)
else:
forward_tensor = layer.forward(forward_tensor)
"""
Performs forward propagation, and performs final layer classification.
Returns an NxC matrix of class scores per given example.
"""
def classify(self, X):
X = X.astype(self.data_type)
scores = self.forward(X)
return self.layers[-1].evaluate(scores)
"""
    Given a set of training examples and their corresponding class labels, performs forward propagation and then
returns the final layer classifier loss and the derivative of that loss function.
"""
def loss(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
scores = self.forward(X, train=True)
loss, dx = self.layers[-1].loss(scores, y)
squared_sum = 0.0
for layer in self.layers:
if isinstance(layer, layers.AffineLayer):
squared_sum += np.sum(layer.W * layer.W)
loss += 0.5 * reg_param * squared_sum
return loss, dx
"""
    Takes a set of training examples and their corresponding class labels.
Performs forward propagation, executes the final layer classifier loss function.
Then, performs backpropagation on the network and saves intermediate derivatives to the respective layers.
Returns the classifier loss and its derivative for progress reporting purposes.
"""
def backward(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
loss, dx = self.loss(X, y, reg_param)
for layer in reversed(self.layers):
if layer == self.layers[-1]:
continue
dx = layer.backward(dx)
if isinstance(layer, layers.AffineLayer):
layer.dW += reg_param * layer.W
return loss, dx
class InvalidLayerException(Exception):
pass
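# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal end-to-end run of the API defined above. The layer names
# ('Affine', 'ReLU', 'SoftmaxLoss') and option keys come from __init__() and
# add_layer(); the shapes, batch size and regularization strength are
# illustrative assumptions only. Because of the relative imports at the top,
# run it as a module, e.g. `python -m LearnyMcLearnface.NeuralNetwork`.
if __name__ == '__main__':
    net = NeuralNetwork({'input_dim': 784})
    net.add_layer('Affine', {'neurons': 100})     # hidden layer, Xavier-scaled
    net.add_layer('ReLU', {})
    net.add_layer('Affine', {'neurons': 10})      # one output unit per class
    net.add_layer('SoftmaxLoss', {})              # classifier / loss layer
    X = np.random.randn(32, 784)                  # fake batch of 32 examples
    y = np.random.randint(0, 10, size=32)         # fake integer labels
    loss, _ = net.backward(X, y, reg_param=1e-3)  # forward + backward pass
    print('initial loss:', loss)                  # roughly ln(10) for random weights
    print('scores shape:', net.classify(X).shape)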
|
mit
| -489,052,098,223,998,340
| 37.553957
| 110
| 0.591639
| false
| 4.09633
| false
| false
| false
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/ndimage/morphology.py
|
1
|
80212
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _nd_image
from . import _ni_support
from . import filters
__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
'binary_dilation', 'binary_opening', 'binary_closing',
'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
'morphological_gradient', 'morphological_laplace', 'white_tophat',
'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
'distance_transform_edt']
def _center_is_true(structure, origin):
structure = numpy.array(structure)
coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
origin)])
return bool(structure[coor])
def iterate_structure(structure, iterations, origin=None):
"""
Iterate a structure by dilating it with itself.
Parameters
----------
structure : array_like
Structuring element (an array of bools, for example), to be dilated with
itself.
iterations : int
number of dilations performed on the structure with itself
origin : optional
If origin is None, only the iterated structure is returned. If
not, a tuple of the iterated structure and the modified origin is
returned.
Returns
-------
iterate_structure : ndarray of bools
A new structuring element obtained by dilating `structure`
(`iterations` - 1) times with itself.
See also
--------
generate_binary_structure
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct.astype(int)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
>>> ndimage.iterate_structure(struct, 2).astype(int)
array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
>>> ndimage.iterate_structure(struct, 3).astype(int)
array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]])
"""
structure = numpy.asarray(structure)
if iterations < 2:
return structure.copy()
ni = iterations - 1
shape = [ii + ni * (ii - 1) for ii in structure.shape]
pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
slc = [slice(pos[ii], pos[ii] + structure.shape[ii], None)
for ii in range(len(shape))]
out = numpy.zeros(shape, bool)
out[slc] = structure != 0
out = binary_dilation(out, structure, iterations=ni)
if origin is None:
return out
else:
origin = _ni_support._normalize_sequence(origin, structure.ndim)
origin = [iterations * o for o in origin]
return out, origin
def generate_binary_structure(rank, connectivity):
"""
Generate a binary structure for binary morphological operations.
Parameters
----------
rank : int
Number of dimensions of the array to which the structuring element
will be applied, as returned by `np.ndim`.
connectivity : int
`connectivity` determines which elements of the output array belong
to the structure, i.e. are considered as neighbors of the central
element. Elements up to a squared distance of `connectivity` from
the center are considered neighbors. `connectivity` may range from 1
(no diagonal elements are neighbors) to `rank` (all elements are
neighbors).
Returns
-------
output : ndarray of bools
Structuring element which may be used for binary morphological
operations, with `rank` dimensions and all dimensions equal to 3.
See also
--------
iterate_structure, binary_dilation, binary_erosion
Notes
-----
`generate_binary_structure` can only create structuring elements with
dimensions equal to 3, i.e. minimal dimensions. For larger structuring
elements, that are useful e.g. for eroding large objects, one may either
use `iterate_structure`, or create directly custom arrays with
numpy functions such as `numpy.ones`.
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> a = np.zeros((5,5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
>>> b
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
>>> struct = ndimage.generate_binary_structure(2, 2)
>>> struct
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> struct = ndimage.generate_binary_structure(3, 1)
>>> struct # no diagonal elements
array([[[False, False, False],
[False, True, False],
[False, False, False]],
[[False, True, False],
[ True, True, True],
[False, True, False]],
[[False, False, False],
[False, True, False],
[False, False, False]]], dtype=bool)
"""
if connectivity < 1:
connectivity = 1
if rank < 1:
if connectivity < 1:
return numpy.array(0, dtype=bool)
else:
return numpy.array(1, dtype=bool)
output = numpy.fabs(numpy.indices([3] * rank) - 1)
output = numpy.add.reduce(output, 0)
return numpy.asarray(output <= connectivity, dtype=bool)
def _binary_erosion(input, structure, iterations, mask, output,
border_value, origin, invert, brute_force):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
else:
structure = numpy.asarray(structure)
structure = structure.astype(bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have same dimensionality')
if not structure.flags.contiguous:
structure = structure.copy()
if numpy.product(structure.shape, axis=0) < 1:
raise RuntimeError('structure must not be empty')
if mask is not None:
mask = numpy.asarray(mask)
if mask.shape != input.shape:
raise RuntimeError('mask and input must have equal sizes')
origin = _ni_support._normalize_sequence(origin, input.ndim)
cit = _center_is_true(structure, origin)
if isinstance(output, numpy.ndarray):
if numpy.iscomplexobj(output):
raise TypeError('Complex output type not supported')
else:
output = bool
output, return_value = _ni_support._get_output(output, input)
if iterations == 1:
_nd_image.binary_erosion(input, structure, mask, output,
border_value, origin, invert, cit, 0)
return return_value
elif cit and not brute_force:
changed, coordinate_list = _nd_image.binary_erosion(input,
structure, mask, output, border_value, origin, invert, cit,
1)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
if mask is not None:
msk = numpy.asarray(mask)
msk = mask.astype(numpy.int8)
if msk is mask:
msk = mask.copy()
mask = msk
if not structure.flags.contiguous:
structure = structure.copy()
_nd_image.binary_erosion2(output, structure, mask, iterations - 1,
origin, invert, coordinate_list)
return return_value
else:
tmp_in = numpy.zeros(input.shape, bool)
if return_value is None:
tmp_out = output
else:
tmp_out = numpy.zeros(input.shape, bool)
if not iterations & 1:
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(input, structure, mask,
tmp_out, border_value, origin, invert, cit, 0)
ii = 1
while (ii < iterations) or (iterations < 1) and changed:
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(tmp_in, structure, mask,
tmp_out, border_value, origin, invert, cit, 0)
ii += 1
if return_value is not None:
return tmp_out
def binary_erosion(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False):
"""
Multi-dimensional binary erosion with a given structuring element.
Binary erosion is a mathematical morphology operation used for image
processing.
Parameters
----------
input : array_like
Binary image to be eroded. Non-zero (True) elements form
the subset to be eroded.
structure : array_like, optional
Structuring element used for the erosion. Non-zero elements are
considered True. If no structuring element is provided, an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The erosion is repeated `iterations` times (one, by default).
If iterations is less than 1, the erosion is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
Returns
-------
binary_erosion : ndarray of bools
Erosion of the input by the structuring element.
See also
--------
grey_erosion, binary_dilation, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for shrinking the shapes in an image. The binary
erosion of an image by a structuring element is the locus of the points
where a superimposition of the structuring element centered on the point
is entirely contained in the set of non-zero elements of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_erosion(a).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> #Erosion removes objects smaller than the structure
>>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 0, brute_force)
def binary_dilation(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False):
"""
Multi-dimensional binary dilation with the given structuring element.
Parameters
----------
input : array_like
Binary array_like to be dilated. Non-zero (True) elements form
the subset to be dilated.
structure : array_like, optional
Structuring element used for the dilation. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The dilation is repeated `iterations` times (one, by default).
If iterations is less than 1, the dilation is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
Returns
-------
binary_dilation : ndarray of bools
Dilation of the input by the structuring element.
See also
--------
grey_dilation, binary_erosion, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for expanding the shapes in an image. The binary
dilation of an image by a structuring element is the locus of the points
covered by the structuring element, when its center lies within the
non-zero points of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a)
array([[False, False, False, False, False],
[False, False, True, False, False],
[False, True, True, True, False],
[False, False, True, False, False],
[False, False, False, False, False]], dtype=bool)
>>> ndimage.binary_dilation(a).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # 3x3 structuring element with connectivity 1, used by default
>>> struct1 = ndimage.generate_binary_structure(2, 1)
>>> struct1
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # 3x3 structuring element with connectivity 2
>>> struct2 = ndimage.generate_binary_structure(2, 2)
>>> struct2
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct1,\\
... iterations=2).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
"""
input = numpy.asarray(input)
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
origin = _ni_support._normalize_sequence(origin, input.ndim)
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 1, brute_force)
def binary_opening(input, structure=None, iterations=1, output=None,
origin=0):
"""
Multi-dimensional binary opening with the given structuring element.
The *opening* of an input image by a structuring element is the
*dilation* of the *erosion* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be opened. Non-zero (True) elements form
the subset to be opened.
structure : array_like, optional
Structuring element used for the opening. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The erosion step of the opening, then the dilation step are each
repeated `iterations` times (one, by default). If `iterations` is
less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_opening : ndarray of bools
Opening of the input by the structuring element.
See also
--------
grey_opening, binary_closing, binary_erosion, binary_dilation,
generate_binary_structure
Notes
-----
*Opening* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of an erosion and a dilation of the
input with the same structuring element. Opening therefore removes
objects smaller than the structuring element.
Together with *closing* (`binary_closing`), opening can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Opening_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:4, 1:4] = 1; a[4, 4] = 1
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
>>> # Opening removes small objects
>>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Opening can also smooth corners
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
>>> # Opening is the dilation of the erosion of the input
>>> ndimage.binary_erosion(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_erosion(input, structure, iterations, None, None, 0,
origin)
return binary_dilation(tmp, structure, iterations, None, output, 0,
origin)
def binary_closing(input, structure=None, iterations=1, output=None,
origin=0):
"""
Multi-dimensional binary closing with the given structuring element.
The *closing* of an input image by a structuring element is the
*erosion* of the *dilation* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be closed. Non-zero (True) elements form
the subset to be closed.
structure : array_like, optional
Structuring element used for the closing. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The dilation step of the closing, then the erosion step are each
repeated `iterations` times (one, by default). If iterations is
        less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_closing : ndarray of bools
Closing of the input by the structuring element.
See also
--------
grey_closing, binary_opening, binary_dilation, binary_erosion,
generate_binary_structure
Notes
-----
*Closing* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of a dilation and an erosion of the
input with the same structuring element. Closing therefore fills
holes smaller than the structuring element.
Together with *opening* (`binary_opening`), closing can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Closing_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:-1, 1:-1] = 1; a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing removes small holes
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing is the erosion of the dilation of the input
>>> ndimage.binary_dilation(a).astype(int)
array([[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0]])
>>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1; a[1:3,3] = 0
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # In addition to removing holes, closing can also
>>> # coarsen boundaries with fine hollows.
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_dilation(input, structure, iterations, None, None, 0,
origin)
return binary_erosion(tmp, structure, iterations, None, output, 0,
origin)
def binary_hit_or_miss(input, structure1=None, structure2=None,
output=None, origin1=0, origin2=None):
"""
Multi-dimensional binary hit-or-miss transform.
The hit-or-miss transform finds the locations of a given pattern
inside the input image.
Parameters
----------
input : array_like (cast to booleans)
Binary image where a pattern is to be detected.
structure1 : array_like (cast to booleans), optional
Part of the structuring element to be fitted to the foreground
(non-zero elements) of `input`. If no value is provided, a
structure of square connectivity 1 is chosen.
structure2 : array_like (cast to booleans), optional
Second part of the structuring element that has to miss completely
the foreground. If no value is provided, the complementary of
`structure1` is taken.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin1 : int or tuple of ints, optional
Placement of the first part of the structuring element `structure1`,
by default 0 for a centered structure.
origin2 : int or tuple of ints, optional
Placement of the second part of the structuring element `structure2`,
by default 0 for a centered structure. If a value is provided for
`origin1` and not for `origin2`, then `origin2` is set to `origin1`.
Returns
-------
binary_hit_or_miss : ndarray
Hit-or-miss transform of `input` with the given structuring
element (`structure1`, `structure2`).
See also
--------
ndimage.morphology, binary_erosion
References
----------
.. [1] http://en.wikipedia.org/wiki/Hit-or-miss_transform
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
>>> structure1
array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
>>> # Find the matches of structure1 in the array a
>>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # Change the origin of the filter
>>> # origin1=1 is equivalent to origin1=(1,1) here
>>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
... origin1=1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure1 is None:
structure1 = generate_binary_structure(input.ndim, 1)
if structure2 is None:
structure2 = numpy.logical_not(structure1)
origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
if origin2 is None:
origin2 = origin1
else:
origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
0, False)
inplace = isinstance(output, numpy.ndarray)
result = _binary_erosion(input, structure2, 1, None, output, 0,
origin2, 1, False)
if inplace:
numpy.logical_not(output, output)
numpy.logical_and(tmp1, output, output)
else:
numpy.logical_not(result, result)
return numpy.logical_and(tmp1, result)
def binary_propagation(input, structure=None, mask=None,
output=None, border_value=0, origin=0):
"""
Multi-dimensional binary propagation with the given structuring element.
Parameters
----------
input : array_like
Binary image to be propagated inside `mask`.
structure : array_like, optional
Structuring element used in the successive dilations. The output
may depend on the structuring element, especially if `mask` has
        several connected components. If no structuring element is
provided, an element is generated with a squared connectivity equal
to one.
mask : array_like, optional
Binary mask defining the region into which `input` is allowed to
propagate.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_propagation : ndarray
Binary propagation of `input` inside `mask`.
Notes
-----
This function is functionally equivalent to calling binary_dilation
    with the number of iterations less than one: iterative dilation until
the result does not change anymore.
The succession of an erosion and propagation inside the original image
can be used instead of an *opening* for deleting small objects while
keeping the contours of larger objects untouched.
References
----------
.. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
.. [2] http://www.qi.tnw.tudelft.nl/Courses/FIP/noframes/fip-Morpholo.html#Heading102
Examples
--------
>>> from scipy import ndimage
>>> input = np.zeros((8, 8), dtype=int)
>>> input[2, 2] = 1
>>> mask = np.zeros((8, 8), dtype=int)
>>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
>>> input
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> mask
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> ndimage.binary_propagation(input, mask=mask).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(input, mask=mask,\\
... structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> # Comparison between opening and erosion+propagation
>>> a = np.zeros((6,6), dtype=int)
>>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
>>> a
array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1]])
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> b = ndimage.binary_erosion(a)
>>> b.astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(b, mask=a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
"""
return binary_dilation(input, structure, -1, mask, output,
border_value, origin)
def binary_fill_holes(input, structure=None, output=None, origin=0):
"""
Fill the holes in binary objects.
Parameters
----------
input : array_like
n-dimensional binary array with holes to be filled
structure : array_like, optional
Structuring element used in the computation; large-size elements
make computations faster but may miss holes separated from the
background by thin regions. The default element (with a square
connectivity equal to one) yields the intuitive result where all
holes in the input have been filled.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int, tuple of ints, optional
Position of the structuring element.
Returns
-------
out : ndarray
Transformation of the initial image `input` where holes have been
filled.
See also
--------
binary_dilation, binary_propagation, label
Notes
-----
The algorithm used in this function consists in invading the complementary
of the shapes in `input` from the outer boundary of the image,
using binary dilations. Holes are not connected to the boundary and are
therefore not invaded. The result is the complementary subset of the
invaded region.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5), dtype=int)
>>> a[1:4, 1:4] = 1
>>> a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_fill_holes(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Too big structuring element
>>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
"""
mask = numpy.logical_not(input)
tmp = numpy.zeros(mask.shape, bool)
inplace = isinstance(output, numpy.ndarray)
if inplace:
binary_dilation(tmp, structure, -1, mask, output, 1, origin)
numpy.logical_not(output, output)
else:
output = binary_dilation(tmp, structure, -1, mask, None, 1,
origin)
numpy.logical_not(output, output)
return output
def grey_erosion(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale erosion, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale erosion is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a minimum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale erosion is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
erosion. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale erosion. Non-zero values give the set of
neighbors of the center over which the minimum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale erosion. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the erosion may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
output : ndarray
Grayscale erosion of `input`.
See also
--------
binary_erosion, grey_dilation, grey_opening, grey_closing
generate_binary_structure, ndimage.minimum_filter
Notes
-----
The grayscale erosion of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = min {input(y) - s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
input image inside a sliding window defined by E.
Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 1:6] = 3
>>> a[4,4] = 2; a[2,3] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 1, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 2, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> footprint = ndimage.generate_binary_structure(2, 1)
>>> footprint
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # Diagonally-connected elements are not considered neighbors
>>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 1, 2, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 1)
def grey_dilation(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale dilation, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale dilation is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a maximum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale dilation is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
dilation. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale dilation. Non-zero values give the set of
neighbors of the center over which the maximum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale dilation. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the dilation may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_dilation : ndarray
Grayscale dilation of `input`.
See also
--------
binary_dilation, grey_erosion, grey_closing, grey_opening
generate_binary_structure, ndimage.maximum_filter
Notes
-----
The grayscale dilation of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = max {input(y) + s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
input image inside a sliding window defined by E.
Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> s = ndimage.generate_binary_structure(2,1)
>>> s
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> ndimage.grey_dilation(a, footprint=s)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 1, 3, 2, 1, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
array([[1, 1, 1, 1, 1, 1, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 1, 1, 1, 1, 1, 1]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
if structure is not None:
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
if footprint is not None:
footprint = numpy.asarray(footprint)
footprint = footprint[tuple([slice(None, None, -1)] *
footprint.ndim)]
input = numpy.asarray(input)
origin = _ni_support._normalize_sequence(origin, input.ndim)
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if footprint is not None:
sz = footprint.shape[ii]
elif structure is not None:
sz = structure.shape[ii]
elif numpy.isscalar(size):
sz = size
else:
sz = size[ii]
if not sz & 1:
origin[ii] -= 1
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 0)
def grey_opening(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale opening.
A greyscale opening consists in the succession of a greyscale erosion,
and a greyscale dilation.
Parameters
----------
input : array_like
Array over which the grayscale opening is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
opening. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale opening.
structure : array of ints, optional
Structuring element used for the grayscale opening. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the opening may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_opening : ndarray
Result of the grayscale opening of `input` with `structure`.
See also
--------
binary_opening, grey_dilation, grey_erosion, grey_closing
generate_binary_structure
Notes
-----
    The action of a grayscale opening with a flat structuring element amounts
    to smoothing high local maxima, whereas binary opening erases small objects.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3, 3] = 50
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 50, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_opening(a, size=(3,3))
array([[ 0, 1, 2, 3, 4, 4],
[ 6, 7, 8, 9, 10, 10],
[12, 13, 14, 15, 16, 16],
[18, 19, 20, 22, 22, 22],
[24, 25, 26, 27, 28, 28],
[24, 25, 26, 27, 28, 28]])
>>> # Note that the local maximum a[3,3] has disappeared
"""
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
return grey_dilation(tmp, size, footprint, structure, output, mode,
cval, origin)
def grey_closing(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale closing.
A greyscale closing consists in the succession of a greyscale dilation,
and a greyscale erosion.
Parameters
----------
input : array_like
Array over which the grayscale closing is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
closing. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale closing.
structure : array of ints, optional
Structuring element used for the grayscale closing. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the closing may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_closing : ndarray
Result of the grayscale closing of `input` with `structure`.
See also
--------
binary_closing, grey_dilation, grey_erosion, grey_opening,
generate_binary_structure
Notes
-----
    The action of a grayscale closing with a flat structuring element amounts
    to smoothing deep local minima, whereas binary closing fills small holes.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3,3] = 0
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 0, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_closing(a, size=(3,3))
array([[ 7, 7, 8, 9, 10, 11],
[ 7, 7, 8, 9, 10, 11],
[13, 13, 14, 15, 16, 17],
[19, 19, 20, 20, 22, 23],
[25, 25, 26, 27, 28, 29],
[31, 31, 32, 33, 34, 35]])
>>> # Note that the local minimum a[3,3] has disappeared
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
return grey_erosion(tmp, size, footprint, structure, output, mode,
cval, origin)
def morphological_gradient(input, size=None, footprint=None,
structure=None, output=None, mode="reflect",
cval=0.0, origin=0):
"""
Multi-dimensional morphological gradient.
The morphological gradient is calculated as the difference between a
dilation and an erosion of the input with a given structuring element.
Parameters
----------
input : array_like
        Array over which to compute the morphological gradient.
size : tuple of ints
Shape of a flat and full structuring element used for the mathematical
morphology operations. Optional if `footprint` or `structure` is
provided. A larger `size` yields a more blurred gradient.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the morphology operations. Larger footprints
give a more blurred morphological gradient.
structure : array of ints, optional
Structuring element used for the morphology operations.
`structure` may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the morphological gradient
may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
morphological_gradient : ndarray
Morphological gradient of `input`.
See also
--------
grey_dilation, grey_erosion, ndimage.gaussian_gradient_magnitude
Notes
-----
For a flat structuring element, the morphological gradient
computed at a given point corresponds to the maximal difference
between elements of the input among the elements covered by the
structuring element centered on the point.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # The morphological gradient is computed as the difference
>>> # between a dilation and an erosion
>>> ndimage.grey_dilation(a, size=(3,3)) -\\
... ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 2, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
return numpy.subtract(tmp, output, output)
else:
return (tmp - grey_erosion(input, size, footprint, structure,
None, mode, cval, origin))
def morphological_laplace(input, size=None, footprint=None,
structure=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional morphological laplace.
Parameters
----------
input : array_like
Input.
size : int or sequence of ints, optional
See `structure`.
footprint : bool or ndarray, optional
See `structure`.
structure : structure, optional
Either `size`, `footprint`, or the `structure` must be provided.
output : ndarray, optional
An output array can optionally be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled.
For 'constant' mode, values beyond borders are set to be `cval`.
Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if mode is 'constant'.
Default is 0.0
origin : origin, optional
The origin parameter controls the placement of the filter.
Returns
-------
morphological_laplace : ndarray
Output
"""
tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
numpy.add(tmp1, output, output)
numpy.subtract(output, input, output)
return numpy.subtract(output, input, output)
else:
tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
numpy.add(tmp1, tmp2, tmp2)
numpy.subtract(tmp2, input, tmp2)
numpy.subtract(tmp2, input, tmp2)
return tmp2
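# --- Added usage sketch (not part of the original scipy source) ---
# morphological_laplace() above computes grey_dilation + grey_erosion - 2*input,
# so its output can be cross-checked against those two filters directly. The
# input array below is an arbitrary illustration:
#
#   >>> from scipy import ndimage
#   >>> a = np.zeros((5, 5), dtype=int)
#   >>> a[2, 2] = 3
#   >>> lap = ndimage.morphological_laplace(a, size=(3, 3))
#   >>> (lap == ndimage.grey_dilation(a, size=(3, 3))
#   ...        + ndimage.grey_erosion(a, size=(3, 3)) - 2 * a).all()
#   True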
def white_tophat(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional white tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of elements of a flat structuring element
used for the white tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'.
Default is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
output : ndarray
Result of the filter of `input` with `structure`.
See also
--------
black_tophat
"""
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_dilation(tmp, size, footprint, structure, output, mode, cval,
origin)
return numpy.subtract(input, output, output)
else:
tmp = grey_dilation(tmp, size, footprint, structure, None, mode,
cval, origin)
return input - tmp
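# --- Added usage sketch (not part of the original scipy source) ---
# The white tophat implemented above is input - grey_opening(input), i.e. it
# extracts bright features that are smaller than the structuring element.
# Cross-check with arbitrary data:
#
#   >>> from scipy import ndimage
#   >>> a = np.arange(25).reshape(5, 5)
#   >>> wth = ndimage.white_tophat(a, size=(3, 3))
#   >>> (wth == a - ndimage.grey_opening(a, size=(3, 3))).all()
#   True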
def black_tophat(input, size=None, footprint=None,
structure=None, output=None, mode="reflect",
cval=0.0, origin=0):
"""
Multi-dimensional black tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints, optional
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the black tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
black_tophat : ndarray
Result of the filter of `input` with `structure`.
See also
--------
white_tophat, grey_opening, grey_closing
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(tmp, size, footprint, structure, output, mode, cval,
origin)
return numpy.subtract(output, input, output)
else:
tmp = grey_erosion(tmp, size, footprint, structure, None, mode,
cval, origin)
return tmp - input
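# --- Added usage sketch (not part of the original scipy source) ---
# The black tophat implemented above is grey_closing(input) - input, i.e. it
# extracts dark features (holes) smaller than the structuring element.
#
#   >>> from scipy import ndimage
#   >>> a = np.arange(25).reshape(5, 5)
#   >>> bth = ndimage.black_tophat(a, size=(3, 3))
#   >>> (bth == ndimage.grey_closing(a, size=(3, 3)) - a).all()
#   True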
def distance_transform_bf(input, metric="euclidean", sampling=None,
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Distance transform function by a brute force algorithm.
    This function calculates the distance transform of the `input`, by
    replacing each foreground element (non-zero values) with its
    shortest distance to the background (any zero-valued element).
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input
metric : str, optional
Three types of distance metric are supported: 'euclidean', 'taxicab'
and 'chessboard'.
sampling : {int, sequence of ints}, optional
This parameter is only used in the case of the euclidean `metric`
distance transform.
The sampling along each axis can be given by the `sampling` parameter
which should be a sequence of length equal to the input rank, or a
        single number in which case the `sampling` is assumed to be equal along all
axes.
return_distances : bool, optional
The `return_distances` flag can be used to indicate if the distance
transform is returned.
The default is True.
return_indices : bool, optional
The `return_indices` flags can be used to indicate if the feature
transform is returned.
The default is False.
distances : float64 ndarray, optional
Optional output array to hold distances (if `return_distances` is
True).
indices : int64 ndarray, optional
Optional output array to hold indices (if `return_indices` is True).
Returns
-------
distances : ndarray
Distance array if `return_distances` is True.
indices : ndarray
Indices array if `return_indices` is True.
Notes
-----
This function employs a slow brute force algorithm, see also the
function distance_transform_cdt for more efficient taxicab and
chessboard algorithms.
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
tmp1 = numpy.asarray(input) != 0
struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
tmp2 = binary_dilation(tmp1, struct)
tmp2 = numpy.logical_xor(tmp1, tmp2)
tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
metric = metric.lower()
if metric == 'euclidean':
metric = 1
elif metric in ['taxicab', 'cityblock', 'manhattan']:
metric = 2
elif metric == 'chessboard':
metric = 3
else:
raise RuntimeError('distance metric not supported')
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if return_indices:
ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
else:
ft = None
if return_distances:
if distances is None:
if metric == 1:
dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
else:
dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
else:
if distances.shape != tmp1.shape:
raise RuntimeError('distances array has wrong shape')
if metric == 1:
if distances.dtype.type != numpy.float64:
raise RuntimeError('distances array must be float64')
else:
if distances.dtype.type != numpy.uint32:
raise RuntimeError('distances array must be uint32')
dt = distances
else:
dt = None
_nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
if return_indices:
if isinstance(indices, numpy.ndarray):
if indices.dtype.type != numpy.int32:
                raise RuntimeError('indices must be of int32 type')
if indices.shape != (tmp1.ndim,) + tmp1.shape:
raise RuntimeError('indices has wrong shape')
tmp2 = indices
else:
tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
ft = numpy.ravel(ft)
for ii in range(tmp2.shape[0]):
rtmp = numpy.ravel(tmp2[ii, ...])[ft]
rtmp.shape = tmp1.shape
tmp2[ii, ...] = rtmp
ft = tmp2
# construct and return the result
result = []
if return_distances and not isinstance(distances, numpy.ndarray):
result.append(dt)
if return_indices and not isinstance(indices, numpy.ndarray):
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
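# Illustrative usage sketch (added for clarity, not part of the original
# module; the input array is assumed and the result is shown schematically):
# distances are computed at the non-zero (foreground) positions, measured to
# the nearest zero-valued element.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.zeros((5, 5), dtype=int)
#   >>> a[1:4, 1:4] = 1
#   >>> ndimage.distance_transform_bf(a, metric='taxicab')
#   [[0 0 0 0 0]
#    [0 1 1 1 0]
#    [0 1 2 1 0]
#    [0 1 1 1 0]
#    [0 0 0 0 0]]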
def distance_transform_cdt(input, metric='chessboard',
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Distance transform for chamfer type of transforms.
Parameters
----------
input : array_like
Input
metric : {'chessboard', 'taxicab'}, optional
The `metric` determines the type of chamfering that is done. If the
`metric` is equal to 'taxicab' a structure is generated using
generate_binary_structure with a squared distance equal to 1. If
        the `metric` is equal to 'chessboard', a structure is generated
using generate_binary_structure with a squared distance equal to
the dimensionality of the array. These choices correspond to the
common interpretations of the 'taxicab' and the 'chessboard'
distance metrics in two dimensions.
The default for `metric` is 'chessboard'.
return_distances, return_indices : bool, optional
The `return_distances`, and `return_indices` flags can be used to
indicate if the distance transform, the feature transform, or both
must be returned.
If the feature transform is returned (``return_indices=True``),
the index of the closest background element is returned along
the first axis of the result.
The `return_distances` default is True, and the
`return_indices` default is False.
distances, indices : ndarrays of int32, optional
The `distances` and `indices` arguments can be used to give optional
output arrays that must be the same shape as `input`.
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
input = numpy.asarray(input)
if metric in ['taxicab', 'cityblock', 'manhattan']:
rank = input.ndim
metric = generate_binary_structure(rank, 1)
elif metric == 'chessboard':
rank = input.ndim
metric = generate_binary_structure(rank, rank)
else:
try:
metric = numpy.asarray(metric)
except:
raise RuntimeError('invalid metric provided')
for s in metric.shape:
if s != 3:
raise RuntimeError('metric sizes must be equal to 3')
if not metric.flags.contiguous:
metric = metric.copy()
if dt_inplace:
if distances.dtype.type != numpy.int32:
raise RuntimeError('distances must be of int32 type')
if distances.shape != input.shape:
raise RuntimeError('distances has wrong shape')
dt = distances
dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
else:
dt = numpy.where(input, -1, 0).astype(numpy.int32)
rank = dt.ndim
if return_indices:
sz = numpy.product(dt.shape, axis=0)
ft = numpy.arange(sz, dtype=numpy.int32)
ft.shape = dt.shape
else:
ft = None
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
ft = numpy.ravel(ft)
if ft_inplace:
if indices.dtype.type != numpy.int32:
            raise RuntimeError('indices must be of int32 type')
if indices.shape != (dt.ndim,) + dt.shape:
raise RuntimeError('indices has wrong shape')
tmp = indices
else:
tmp = numpy.indices(dt.shape, dtype=numpy.int32)
for ii in range(tmp.shape[0]):
rtmp = numpy.ravel(tmp[ii, ...])[ft]
rtmp.shape = dt.shape
tmp[ii, ...] = rtmp
ft = tmp
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
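# Illustrative usage sketch (added for clarity, not part of the original
# module; the input array is assumed and results are shown schematically):
# with a single background pixel in the corner, the 'taxicab' and
# 'chessboard' chamfers give different distances.
#
#   >>> import numpy as np
#   >>> from scipy import ndimage
#   >>> a = np.ones((3, 3), dtype=int)
#   >>> a[0, 0] = 0
#   >>> ndimage.distance_transform_cdt(a, metric='taxicab')
#   [[0 1 2]
#    [1 2 3]
#    [2 3 4]]
#   >>> ndimage.distance_transform_cdt(a, metric='chessboard')
#   [[0 1 2]
#    [1 1 2]
#    [2 2 2]]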
def distance_transform_edt(input, sampling=None,
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Exact euclidean distance transform.
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input data to transform. Can be any type but will be converted
into binary: 1 wherever input equates to True, 0 elsewhere.
sampling : float or int, or sequence of same, optional
Spacing of elements along each dimension. If a sequence, must be of
length equal to the input rank; if a single number, this is used for
all axes. If not specified, a grid spacing of unity is implied.
return_distances : bool, optional
Whether to return distance matrix. At least one of
return_distances/return_indices must be True. Default is True.
return_indices : bool, optional
Whether to return indices matrix. Default is False.
distances : ndarray, optional
Used for output of distance array, must be of type float64.
indices : ndarray, optional
Used for output of indices, must be of type int32.
Returns
-------
distance_transform_edt : ndarray or list of ndarrays
Either distance matrix, index matrix, or a list of the two,
depending on `return_x` flags and `distance` and `indices`
input parameters.
Notes
-----
The euclidean distance transform gives values of the euclidean
distance::
n
y_i = sqrt(sum (x[i]-b[i])**2)
i
where b[i] is the background point (value 0) with the smallest
Euclidean distance to input points x[i], and n is the
number of dimensions.
Examples
--------
>>> from scipy import ndimage
>>> a = np.array(([0,1,1,1,1],
... [0,0,1,1,1],
... [0,1,1,1,1],
... [0,1,1,1,0],
... [0,1,1,0,0]))
>>> ndimage.distance_transform_edt(a)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
With a sampling of 2 units along x, 1 along y:
>>> ndimage.distance_transform_edt(a, sampling=[2,1])
array([[ 0. , 1. , 2. , 2.8284, 3.6056],
[ 0. , 0. , 1. , 2. , 3. ],
[ 0. , 1. , 2. , 2.2361, 2. ],
[ 0. , 1. , 2. , 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
Asking for indices as well:
>>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
>>> inds
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
With arrays provided for inplace outputs:
>>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
>>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
>>> indices
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
# calculate the feature transform
input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, input.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if ft_inplace:
ft = indices
if ft.shape != (input.ndim,) + input.shape:
raise RuntimeError('indices has wrong shape')
if ft.dtype.type != numpy.int32:
raise RuntimeError('indices must be of int32 type')
else:
ft = numpy.zeros((input.ndim,) + input.shape,
dtype=numpy.int32)
_nd_image.euclidean_feature_transform(input, sampling, ft)
# if requested, calculate the distance transform
if return_distances:
dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
dt = dt.astype(numpy.float64)
if sampling is not None:
for ii in range(len(sampling)):
dt[ii, ...] *= sampling[ii]
numpy.multiply(dt, dt, dt)
if dt_inplace:
dt = numpy.add.reduce(dt, axis=0)
if distances.shape != dt.shape:
                raise RuntimeError('distances has wrong shape')
            if distances.dtype.type != numpy.float64:
                raise RuntimeError('distances must be of float64 type')
numpy.sqrt(dt, distances)
else:
dt = numpy.add.reduce(dt, axis=0)
dt = numpy.sqrt(dt)
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
|
mit
| 3,503,458,796,533,075,000
| 35.862132
| 119
| 0.558495
| false
| 3.609414
| false
| false
| false
|
kuzmoyev/Google-Calendar-Simple-API
|
tests/test_attendee.py
|
1
|
2820
|
from unittest import TestCase
from gcsa.attendee import Attendee, ResponseStatus
from gcsa.serializers.attendee_serializer import AttendeeSerializer
class TestAttendeeSerializer(TestCase):
def test_to_json(self):
attendee = Attendee(
email='mail@gmail.com',
display_name='Guest',
comment='I do not know him',
optional=True,
additional_guests=2,
response_status=ResponseStatus.NEEDS_ACTION
)
attendee_json = AttendeeSerializer.to_json(attendee)
self.assertEqual(attendee.email, attendee_json['email'])
self.assertEqual(attendee.display_name, attendee_json['displayName'])
self.assertEqual(attendee.comment, attendee_json['comment'])
self.assertEqual(attendee.optional, attendee_json['optional'])
self.assertNotIn('resource', attendee_json)
self.assertEqual(attendee.additional_guests, attendee_json['additionalGuests'])
self.assertEqual(attendee.response_status, attendee_json['responseStatus'])
def test_to_object(self):
attendee_json = {
'email': 'mail2@gmail.com',
'displayName': 'Guest2',
'comment': 'I do not know him either',
'optional': True,
'resource': True,
'additionalGuests': 1,
'responseStatus': ResponseStatus.ACCEPTED
}
attendee = AttendeeSerializer.to_object(attendee_json)
self.assertEqual(attendee_json['email'], attendee.email)
self.assertEqual(attendee_json['displayName'], attendee.display_name)
self.assertEqual(attendee_json['comment'], attendee.comment)
self.assertEqual(attendee_json['optional'], attendee.optional)
self.assertEqual(attendee_json['resource'], attendee.is_resource)
self.assertEqual(attendee_json['additionalGuests'], attendee.additional_guests)
self.assertEqual(attendee_json['responseStatus'], attendee.response_status)
attendee_json_str = """{
"email": "mail3@gmail.com",
"displayName": "Guest3",
"comment": "Who are these people?",
"optional": true,
"resource": false,
"additionalGuests": 66,
"responseStatus": "tentative"
}"""
serializer = AttendeeSerializer(attendee_json_str)
attendee = serializer.get_object()
self.assertEqual(attendee.email, "mail3@gmail.com")
self.assertEqual(attendee.display_name, "Guest3")
self.assertEqual(attendee.comment, "Who are these people?")
self.assertEqual(attendee.optional, True)
self.assertEqual(attendee.is_resource, False)
self.assertEqual(attendee.additional_guests, 66)
self.assertEqual(attendee.response_status, "tentative")
|
mit
| -4,699,221,430,244,430,000
| 40.470588
| 87
| 0.648582
| false
| 4.171598
| true
| false
| false
|
ParanoidNemo/twolame
|
cloud.py
|
1
|
1896
|
#! /usr/bin/env python
# Copyright (C) 2015 by Andrea Calzavacca <paranoid.nemo@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
import configparser
import rc
from spam import clouds
from spam import beshell
from spam import methods
rc_file = os.path.join(beshell.Theme.path(), 'twolamerc')
format_file = os.path.join(beshell.Theme.path(), 'twolame', 'cloud_one.format')
rc.get_rc(rc_file)
css = os.path.join(beshell.Theme.path(), 'style.css.d', rc.CSS)
cloud_info = []
if rc.RCLONE == '1':
parser = configparser.ConfigParser()
rclone_cfg = rc.RCLONE_CONFIG_FILE
read_cfg = parser.read(rclone_cfg)
services_list = []
for item in parser.sections():
service = parser.get(item, 'type')
l = clouds.Rclone.space_info(item, service)
d = methods.create_dict(l)
outstring = methods.insert_data(methods.format_string(format_file), d)
cloud_info.append(outstring)
if rc.MEGA == '1':
l = clouds.Mega.space_info(rc.SIZE)
d = methods.create_dict(l)
outstring = methods.insert_data(methods.format_string(format_file), d)
cloud_info.append(outstring)
info = methods.create_dict(cloud_info)
info['{x}'] = css
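# Note (assumption added for clarity, not part of the original script): rclone
# keeps its remotes in an INI-style config file, one section per remote, e.g.
#
#   [mydropbox]
#   type = dropbox
#   token = ...
#
# The configparser loop above reads each section's 'type' and asks
# clouds.Rclone.space_info() for that remote's usage, so every configured
# remote contributes one formatted entry to cloud_info.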
|
gpl-3.0
| 2,340,330,728,297,430,000
| 30.6
| 79
| 0.699367
| false
| 3.263339
| false
| false
| false
|
mauriceyap/ccm-assistant
|
tests/test_alexa_main.py
|
1
|
4199
|
import unittest
from mock import patch
import alexa_main
TEST_APPLICATION_ID = "aPpLiCaTiOnId12345"
TEST_REQUEST_ID = "rEqUeStId123"
class TestAlexaMain(unittest.TestCase):
def setUp(self):
alexa_main.config.APPLICATION_ID = TEST_APPLICATION_ID
@patch("alexa_main.events.on_session_started")
@patch("alexa_main.events.on_launch")
def test_lambda_handler_throws_error_with_invalid_session_id(self, on_launch,
on_session_started):
test_invalid_application_id = "iAmAnInvalidId00000"
test_session_event_with_invalid_id = {
'session': {
'application': {
'applicationId': test_invalid_application_id
},
'new': False
},
'request': {
'requestId': TEST_REQUEST_ID,
'type': 'LaunchRequest'
},
'context': {}
}
test_context_only_event_with_invalid_id = {
'request': {},
'context': {
'System': {
'application': {
'applicationId': test_invalid_application_id
}
}
}
}
with self.assertRaises(ValueError) as cm_session_event:
alexa_main.lambda_handler(test_session_event_with_invalid_id, None)
with self.assertRaises(ValueError) as cm_context_event:
alexa_main.lambda_handler(test_context_only_event_with_invalid_id, None)
self.assertEqual(cm_session_event.exception.message, "Invalid Application ID")
self.assertEqual(cm_context_event.exception.message, "Invalid Application ID")
on_session_started.assert_not_called()
on_launch.assert_not_called()
@patch("alexa_main.events.on_session_started")
@patch("alexa_main.events.on_launch")
def test_lambda_handler_on_session_started_launch_request(self, on_launch, on_session_started):
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID},
'new': True
}
test_event = {
'session': test_session_obj,
'request': {
'requestId': TEST_REQUEST_ID,
'type': 'LaunchRequest'
},
'context': {}
}
alexa_main.lambda_handler(test_event, None)
on_session_started.assert_called_once_with({
'requestId': TEST_REQUEST_ID,
}, test_session_obj)
on_launch.assert_called_once()
@patch("alexa_main.events.on_intent")
def test_lambda_handler_intent_request(self, on_intent):
test_request_obj = {
'requestId': TEST_REQUEST_ID,
'type': 'IntentRequest'
}
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID,
},
'new': True
}
test_context_obj = {
"System": {},
"AudioPlayer": {}
}
test_event = {
'session': test_session_obj,
'request': test_request_obj,
'context': test_context_obj
}
alexa_main.lambda_handler(test_event, None)
on_intent.assert_called_once_with(test_request_obj, test_session_obj, test_context_obj)
@patch("alexa_main.events.on_session_ended")
def test_lambda_handler_session_ended_request(self, on_session_ended):
test_request_obj = {
'requestId': TEST_REQUEST_ID,
'type': 'SessionEndedRequest'
}
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID,
},
'new': False
}
test_context_obj = {
"System": {},
"AudioPlayer": {}
}
test_event = {
'session': test_session_obj,
'request': test_request_obj,
'context': test_context_obj
}
alexa_main.lambda_handler(test_event, None)
on_session_ended.assert_called_once_with(test_request_obj, test_session_obj)
|
mit
| -4,778,385,089,163,808,000
| 32.062992
| 99
| 0.535127
| false
| 3.987654
| true
| false
| false
|
adobe/avmplus
|
halfmoon/templates/templates.py
|
1
|
36785
|
#!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil -*-
# vi: set ts=2 sw=2 expandtab:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import StringIO
from hrdefs import hrdefs
from sexp import parse
from ast import *
from mangle import *
# -----------------------------------------------------
#
# generate template builder code
#
# format argument value
#
def fmtArg(expr):
if expr.kind == 'TypeConst':
# return 'tb.typeConst(%s)' % expr.value.cg()
return expr.value.cgTraits()
elif expr.kind == 'NumConst':
if type(expr.value) == int:
return 'tb.intConst(%i)' % expr.value
elif type(expr.value) == float:
return 'tb.doubleConst(%d)' % expr.value
else:
raise ParseError("unsupported constant '%s'" % expr.value)
elif expr.kind == 'VarRef':
vartype = expr.type()
if vartype == int_type:
return 'tb.intConst(%s)' % expr.value
elif vartype == number_type:
return 'tb.doubleConst(%s)' % expr.value
else:
raise ParseError("var refs of type '%s' not yet supported" % vartype.dump())
else:
return expr.dump()
# generate IfCall code.
# optionally bind the result to a local with the given name.
# numresults specifies the number of results generated by the
# ifcall--we use this only if we generate a merge label.
# merge_label specifies an existing label to use instead of
# generating one. (TODO param count agreement is assumed--verify.)
#
# if we generated a merge label, return its C++ name, otherwise None.
#
def fmtIfCall(ifcall, lhsname, numresults, use_merge_label, indent):
args = [ifcall.condexpr] + ifcall.args
# argc = len(args)
# build arglist expr. collect varargs in local array if needed
# fixc = 1
# varc = argc - fixc
# if varc > 0:
# vaname = '%s_args' % (lhsname if lhsname else '')
# varargs = ', '.join([fmtArg(args[i]) for i in range(fixc, argc)])
# print '%sDef* %s[] = { %s };' % (indent, vaname, varargs)
# arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + [str(varc), vaname])
# else:
# arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + ['0', 'NULL'])
arglist = fmtArglist(lhsname, ifcall.base, args, indent)
# create IfInstr and add to IR
repname = "IfInstr"
lhs = '' if lhsname is None else '%s* %s = (%s*)' % (repname, lhsname, repname)
print '%s%stb.addInstr(new%s(%s));' % (indent, lhs, repname, arglist)
# an (if ...) may have a mixture of (goto ...) and plain exprs at its leaves.
# we need to generate a synthetic label for any plain exprs to jump to.
need_merge = not(ifcall.iftrue.allPathsEscape() and
ifcall.iffalse.allPathsEscape())
# for any of our paths which terminate without an explicit goto,
# we need to generate a goto to a synthetic merge label.
# our caller may have supplied a candidate in use_merge_label;
# if not, generate one now.
#
if need_merge:
if use_merge_label is None:
# create LabelInstr, but don't add to IR
merge_label = lhsname + '_merge'
print '%sLabelInstr* %s = newLabelInstr(%i);' % (indent, merge_label, numresults)
else:
merge_label = use_merge_label
else:
merge_label = None
print ''
fmtArm(ifcall.iftrue, lhsname, merge_label, indent)
fmtArm(ifcall.iffalse, lhsname, merge_label, indent)
return merge_label if use_merge_label is None else None
# helper - generate arm code
#
def fmtArm(arm, lhsname, merge_label, indent):
repname = 'ArmInstr'
armname = lhsname + '_' + arm.name
print '%s{ // %s %s arm' % (indent, lhsname, arm.name)
indent += ' '
# create ArmInstr, add to IR, save to local
# note: "(void)arm;" prevents unused variable warnings
print '%s%s* %s = (%s*)tb.addInstr(%s->arm(%s)); (void)%s;' % (
indent, repname, armname, repname, lhsname, arm.name, armname)
# create a local for each arm param
for i in range(0, len(arm.parnames)):
parname = arm.parnames[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, parname, armname, i, parname)
print ''
# generate arm body statements, up to final instruction
genTemStmts(arm, indent)
# if needed, generate a final goto to synthetic label
if not arm.allPathsEscape():
body = arm.body
retinstr = body[len(body) - 1]
repname = 'GotoInstr'
gotoname = armname + '_exit'
print '%s%s* %s = newGotoStmt(%s); ' % (
indent, repname, gotoname, merge_label)
for i in range(0, len(retinstr.expr.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(retinstr.expr.args[i]))
print '%stb.addInstr(%s); ' % (indent, gotoname)
# close the c++ block for this arm
indent = indent[:len(indent) - 2]
print '%s}' % indent
print ''
# helper - indicates the presence of a fixed-arg factory
# method for variable-arg instructions of a given shape.
# NOTE: carnal knowledge of InstrFactory API.
#
def hasFixedArgFactoryMethod(repname, argc):
return (repname == 'StopInstr' and argc == 2) or \
(repname == 'CallStmt2' and (argc == 4 or argc == 3)) or \
(repname == 'CallStmt3' and argc == 5) or \
(repname == 'IfInstr' and (argc == 0 or argc == 1 or argc == 2))
# generate argument list code. For instructions with fixed input
# shape, and common instances of some variable-input instructions,
# a single-shot factory method is available. In these cases we build
# a simple expression list of args.
#
# Other variable-input instructions have factory methods that take
# a count and an array for their variable args (they may still have
# a nonzero number of fixed args; these come first). For these, we
# generate code to create an array of args and assign it to a local,
# then return an expression list containing an arg count and a reference
# to the local.
#
def fmtArglist(lhsname, base, args, indent):
rep = getRep(base)
fixc = rep.shape[EFFECT_IN] + rep.shape[DATA_IN] # fixed arg count
argc = len(args)
varc = argc - fixc
if rep.isvarin() and not hasFixedArgFactoryMethod(rep.name, len(args)):
# build arglist expr. collect va_value in local array if needed
if varc > 0:
va_name = '%s_args' % (lhsname if lhsname else '')
va_value = ', '.join([fmtArg(args[i]) for i in range(fixc, argc)])
print '%sDef* %s[] = { %s };' % (indent, va_name, va_value)
arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + [str(varc), va_name])
else:
arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + ['0', 'NULL'])
else:
arglist = ', '.join([fmtArg(arg) for arg in args]) # simple arg list
return arglist
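# Illustrative output (added note; the instruction shape is assumed): for a
# variable-input instruction with one fixed arg and two varargs bound to a
# local named 'foo', fmtArglist() first prints
#   Def* foo_args[] = { <vararg1>, <vararg2> };
# and then returns the argument-list string '<fixedarg>, 2, foo_args'.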
# generate Call code.
# optionally bind the result to a local with the given name.
#
def fmtCall(call, defs, indent, lhsname = None):
base, args = call.base, call.args
# format argument list, possibly generating locals along the way
arglist = fmtArglist(lhsname, base, args, indent)
# add kind arg if needed
rep = getRep(base)
if getRepCount(rep, defs) > 1:
arglist = base.hrname() + ', ' + arglist
# create and add instr, maybe store to local
repname = rep.name
lhs = '' if lhsname is None else '%s* %s = (%s*)' % (repname, lhsname, repname)
print '%s%stb.addInstr(new%s(%s));' % (indent, lhs, repname, arglist)
# format call to access the given output of a call.
# Instr API has effect_out(), value_out() for instrs which
# have only one effect or data output (respectively),
# and effectOut(i), value_out(i) for the > 1 cases. Here
# we take an index into the combined list of outputs.
#
def fmtAccessor(call, i):
effects = [t.isEffect() for t in call.types()]
effect = effects[i]
fmtix = '' if effects.count(effect) == 1 else '%s' % effects[:i].count(effect)
return '%s(%s)' % ('effect_out' if effect else 'value_out', fmtix)
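# Illustrative mapping (added note; the output signature is assumed): for a
# call whose combined outputs are [effect, data, data], fmtAccessor(call, 0)
# yields 'effect_out()' (the only effect output), while fmtAccessor(call, 1)
# and fmtAccessor(call, 2) yield 'value_out(0)' and 'value_out(1)'.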
# emit user-defined labelled (local) definitions for the given template.
# syntactic restrictions on templates say that gotos must always terminate
# execution paths, which makes our job here simple--all user-defined labelled
# defs end by jumping to a common endpoint label, given here by endlabelname.
#
def genLabelDefSection(tem, end_label_name, indent):
for label in tem.labels.values():
labelname = 'label_%s' % label.name.lstrip('@')
print '%s{ // label %s in %s' % (indent, labelname, tem.name)
indent += ' '
print '%stb.addInstr(%s);' % (indent, labelname)
# extract label params
for i in range(0, len(label.parnames)):
pname = label.parnames[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, pname, labelname, i, pname)
print ''
# label body
genTemStmts(label, indent)
# build goto end from return
retinstr = label.body[len(label.body) - 1]
repname = 'GotoInstr'
gotoname = labelname + '_exit'
print '%s%s* %s = newGotoStmt(%s);' % (
indent, repname, gotoname, end_label_name)
for i in range(0, len(retinstr.expr.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(retinstr.expr.args[i]))
print '%stb.addInstr(%s); ' % (indent, gotoname)
indent = indent[:len(indent) - 2]
print '%s}' % indent
print ''
# generate builder code from template statements
# note that stmtlist is currently either the template body, or
# the template body without the final (return) statement.
#
def genTemStmts(tem, indent):
# first, create label instrs. we need to have them around for gotos,
# but they don't go into the IR until the end of the template.
# note that if we have any labels, we'll need common endpoint label
if len(tem.labels) > 0:
print '%s// labels defined in %s, plus final endpoint' % (
indent, tem.name)
for label in tem.labels.values():
labelname = 'label_%s' % label.name.lstrip('@')
print '%sLabelInstr* %s = newLabelInstr(%i);' % (
indent, labelname, label.numParams())
# endpoint label
# note: we get the number of label params from the number
# of returned results.
end_label = 'label_%s' % tem.genLocalName('end')
print '%sLabelInstr* %s = newLabelInstr(%i);' % (
indent, end_label, tem.body[len(tem.body) - 1].expr.base.numParams())
print ''
else:
end_label = None
# emit mainline statement list
# for stmt in tem.body[:len(tem.body) - 1]:
for i in range(0, len(tem.body) - 1):
stmt = tem.body[i]
if stmt.kind == 'LocalDefs':
# lhs = ...
rhs = stmt.expr
if rhs.kind == 'Call':
# lhs = if/goto/call
basename = rhs.base.name
iname = tem.genLocalName(basename) if len(stmt.names) > 0 else None
if basename == 'if':
# if this if-stmt is the terminal statement of the template,
# and we have user-defined labelled defs, then the if can use our
# end_label as a post-if merge point, if needed.
use_merge_label = end_label if i == len(tem.body) - 2 else None
# fmt_ifcall will return the name of merge label to generate
new_merge_label = fmtIfCall(rhs, iname, len(stmt.names), use_merge_label, indent)
# add generated merge label to IR and extract vars
if new_merge_label:
print '%s// %s merge label, defs' % (indent, iname)
print '%stb.addInstr(%s);' % (indent, new_merge_label)
# create C++ local for each LocalDefs binding
for i in range(0, len(stmt.names)):
varname = stmt.names[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, varname, new_merge_label, i, varname)
elif basename.startswith('@'):
# goto label
labelname = 'label_%s' % rhs.base.name.lstrip('@')
repname = 'GotoInstr'
gotoname = tem.genLocalName('goto')
print '%sGotoInstr* %s = newGotoStmt(%s);' % (
indent, gotoname, labelname)
for i in range(0, len(rhs.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(rhs.args[i]))
print '%stb.addInstr(%s);' % (indent, gotoname)
else:
# call
fmtCall(rhs, defs, indent, iname)
# create local for each LocalDefs binding
for i in range(0, len(stmt.names)):
labelname = stmt.names[i]
accessor = fmtAccessor(rhs, i)
print '%sDef* %s = %s->%s; (void)%s;' % (
indent, labelname, iname, accessor, labelname)
print ''
else:
# lhs = non-call
labelname = stmt.names[0]
ldef = stmt.defs[stmt.names[0]]
print '%sDef* %s = %s;' % (indent, labelname, fmtArg(ldef))
print ''
elif stmt.kind == 'Call':
# otherwise it's just an unbound call, no lhs
# NOTE: this doesn't happen at the moment
fmtCall(stmt, defs, indent)
else:
# nothing else at the top level of a template body
raise ParseError('unknown statement type in body list: %s' % stmt.dump())
# wrap it up
# if needed, emit defined label code, plus final endpoint
if len(tem.labels) > 0:
genLabelDefSection(tem, end_label, indent)
# add endpoint label
print '%s// common endpoint block' % indent
print '%stb.addInstr(%s);' % (indent, end_label)
    # create C++ local for each LocalDefs binding from the
# terminal statement
term_stmt = tem.body[len(tem.body) - 2]
for i in range(0, len(term_stmt.names)):
varname = term_stmt.names[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, varname, end_label, i, varname)
print ''
# finally, add return instr for top-level templates
# TODO verify handling of extracted vars (above) in nested tems
if tem.parent is None:
ret_stmt = tem.body[len(tem.body) - 1]
fmtCall(ret_stmt.expr, defs, indent)
# generate builder switch case for a template
#
def genTemBuilderCase(tem):
hrname = tem.hrname()
print ' case %s: {' % hrname
print '/***'
print tem.dump()
print '***/'
print ''
print ' const Type* in_types[] = { %s };' % ', '.join([t.cgType() for t in tem.partypes])
print ' tb.start(%s, %i, in_types);' % (hrname, len(tem.partypes))
print ''
for i in range(0, len(tem.parnames)):
pname = tem.parnames[i]
print ' Def* %s = tb.paramRef(%i); (void)%s;' % (pname, i, pname)
print ''
genTemStmts(tem, ' ')
# emit labels
# emit return
print ''
print ' break;'
print ' }'
print ''
# generate template builder function impl
def genTemBuilderCases(defs):
for d in templates_only(defs):
genTemBuilderCase(d)
# -----------------------------------------------------
#
# generate type signature builder code
#
# generate input signature builder function
#
def genInputSigBuilder(defs):
print '/// return input type signature for given instruction'
print '///'
print 'const Type** InstrFactory::buildInputSignature(InstrKind kind) {'
print ' switch (kind) {'
for d in defs:
hrname = d.hrname()
print ' case %s: {' % hrname
print ' /* %s */' % d.dumpSig()
siglen = len(d.partypes)
if siglen == 0:
print ' return NULL;'
elif siglen == 1:
print ' return copySig(%s);' % d.partypes[0].cgType()
else:
print ' const Type* input_sig[] = { %s };' % ', '.join([t.cgType() for t in d.partypes])
print ' return copySig(%i, input_sig);' % len(d.partypes)
print ' }'
print ' default: {'
print ' assert(false && "unsupported opcode");'
print ' return NULL;'
print ' }'
print ' } // switch'
print '}'
# generate output signature builder function
#
def genOutputSigBuilder(defs):
print '/// return output type signature for given instruction'
print '///'
print 'const Type** InstrFactory::buildOutputSignature(InstrKind kind) {'
print ' switch (kind) {'
for d in defs:
hrname = d.hrname()
print ' case %s: {' % hrname
print ' /* %s */' % d.dumpSig()
siglen = len(d.rettypes)
if siglen == 0:
print ' return NULL;'
elif siglen == 1:
print ' return copySig(%s);' % d.rettypes[0].cgType()
else:
print ' const Type* output_sig[] = { %s };' % ', '.join([t.cgType() for t in d.rettypes])
print ' return copySig(%i, output_sig);' % len(d.rettypes)
print ' }'
print ' default: {'
print ' assert(false && "unsupported opcode");'
print ' return NULL;'
print ' }'
print ' } // switch'
print '}'
def genSigBuildersImpl(defs):
genInputSigBuilder(defs)
print ''
genOutputSigBuilder(defs)
# ------------------------------------------------------
#
# generate predicate methods
#
# helper: sorted list of all distinct reps
def allReps():
repset = set(replist +
instr_rep_overrides.values() +
shape_rep_overrides.values())
return sorted(repset, key=lambda rep: (rep.shape, rep.name))
# generate is-shape function for a given RepInfo
def genIsShape(defs, shapeinfo, proto = False):
print '/// true if given InstrKind is instance of %s' % shapeinfo.name
if proto:
print 'static bool is%s(InstrKind k);' % shapeinfo.name
else:
print 'bool InstrFactory::is%s(InstrKind k) {' % shapeinfo.name
print ' return instr_attrs[k].shape == %s;' % shapeinfo.enum()
print '}'
print ''
# generate has-template pred
def genHasTemplate(defs, proto = False):
print '/// true if given InstrKind has a template'
if proto:
print 'static bool hasTemplate(InstrKind k);'
else:
print 'bool InstrFactory::hasTemplate(InstrKind k) {'
print ' return instr_attrs[k].hastem;'
print '}'
print ''
# generate InstrFactory predicate impls
def genPredsImpl(defs):
genHasTemplate(defs, False)
for sh in allReps():
genIsShape(defs, sh)
# generate InstrFactory predicate protos
def genPredsProto(defs):
genHasTemplate(defs, True)
for sh in allReps():
genIsShape(defs, sh, True)
# --------------------------------------------------------
#
# generate shape and instr enums
#
# helper: return map of rep names to counts.
# CAUTION - relies on unique rep names. Can fail, but not silently.
# CAUTION - cache assumes single def list over lifetime of CG
rep_counts = None
def getRepCount(rep, defs):
global rep_counts
if rep_counts is None:
repnames = [r.name for r in allReps()]
defrepnames = [getRep(d).name for d in defs]
rep_counts = dict(zip(repnames, [defrepnames.count(repname) for repname in repnames]))
return rep_counts[rep.name]
#
def shapeData(sh):
return "%i, %i, %s" % (
sh[0] + sh[1], sh[2] + sh[3], vararg_names[sh[4]])
#
def genEnums(defs, proto = False):
reps = allReps()
if proto:
print '/// High level intermediate representation (HR) opcodes'
print '///'
print 'enum InstrKind {'
for i in range(0, len(defs)):
d = defs[i]
print ' %s, %s// %s %s' % (
d.hrname(), ' ' * max(0, 24 - len(d.hrname())),
getRep(d).name, 'template' if d.isTemplate() else '')
print ' HR_MAX = %s + 1' % defs[len(defs) - 1].hrname()
print '};'
print ''
print '/// VarargKind designates variability in at most one'
print "/// of an instruction's four argument groups."
print '///'
print 'enum VarargKind {'
for i in [DATA_IN, DATA_OUT, NONE]:
print ' %s,' % vararg_names[i]
print ' VARARGKIND_MAX = %s' % vararg_names[NONE]
print '};'
print ''
print '/// ShapeRep describes the representation of an instruction shape.'
print '/// Note that when varargs are specified, the corresponding'
print '/// member gives a minimum, rather than exact, quantity.'
print '/// For example, a ShapeRep with vararg == %s and datain == 2' % vararg_names[DATA_IN]
print '/// describes instructions with *at least* 2 data inputs.'
print '///'
print 'struct ShapeRep {'
print ' int num_uses; // number of Use inputs'
print ' int num_defs; // number of Def outputs'
print ' VarargKind vararg; // vararg position, if any'
print '};'
print ''
print '/// InstrShape is an enumeration of HR instruction shapes.'
print '/// The representation details of each InstrShape s is described by'
print '/// shape_reps[s].'
print '///'
print 'enum InstrShape {'
for i in range(0, len(reps)):
rep = reps[i]
shapedump = shapeData(rep.shape)
print ' %s, %s// %s %s%i instrs' % (rep.enum(),
' ' * max(0, 24 - len(rep.enum())), shapedump,
' ' * max(0, 24 - len(shapedump)), getRepCount(rep, defs))
print ' SHAPE_MAX = %s + 1' % reps[len(reps) - 1].enum()
print '};'
print ''
if proto:
print '/// shape_reps[] gives the representations of'
print '/// the shapes enumerated by InstrShape.'
print '///'
print 'extern const ShapeRep shape_reps[SHAPE_MAX];'
print ''
else:
print '/// shape_reps[] gives the representations of'
print '/// the shapes enumerated by InstrShape.'
print '///'
print 'extern const ShapeRep shape_reps[SHAPE_MAX] = {'
for rep in reps:
sh = rep.shape
print ' { %s }, %s// %s' % (shapeData(sh),
' ' * max(0, 10 - len(vararg_names[sh[4]])), rep.enum())
print '};'
print ''
if proto:
print '/// InstrAttrs contains attributes specific to (and universal'
print '/// across all instances of) a particular HR instruction.'
print '///'
print 'struct InstrAttrs {'
print ' const char* name; // printable name'
print ' InstrShape shape; // shape (const)'
print ' bool hastem; // true if instruction has a template (const)'
print '};'
print ''
print '/// instr_attrs describes the instructions enumerated in InstrKind.'
print '///'
print 'extern const InstrAttrs instr_attrs[HR_MAX];'
print ''
else:
print '/// instr_attrs describes the instructions enumerated in InstrKind.'
print '///'
print 'extern const InstrAttrs instr_attrs[HR_MAX] = {'
for d in defs:
print ' { "%s", %s%s, %s },' % (d.name, ' ' * max(0, 24 - len(d.hrname())),
getRep(d).enum(), 'true' if d.isTemplate() else 'false')
print '};'
print ''
# generate enum declarations
def genEnumsProto(defs):
genEnums(defs, True)
# generate enum data definitions
def genEnumsImpl(defs):
genEnums(defs, False)
# -----------------------------------------------------
#
# generate kind-driven dispatcher infrastructure
#
# generate kind adapter methods
#
def genKindAdapterMethods(defs):
for d in defs:
rep = getRep(d)
print ('RETURN_TYPE do_%s(%s* i) { return static_cast<SELF_CLASS*>(this)->do_default(i); }'
% (d.name, rep.name))
# generate dispatch function switch cases
#
def genKindAdapterCases(defs):
for d in defs:
hrname = d.hrname()
rep = getRep(d)
print 'case %s: ' % hrname
print ' return a->do_%s(cast<%s>(instr));' % (d.name, rep.name)
# -----------------------------------------------------
#
# generate shape-driven dispatcher infrastructure
#
# generate shape adapter methods
#
def genShapeAdapterMethods(defs):
reps = allReps()
for rep in reps:
print ('RETURN_TYPE do_%s(%s* i) { return static_cast<SELF_CLASS*>(this)->do_default(i); }'
% (rep.name, rep.name))
# generate dispatch function switch cases
#
def genShapeAdapterCases(defs):
reps = allReps()
for rep in reps:
print 'case %s: ' % rep.enum()
print ' return a->do_%s(cast<%s>(instr));' % (rep.name, rep.name)
# -----------------------------------------------------
#
# generate C++ definitions of runtime helper functions
#
cpp_type_map = {
'Atom': 'Atom',
'Boolean': 'BoolKind',
'Class': 'ClassClosure*',
'Env': 'MethodEnv*',
'Int': 'int32_t',
'Method': 'MethodInfo*',
'Name': 'const Multiname*',
'Namespace': 'Namespace*',
'Number': 'double',
'Ord': 'int',
'String': 'String*',
'Effect': 'void',
'Traits': 'Traits*',
'Uint': 'uint32_t',
'Object': 'Atom',
'ScriptObject': 'ScriptObject*',
'Array': 'ArrayObject*',
'VectorInt': 'IntVectorObject*',
'VectorUInt': 'UIntVectorObject*',
'VectorDouble': 'DoubleVectorObject*',
'Function': 'ClassClosure*',
'Bot': 'void',
}
def cpp_typename(t):
name = t.name
if name.endswith('~'):
name = name[0:len(name)-1]
return cpp_type_map[name] if name in cpp_type_map else '?'+name
# true if the shape for d treats the last fixed arg as the first vararg
def has_extra_vararg(d):
return getRep(d).name.startswith('CallStmt')
def make_argsig(d):
sig = [cpp_typename(t) for t in d.partypes if cpp_typename(t) != 'void']
if d.isvarin:
vartype = sig[len(sig)-1]
fixc = getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)
sig = sig[0:fixc] + ['int'] + ['%s*' % vartype]
return ['MethodFrame*'] + sig
def ret_type(d):
sig = [t for t in d.rettypes if not t.isEffect()]
return sig[0] if len(sig) == 1 else None
def make_ret_ctype(d):
t = ret_type(d)
return cpp_typename(t) if t else 'void'
# Make a list of just primitive instructions
def protos_only(defs):
return [d for d in defs if not(d.isTemplate())]
# return a list of all template instructions
def templates_only(defs):
return [d for d in defs if d.isTemplate()]
# Exclude instructions with TopData or Top in their signature,
# or any instruction with 2+ data outputs.
def do_generate_stub(d):
fullsig = [t.name for t in d.partypes + d.rettypes]
return not ('TopData' in fullsig or 'Top' in fullsig or 'State' in fullsig)\
and d.shape[DATA_OUT] in range(2)
# Generate a class with C++ prototypes for each stub.
def gen_stub_protos(defs):
protos = protos_only(defs)
stubs = [d for d in protos if do_generate_stub(d)]
print "namespace halfmoon {"
print "using namespace avmplus;"
print "struct Stubs {"
print " static const int stub_count = %d;" % len(protos)
print
for d in stubs:
print ' // %s' % d.dumpSig()
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
print ' static %s do_%s(%s);' % (ret_ctype, d.name, ', '.join(arg_sig))
print
print "};"
print
print "/* One-line implementations, for copy/paste convenience:"
for d in stubs:
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
ret_stmt = 'return 0; ' if ret_ctype != 'void' else ''
print ' %s Stubs::do_%s(%s) { assert(false && "%s not implemented"); %s}' %\
(ret_ctype, d.name, ', '.join(arg_sig), d.name, ret_stmt)
print "*/"
print "}"
# Map C++ type names to nanojit::ArgType enums.
def lir_argtype(ctype):
if ctype == 'void':
return 'ARGTYPE_V'
if ctype == 'double':
return 'ARGTYPE_D'
if ctype == 'uint32_t':
return 'ARGTYPE_UI'
if ctype in ['int', 'int32_t', 'BoolKind']:
return 'ARGTYPE_I'
return 'ARGTYPE_P'
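# For example (derived from the mapping above): lir_argtype('double') returns
# 'ARGTYPE_D', lir_argtype('uint32_t') returns 'ARGTYPE_UI', and pointer-like
# C++ types such as 'String*' or 'MethodEnv*' fall through to 'ARGTYPE_P'.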
# Generate the LIR typesig builder expression by mapping the C++
# paramter types to LIR ArgType enums.
def lir_typesig(d):
argtypes = [lir_argtype(s) for s in make_argsig(d)]
sigtypes = [lir_argtype(make_ret_ctype(d))] + argtypes
return 'CallInfo::typeSig%d(%s)' % (len(argtypes), ', '.join(sigtypes))
# an opcode is pure if it has no side effects. Since side-effect
# types are mapped to C++ 'void', we scan for void.
def lir_ispure(d):
return 1 if 'void' not in [cpp_typename(t) for t in d.partypes] else 0
def lir_accset(d):
return 'ACCSET_NONE' if lir_ispure(d) else 'ACCSET_ALL'
# generate a table of nanojit CallInfo structures; one for each stub.
def gen_stub_lirtable(defs):
protos = protos_only(defs)
print "namespace halfmoon {"
print "const nanojit::CallInfo LirEmitter::lir_table[] = {"
for d in protos:
if do_generate_stub(d):
print ' { (uintptr_t)&Stubs::do_%s, %s, ABI_CDECL, %d, %s verbose_only(, "%s")},' %\
(d.name, lir_typesig(d), lir_ispure(d), lir_accset(d), d.name)
else:
print ' { 0, 0, ABI_CDECL, 0, ACCSET_NONE verbose_only(, "%s")},' % d.name
print "};"
print
print "const int LirEmitter::stub_fixc[] = {"
for d in protos:
fixc = (getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)) if d.isvarin\
else -1 # -1 means stub has fixed arg count despite variadic shape
print ' %d, // %s' % (fixc, d.name)
print "};"
print "}"
# generate a table of LLVMEmitter StubInfo structures; one for each stub.
def gen_stub_llvmtable(defs):
return gen_stub_llvmtable_common(defs, 32)
def gen_stub_llvmtable64(defs):
return gen_stub_llvmtable_common(defs, 64)
def gen_stub_llvmtable_common(defs,arch):
protos = protos_only(defs)
type_strings = []
type_strings_cxx = {}
def type_string_index(ret, args):
type_string = ";".join([make_llvm_type_string(nm, getAvmMangleTypedefs(arch)) for nm in [ret] + args])
if type_string in type_strings:
return type_strings.index(type_string)
type_strings.append(type_string)
type_strings_cxx[type_string] = '%s ()(%s)' % (ret, ', '.join(args))
return len(type_strings)-1
save_stdout = sys.stdout
sys.stdout = buffer = StringIO.StringIO()
for scheme in mangleSchemes:
    kindIndex = 0
    # We need to print 2 different stub tables.
    # One to handle mangled function names when the target OS is Mac or iOS
# And the other to handle mangled function names when the target OS is Windows
# The tables are named based on the target OS on which the packaged app will be running
print "const LLVMModule::StubInfo %sllvm_stub_table[%d] = {" % (scheme.getCppLatch(),len(protos))
print
for d in protos:
print ' // %d: %s' % (kindIndex, d.dumpSig())
kindIndex = kindIndex+1
fixc = (getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)) if d.isvarin\
else -1 # -1 means stub has fixed arg count despite variadic shape
if do_generate_stub(d):
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
fn_name = 'halfmoon::Stubs::do_'+d.name
func_attrs = Attribute.STATIC | Attribute.PUBLIC | Attribute.CDECL
mgl_name = scheme.mangle(fn_name, ret_ctype, arg_sig, func_attrs, getAvmMangleTypedefs(arch))
print ' // %s %s(%s)' % (ret_ctype, fn_name, ', '.join(arg_sig))
print ' { "%s", "%s", llvm_stub_types[%d], %s, %d },' % ( d.name, mgl_name,
type_string_index(ret_ctype, arg_sig), 'true' if lir_ispure(d) else 'false', fixc)
print
else:
print ' { "%s", 0, 0, false, %d },' % (d.name, fixc)
print
print "};"
print
sys.stdout = save_stdout
print "namespace compile_abc {"
print "static const int llvm_stub_count = %d;" % len(protos)
print "static const char* llvm_stub_types[%d] = {" % len(type_strings)
typeIndex = 0
for t in type_strings:
print ' // %d: %s' % (typeIndex, type_strings_cxx[t])
typeIndex = typeIndex+1
print ' "%s",' % (t)
print
print "};"
print
print buffer.getvalue()
print "}"
# return the interpreter getter expression for type t
interp_getter_name = {
'double' : 'interp->getDouble',
'int' : 'interp->getOrdinal',
'int32_t' : 'interp->getInt',
'uint32_t' : 'interp->getUint',
'BoolKind' : 'interp->getBool',
'String*' : 'interp->getString',
'Namespace*' : 'interp->getNs',
'Atom' : 'interp->getAtom',
'Traits*' : 'interp->getTraits',
'MethodEnv*' : 'interp->getEnv',
'MethodInfo*' : 'interp->getMethod',
'const Multiname*' : 'interp->getName',
'ScriptObject*' : 'interp->getObject',
'ArrayObject*' : '(ArrayObject*)interp->getObject',
'IntVectorObject*' : '(IntVectorObject*)interp->getObject',
'UIntVectorObject*' : '(UIntVectorObject*)interp->getObject',
'DoubleVectorObject*' : '(DoubleVectorObject*)interp->getObject',
'ClassClosure*' : '(ClassClosure*)interp->getObject',
}
def interp_getter(t):
return interp_getter_name[cpp_typename(t)]
# return the Interpreter Value constructor name for the return type of d
def interp_value(d):
ct = cpp_typename(ret_type(d))
return 'AtomValue' if ct == 'Atom'\
else 'OrdValue' if ct == 'int'\
else 'Value'
# generate a class of helper functions for the interpreter. Each one
# unpacks arguments, invokes the stub, then saves the result Value.
# var-in instructions are handled by passing a count and pointer to
# values. void stubs are handled by not saving the result.
def gen_stub_callers(defs):
protos = protos_only(defs)
stubs = [d for d in protos if do_generate_stub(d)]
print "namespace halfmoon {"
print "class StubCaller {"
print " public:"
for d in stubs:
exprs = ['%s(instr->use(%d))' % (interp_getter(d.partypes[i]), i) for i in range(len(d.partypes))\
if cpp_typename(d.partypes[i]) != 'void']
print ' // %s' % d.dumpSig()
print ' static void do_%s(Interpreter* interp, %s* instr) {' % (d.name, getRep(d).name)
if d.isvarin:
fixc = getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)
var_type = d.partypes[len(d.partypes)-1]
var_ctype = cpp_typename(var_type)
vargetter = interp_getter(var_type)
print ' int argc = instr->arg_count();'
print ' Use* arg_uses = instr->args();'
print ' %s* args = (%s*)interp->args_out_;' % (var_ctype, var_ctype)
print ' for (int i = 0; i < argc; ++i)'
print ' args[i] = %s(arg_uses[i]);' % (vargetter)
exprs = exprs[0:fixc] + ['argc, args']
exprs = ['&interp->frame_'] + exprs
arg_expr = ',\n '.join(exprs)
ret_ctype = make_ret_ctype(d)
if ret_ctype == 'void':
print ' Stubs::do_%s(%s);' % (d.name, arg_expr)
print ' (void)interp;'
else:
print ' interp->resultVal(instr->value_out()) = %s(Stubs::do_%s(%s));' %\
(interp_value(d), d.name, arg_expr)
if len(exprs) == 1 and ret_ctype == 'void':
print ' (void)instr;'
print ' }'
print
print "};"
print
# generate a table with pointers to the helper functions, indexed by InstrKind
print "const Interpreter::StubCall Interpreter::stub_table[] = {"
for d in protos:
if do_generate_stub(d):
print ' (StubCall)&StubCaller::do_%s,' % d.name
else:
print ' 0, // %s' % d.name
print "};"
print
print "}"
# End generation of helpers for stubs
# --------------------------------------------------------
#
# generator harness and helpers
#
def printheader():
print '///'
print '/// generated by templates.py -- do not edit'
print '///'
print
gendir = "../generated"
def genfile(defs, gen, filename):
if not(os.path.exists(gendir)):
os.mkdir(gendir)
f = open('%s/%s' % (gendir, filename), 'wb')
try:
sys.stdout = f
printheader()
gen(defs)
finally:
f.close()
sys.stdout = sys.__stdout__
# group defs into primitives-then-templates
def sortdefs(defs):
return protos_only(defs) + templates_only(defs)
def gendefs(defs):
defs = sortdefs(defs)
genfile(defs, genEnumsProto, "InstrFactory_defs_proto.hh")
genfile(defs, genEnumsImpl, "InstrFactory_defs_impl.hh")
genfile(defs, genPredsProto, "InstrFactory_preds_proto.hh")
genfile(defs, genPredsImpl, "InstrFactory_preds_impl.hh")
genfile(defs, genSigBuildersImpl, "InstrFactory_signatures_impl.hh")
genfile(defs, genTemBuilderCases, "InstrFactory_buildTemplate_cases.hh")
genfile(defs, genKindAdapterMethods, "KindAdapter_methods.hh")
genfile(defs, genKindAdapterCases, "KindAdapter_cases.hh")
genfile(defs, genShapeAdapterMethods, "ShapeAdapter_methods.hh")
genfile(defs, genShapeAdapterCases, "ShapeAdapter_cases.hh")
genfile(defs, gen_stub_protos, "Stub_protos.hh")
genfile(defs, gen_stub_lirtable, "Stub_lirtable.hh")
genfile(defs, gen_stub_llvmtable, "Stub_llvmtable.hh")
genfile(defs, gen_stub_llvmtable64, "Stub_llvmtable_64.hh")
genfile(defs, gen_stub_callers, "Stub_callers.hh")
def trace(s):
save = sys.stdout
sys.stdout = sys.__stdout__
print s
sys.stdout = save
# -----------------------------------------------------
#
# main
#
# dump processed defs
def dump(defs):
for d in defs:
print '\n' + d.dump()
# generator functions callable from the command line
gens = {
'defs': gendefs, # generate code
'dump': dump # dump internal reps
}
if len(sys.argv) > 1 and sys.argv[1] in gens:
gen = gens[sys.argv[1]]
else:
print "Error: must specify defs or dump as command-line argument"
sys.exit(1)
try:
sexprs = [sexpr for sexpr in parse(hrdefs) if isValid(sexpr)]
defs = [toDef(sexpr) for sexpr in sexprs]
process(defs)
gen(defs)
except ParseError as e:
print 'parse error: %s' % e.message()
sys.exit(1)
|
mpl-2.0
| 1,539,823,929,326,712,300
| 33.15506
| 106
| 0.617453
| false
| 3.213225
| false
| false
| false
|
tshi04/machine-learning-codes
|
headGAN-ff/model.py
|
1
|
2150
|
import re
import numpy as np
import tensorflow as tf
class discriminator(object):
def __init__(self):
self.name = 'keydis'
def __call__(self, input_data, reuse=False):
with tf.variable_scope(self.name) as self.ds:
if reuse:
self.ds.reuse_variables()
nf_len1 = 3
nf_filter = 32
input_data = tf.transpose(input_data,[0,3,2,1])
wdvec_dim = int(input_data.shape[1])
seq_len = int(input_data.shape[2])
input_channel = int(input_data.shape[3])
w_conv1 = tf.Variable(tf.truncated_normal([wdvec_dim,nf_len1,input_channel,nf_filter],stddev=0.1),name='w_conv1')
b_conv1 = tf.Variable(tf.truncated_normal([nf_filter]),name='b_conv1')
h_conv1 = tf.nn.conv2d(input=input_data,filter=w_conv1,strides=[1,1,1,1],padding='VALID',name='conv1')
h_conv1 = tf.add(h_conv1,b_conv1,name='h_conv1')
h_flat = tf.reshape(h_conv1,[-1,(seq_len-nf_len1+1)*nf_filter],name='h_flat')
w_fc1 = tf.Variable(tf.truncated_normal([(seq_len-nf_len1+1)*nf_filter,1],stddev=0.1),name='w_fc1')
logits = tf.matmul(h_flat,w_fc1)
return logits
@property
def vars(self):
return tf.contrib.framework.get_variables(self.ds)
class generator(object):
def __init__(self):
self.name = 'keygen'
def __call__(self, input_data, reuse=False):
with tf.variable_scope(self.name) as self.gs:
if reuse:
self.gs.reuse_variables()
nf_len1 = 20 # filter length
nf_filter = 100 # number of filters
wdvec_dim = int(input_data.shape[1])
input_channel = int(input_data.shape[3])
w_conv1 = tf.Variable(tf.truncated_normal([wdvec_dim,nf_len1,input_channel,nf_filter],stddev=0.1),name='w_conv1')
b_conv1 = tf.Variable(tf.truncated_normal([nf_filter]),name='b_conv1')
h_conv1 = tf.nn.conv2d(input=input_data, filter=w_conv1,strides=[1,1,20,1],padding='VALID',name='conv1')
h_conv1 = tf.add(h_conv1,b_conv1,name='h_conv1')
return h_conv1
@property
def vars(self):
return tf.contrib.framework.get_variables(self.gs)
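# Minimal usage sketch (added for clarity; tensor names and shapes are assumed,
# not part of the original file). Both callables build their variables inside
# their own scope on first use, so a second call needs reuse=True:
#
#   # real_data / noise: 4-D tensors shaped [batch, wdvec_dim, seq_len, channels]
#   # d = discriminator()
#   # g = generator()
#   # fake = g(noise)                    # creates the 'keygen' variables
#   # logits_real = d(real_data)         # creates the 'keydis' variables
#   # logits_fake = d(fake, reuse=True)  # reuses the same discriminator weights
#   # d_vars, g_vars = d.vars, g.vars    # variable lists for the two optimizers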
|
gpl-3.0
| 1,230,414,954,405,514,500
| 38.090909
| 118
| 0.61814
| false
| 2.882038
| false
| false
| false
|
Zanzibar82/streamondemand.test
|
servers_sports/ucaster.py
|
1
|
3368
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# Conector para ucaster
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
DEBUG = config.get_setting("debug")
def find_url_play(data, headers):
logger.info("[ucaster.py] find_url_play")
'''
<script type='text/javascript'> width=726, height=500, channel='danysportscucu', g='1';</script><script type='text/javascript' src='http://www.ucaster.eu/static/scripts/ucaster.js'></script>
<script type="text/javascript">
<!--//--><![CDATA[// ><!--
width=610, height=470, channel='tashsport02', g='1';
//--><!]]>
</script><script type="text/javascript" src="http://www.ucaster.eu/static/scripts/ucaster.js"></script>
'''
fid = scrapertools.find_single_match (data, "channel='([^']+)'[^<]+</script><script type='text/javascript' src='http://www.ucaster.eu/static/scripts/ucaster.js'")
if fid == '':
fid = scrapertools.find_single_match (data, "channel='([^']+)'[^<]+<[^<]+</script><script type=['\"]text/javascript['\"] src=['\"]http://www.ucaster.eu/static/scripts/ucaster.js['\"]")
if fid == '':
return ''
pageurl = 'http://www.embeducaster.com/embedded/%s/1/726/500' % fid #http://www.embeducaster.com/embedded/danysportscucu/1/726/500
data2 = scrapertools.cachePage(pageurl, headers=headers)
if (DEBUG): logger.info("data2="+data2)
'''
<div class="player_div" align="center">
<span>
<script type="text/javascript" src="/static/scripts/swfobject.js"></script>
<div id="flashcontent">
<strong>You need to upgrade your Flash Player in order to watch movies from ucaster.eu</strong>
</div>
<script type="text/javascript">
var so = new SWFObject("/static/scripts/fplayer.swf", "fplayer", "726", "500", "9");
so.addParam('allowfullscreen','true');
so.addParam('allowscriptaccess','always');
so.addParam('wmode','transparent');
so.addParam('FlashVars', 'id=78955&s=danysportscucu&g=1&a=1&l=Dany Rojadirecta.me');
so.write("flashcontent");
</script>
</span>
</div>
'''
data3 = scrapertools.cache_page('http://www.embeducaster.com:1935/loadbalancer',headers=headers)
rtmpurl = 'rtmp://' + scrapertools.find_single_match (data3, "redirect=(.*)") + '/live'
idvalue, svalue = scrapertools.find_single_match (data2, "'FlashVars', 'id=([^&]+)&s=([^&]+)")
swfurl = 'http://www.embeducaster.com' + scrapertools.find_single_match (data2, 'new SWFObject\("([^"]+)"')
url = '%s playpath=%s?id=%s swfUrl=%s swfVfy=1 conn=S:OK live=1 pageUrl=%s' % (rtmpurl, svalue, idvalue, swfurl, pageurl)
#url = '%s playpath=%s?id=%s swfUrl=%s conn=S:OK live=1 timeout=20 pageUrl=%s --live' % (rtmpurl, svalue, idvalue, swfurl, pageurl)
return url
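# Illustrative shape of the returned string (placeholders, not real stream data):
#   rtmp://<loadbalancer-host>/live playpath=<channel>?id=<id>
#   swfUrl=http://www.embeducaster.com/static/scripts/fplayer.swf swfVfy=1
#   conn=S:OK live=1 pageUrl=http://www.embeducaster.com/embedded/<channel>/1/726/500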
|
gpl-3.0
| -695,613,796,193,651,600
| 48.529412
| 192
| 0.554632
| false
| 3.519331
| false
| false
| false
|
Ultimaker/Cura
|
plugins/UltimakerMachineActions/UMOUpgradeSelection.py
|
1
|
1985
|
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.MachineAction import MachineAction
from PyQt5.QtCore import pyqtSlot, pyqtSignal, pyqtProperty
from UM.i18n import i18nCatalog
from UM.Application import Application
catalog = i18nCatalog("cura")
from cura.Settings.CuraStackBuilder import CuraStackBuilder
class UMOUpgradeSelection(MachineAction):
"""The Ultimaker Original can have a few revisions & upgrades.
This action helps with selecting them, so they are added as a variant.
"""
def __init__(self):
super().__init__("UMOUpgradeSelection", catalog.i18nc("@action", "Select upgrades"))
self._qml_url = "UMOUpgradeSelectionMachineAction.qml"
def _reset(self):
self.heatedBedChanged.emit()
heatedBedChanged = pyqtSignal()
@pyqtProperty(bool, notify = heatedBedChanged)
def hasHeatedBed(self):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
return global_container_stack.getProperty("machine_heated_bed", "value")
@pyqtSlot(bool)
def setHeatedBed(self, heated_bed = True):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
# Make sure there is a definition_changes container to store the machine settings
definition_changes_container = global_container_stack.definitionChanges
if definition_changes_container == ContainerRegistry.getInstance().getEmptyInstanceContainer():
definition_changes_container = CuraStackBuilder.createDefinitionChangesContainer(
global_container_stack, global_container_stack.getId() + "_settings")
definition_changes_container.setProperty("machine_heated_bed", "value", heated_bed)
self.heatedBedChanged.emit()
|
lgpl-3.0
| 2,034,681,844,096,167,200
| 42.152174
| 107
| 0.723929
| false
| 4.135417
| false
| false
| false
|
zeal4u/FCA_Faceted_Search
|
bin/web_backend.py
|
1
|
2649
|
# -*- coding: utf-8 -*-
__author__ = 'jsz'
__version__ = 0.1
import web
import json
from search_engine import SearchService
from search_engine import FacetEncoder
from models import BookEncoder
# APIs exposed to front end
urls = (
"/keyWordsSearch","KeyWordsSearch",
"/facetSearch","FacetSearch",
"/expandSearch","ExpandSearch",
"/defineSearch","DefineSearch",
"/historySummary","HistorySummary",
"/(.*)", "Index"
)
app = web.application(urls, globals())
class Index:
def GET(self, url):
u"""
        :param url: required to match the catch-all route; its value is ignored and the request is redirected to the static front end
"""
raise web.redirect('/static/web-angular/app/index.html')
def json_encoder(search_result):
result = {
'facets': json.loads(json.dumps(search_result.facets, cls=FacetEncoder)),
'content': json.loads(json.dumps(search_result.content, cls=BookEncoder))
}
return json.dumps(result)
class KeyWordsSearch:
def GET(self):
u"""
        Back-end endpoint for keyword search
        :param key_words: String, keywords received from the front end to search with
        :return: SearchResult result set of the keyword search
"""
key_words_str = web.input()['key_words']
key_words = key_words_str.split()
ip = web.ctx.ip
service = SearchService.get_service_by_ip(ip)
search_result = service.key_words_search(key_words)
return json_encoder(search_result)
class FacetSearch:
def GET(self):
u"""
        Faceted search endpoint
        :param new_attr: int, the newly added facet attribute
        :return: SearchResult result set of the faceted search
"""
new_attr = int(web.input()['new_attr'])
ip = web.ctx.ip
service = SearchService.get_service_by_ip(ip)
search_result = service.facet_search(new_attr)
return json_encoder(search_result)
class ExpandSearch:
def GET(self):
u"""
        Generalize the search from the current query node
        :param degree: float, degree of generalization in the range 0~1
        :return: SearchResult result set of the generalized search
"""
return "ExpandSearch"
class DefineSearch:
def GET(self):
u"""
        Refine the search from the current query node
        :param degree: float, degree of refinement in the range 0~1
        :return: SearchResult result set of the refined search
"""
return "DefineSearch"
class HistorySummary:
def GET(self):
u"""
        Recommend results based on the query history
        :return: SearchResult result set recommended from the query history
"""
return "HistorySummary"
if __name__ == '__main__':
app.run()
|
mit
| 5,996,236,848,290,108,000
| 21.155963
| 81
| 0.575983
| false
| 2.981481
| false
| false
| false
|
and3rson/isc
|
examples/test_client.py
|
1
|
2134
|
#!/usr/bin/env python3.6
from isc.client import Client
from threading import Thread, Event
from time import time
from random import random
import logging
ITERATIONS = 1
CONN_POOL_SIZE = 1
COUNT = 1000
class Process(Thread):
def __init__(self, client):
super(Process, self).__init__()
self.proceed_evt = Event()
self.client = client
self.timediff = 0
def run(self):
self.proceed_evt.wait()
for i in range(0, ITERATIONS):
start = time()
self.client.example.add(random(), random(), wait=0)
self.timediff += int((time() - start) * 1000)
def create_client():
client = Client(exchange='isctest')
client.set_logging_level(logging.INFO)
client.start()
return client
"""
client = create_client()
client.example.start_tracking()
"""
print('Creating', CONN_POOL_SIZE, 'connections')
clients = []
events = []
for _ in range(CONN_POOL_SIZE):
event = Event()
client = create_client()
clients.append(client)
events.append(event)
client.on_connect += event.set
for i, (client, event) in enumerate(zip(clients, events)):
print('Waiting for client', i, 'to become ready')
event.wait()
print('Client', i, 'ready')
print('Creating', COUNT, 'requesters')
threads = []
for i in range(0, COUNT):
threads.append(Process(clients[i % CONN_POOL_SIZE]))
print('Starting workers')
for thread in threads:
thread.start()
print('Starting attack ({} requests per worker)...'.format(ITERATIONS))
for thread in threads:
thread.proceed_evt.set()
start = time()
for thread in threads:
thread.join()
timediff = int((time() - start) * 1000)
print('Done in {}ms'.format(timediff))
print('avg: {}ms, min: {}ms, max: {}ms'.format(
sum([thread.timediff / ITERATIONS for thread in threads]) / len(threads),
min([thread.timediff / ITERATIONS for thread in threads]),
max([thread.timediff / ITERATIONS for thread in threads])
))
"""
print('Final server summary:')
summary = client.example.get_summary()
for line in summary:
print(line)
"""
for client in clients:
client.stop()
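# --- Illustrative single-connection sketch (not part of the benchmark above) ---
# Uses only calls already exercised in this script: create one Client, wait for its
# on_connect event, fire a single fire-and-forget request (wait=0), then shut down.
def _single_request_demo():
    ready = Event()
    demo_client = create_client()
    demo_client.on_connect += ready.set
    ready.wait()
    demo_client.example.add(random(), random(), wait=0)
    demo_client.stop()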
|
gpl-3.0
| 511,173,291,182,243,840
| 20.77551
| 77
| 0.64761
| false
| 3.419872
| false
| false
| false
|
Hoikas/korman
|
korman/exporter/logger.py
|
1
|
2584
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import os.path
import sys
class ExportAnalysis:
"""This is used to collect artist action items from the export process. You can warn about
portability issues, possible oversights, etc. The benefit here is that the user doesn't have
to look through all of the gobbledygook in the export log.
"""
_porting = []
_warnings = []
def save(self):
# TODO
pass
def port(self, message, indent=0):
self._porting.append(message)
print(" " * indent, end="")
print("PORTING: {}".format(message))
def warn(self, message, indent=0):
self._warnings.append(message)
print(" " * indent, end="")
print("WARNING: {}".format(message))
class ExportLogger:
"""Yet Another Logger(TM)"""
def __init__(self, ageFile):
# Make the log file name from the age file path -- this ensures we're not trying to write
# the log file to the same directory Blender.exe is in, which might be a permission error
path, ageFile = os.path.split(ageFile)
ageName, _crap = os.path.splitext(ageFile)
fn = os.path.join(path, "{}_export.log".format(ageName))
self._file = open(fn, "w")
for i in dir(self._file):
if not hasattr(self, i):
setattr(self, i, getattr(self._file, i))
def __enter__(self):
self._stdout, sys.stdout = sys.stdout, self._file
self._stderr, sys.stderr = sys.stderr, self._file
def __exit__(self, type, value, traceback):
sys.stdout = self._stdout
sys.stderr = self._stderr
def flush(self):
self._file.flush()
self._stdout.flush()
self._stderr.flush()
def write(self, str):
self._file.write(str)
self._stdout.write(str)
def writelines(self, seq):
self._file.writelines(seq)
self._stdout.writelines(seq)
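# --- Illustrative usage sketch (not part of the exporter) ---
# ExportLogger is a context manager: anything printed inside the "with" block is
# redirected into "<age>_export.log" next to the .age file, and the original
# stdout/stderr are restored on exit. The path below is hypothetical.
def _example_export_logging():
    analysis = ExportAnalysis()
    log = ExportLogger("/tmp/MyAge.age")
    with log:
        print("exporting MyAge...")                  # captured by the log file
        analysis.warn("texture is not a power of two", indent=1)
    log.close()                                      # close() is delegated to the underlying file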
|
gpl-3.0
| -5,733,352,604,971,628,000
| 32.558442
| 99
| 0.627709
| false
| 3.833828
| false
| false
| false
|
Kotaimen/stonemason
|
stonemason/pyramid/geo/tms.py
|
1
|
10943
|
# -*- encoding: utf-8 -*-
"""
stonemason.pyramid.geo.geosys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Geographic system
"""
__author__ = 'kotaimen'
__date__ = '3/20/15'
import collections
from osgeo import osr
from osgeo import ogr
osr.UseExceptions()
ogr.UseExceptions()
from stonemason.pyramid import Pyramid
from stonemason.pyramid import TileIndex, MetaTileIndex
class TileMapError(RuntimeError):
pass
_Envelope = collections.namedtuple('Envelope', 'left bottom right top')
class Envelope(_Envelope):
"""A rectangular area on the projection surface, defined by two corner
points ``(left, bottom, right, top)``.
"""
@staticmethod
def from_ogr(e):
"""Create a envelope from a call from :func:`ogr.Geometry.GetEnvelope()`,
which is defined by ``(minx, maxx, miny, maxy)``.
"""
return Envelope(e[0], e[2], e[1], e[3])
def to_geometry(self, srs=None):
"""Convert the envelope to a :class:`ogr.Geometry` instance, with
        the specified spatial reference system"""
left, bottom, right, top = self
bbox = (left, bottom, right, bottom,
right, top, left, top, left, bottom)
wkt = 'POLYGON((%.9f %.9f, %.9f %.9f, %.9f %.9f, %.9f %.9f, %.9f %.9f))' % bbox
return ogr.CreateGeometryFromWkt(wkt, srs)
class TileMapSystem(object):
"""Defines geographic attributes of a `pyramid` tile map system.
>>> from stonemason.pyramid import Pyramid, MetaTileIndex
>>> from stonemason.pyramid.geo import TileMapSystem
>>> pyramid = Pyramid(geogcs='EPSG:4326', projcs='EPSG:3857')
>>> tms = TileMapSystem(pyramid)
>>> tms.geogcs # doctest: +ELLIPSIS
<osgeo.osr.SpatialReference; proxy of <Swig Object of type 'OSRSpatialReferenceShadow *' at ...> >
>>> tms.pyramid.geogcs
'+proj=longlat +datum=WGS84 +no_defs '
>>> tms.forward_projection # doctest: +ELLIPSIS
<osgeo.osr.CoordinateTransformation; proxy of <Swig Object of type 'OSRCoordinateTransformationShadow *' at ...> >
>>> index = MetaTileIndex(4, 12, 12, 8)
>>> tms.calc_tile_envelope(index)
Envelope(left=0.0, bottom=-20037508.34, right=20037508.34, top=0.0)
.. note:: `TileMapSystem` uses `GDAL <http://www.gdal.org/>`_ for spatial
calculations, the actual list of supported spatial references and
coordinate transforms depends on `GDAL` installation and may vary
between distributions.
.. seealso:: `Geometry`_, `SpatialReference`_, `CoordinateTransformation`_
.. _Geometry: http://gdal.org/python/osgeo.ogr.Geometry-class.html
.. _SpatialReference: http://gdal.org/python/osgeo.osr.SpatialReference-class.html
.. _CoordinateTransformation: http://gdal.org/python/osgeo.osr.CoordinateTransformation-class.html
:param pyramid: The `pyramid` defines the tile map system, the following
attributes are used to create `TileMapSystem`:
``Pyramid.geogcs``
Geographic coordinate system, can be any string supported by
:func:`~osgeo.ogr.SpatialReference.SetFromUserInput`.
``Pyramid.projcs``
Projection coordinate system, can be any string supported by
:func:`~osgeo.ogr.SpatialReference.SetFromUserInput`.
When set to ``None``, `TileMapSystem` will try to figure
out one from ``geogcs``.
``Pyramid.geogbounds``
Boundary of the map in geography coordinate system. Specified using
envelope ``(min_lon, min_lat, max_lon, max_lat)``.
        The envelope is not treated as an ogr simple geometry and may
        behave incorrectly for some GCS if it crosses the meridian line.
``Pyramid.projbounds``
Boundary of the map in projection coordinate system. Specified using
envelope ``(left, bottom, right, top)``. When set to ``None``,
        this will be calculated by projecting ``geogbounds`` from ``geogcs``
        to ``projcs``. Note this calculation may fail or give an incorrect
result due to limitations in GDAL.
:type pyramid: :class:`~stonemason.pyramid.Pyramid`
"""
def __init__(self, pyramid):
assert isinstance(pyramid, Pyramid)
self._projcs = None
self._geogcs = None
self._forward_projection = None
self._backward_projection = None
self._geogbounds = None
self._projbounds = None
self._init_spatial_ref(pyramid)
self._init_projections(pyramid)
self._init_bounds(pyramid)
# construct a normalized pyramid from calculations above
self._pyramid = Pyramid(
levels=pyramid.levels,
stride=pyramid.stride,
projcs=self._projcs.ExportToProj4(),
geogcs=self._geogcs.ExportToProj4(),
geogbounds=Envelope.from_ogr(self._geogbounds.GetEnvelope()),
projbounds=Envelope.from_ogr(self._projbounds.GetEnvelope()),
)
@property
def projcs(self):
"""Projection coordinate system.
:rtype: :class:`osgeo.osr.SpatialReference`
"""
return self._projcs
@property
def geogcs(self):
"""Geographic coordinate system.
:rtype: :class:`osgeo.osr.SpatialReference`
"""
return self._geogcs
@property
def forward_projection(self):
"""Defines coordinate transformation from geographic coordinate
system to projection coordinate system.
:rtype: :class:`osgeo.osr.CoordinateTransformation`
"""
return self._forward_projection
@property
def backward_projection(self):
"""Defines coordinate transformation from projection coordinate
system to geographic coordinate system.
:rtype: :class:`osgeo.osr.CoordinateTransformation`
"""
return self._backward_projection
@property
def geog_bounds(self):
"""Bounds of the tile map system in geometry coordinate system.
:rtype: :class:`osgeo.osr.Geometry`
"""
return self._geogbounds
@property
def proj_bounds(self):
"""Bounds of the tile map system in projection coordinate system.
:rtype: :class:`osgeo.osr.Geometry`
"""
return self._projbounds
@property
def pyramid(self):
"""Normalized pyramid object.
:rtype: :class:`~stonemason.pyramid.Pyramid`
"""
return self._pyramid
def _init_spatial_ref(self, pyramid):
# create projection coordinate system from Pyramid
self._projcs = osr.SpatialReference()
self._projcs.SetFromUserInput(pyramid.projcs)
# must be a map projection
if not self._projcs.IsProjected():
raise TileMapError('Not a projection coordinate system.')
# create geographic coordinate system from Pyramid
self._geogcs = osr.SpatialReference()
if pyramid.geogcs is not None:
self._geogcs.SetFromUserInput(pyramid.geogcs)
else:
            # try to figure out the geogcs of the projection if it's not specified
code = self._projcs.GetAuthorityCode('geogcs')
authority = self._projcs.GetAuthorityName('geogcs')
if code is None or authority is None:
raise TileMapError("Cannot figure out geogcs automaticlly.")
self._geogcs.SetFromUserInput('%s:%s' % (authority, code))
# XXX: Fix up wkt +over issue
# By default PROJ.4 wraps output longitudes in the range -180 to 180.
# The +over switch can be used to disable the default wrapping which
# is done at a low level.
projcs = self._projcs.ExportToProj4()
if '+over' not in projcs.split():
projcs += ' +over'
self._projcs.ImportFromProj4(projcs)
geogcs = self._geogcs.ExportToProj4()
if '+over' not in geogcs.split():
geogcs += ' +over'
self._geogcs.ImportFromProj4(geogcs)
def _init_projections(self, pyramid):
self._forward_projection = osr.CoordinateTransformation(self._geogcs,
self._projcs)
        self._backward_projection = osr.CoordinateTransformation(self._projcs,
                                                                 self._geogcs)
def _init_bounds(self, pyramid):
self._geogbounds = Envelope(*pyramid.geogbounds) \
.to_geometry(self._geogcs)
if pyramid.projbounds is None:
geobounds = self._geogbounds.Clone()
geobounds.Transform(self._forward_projection)
self._projbounds = geobounds
else:
self._projbounds = Envelope(*pyramid.projbounds) \
.to_geometry(self._projcs)
def _calc_max_bbox(self):
envelope = self.proj_bounds.GetEnvelope()
min_x, max_x, min_y, max_y = envelope
size_x = abs(max_x - min_x)
size_y = abs(max_y - min_y)
scale = max([size_x, size_y])
# fit projection bounds to a square box, if necessary
if size_x > size_y:
offset_x = min_x
offset_y = min_y - (size_x - size_y) / 2
elif size_x < size_y:
offset_x = min_x - (size_y - size_x) / 2
offset_y = min_y
else:
offset_x = min_x
offset_y = min_y
return offset_x, offset_y, scale
def calc_tile_envelope(self, index):
""" Calculates envelope of given `TileIndex` of `MetaTileIndex` under
projection coordinate system.
:param index: Given tile index or metatile index
:type index: :class:`~stonemason.pyramid.TileIndex` or
:class:`~stonemason.pyramid.MetaTileIndex`
:return: Calculated envelope
:rtype: :class:`~stonemason.pyramid.geo.Envelope`
"""
# just convert metatile index to higher level tile index
if isinstance(index, MetaTileIndex):
index = index.to_tile_index()
assert isinstance(index, TileIndex)
# XXX: should've cached this
offset_x, offset_y, scale = self._calc_max_bbox()
z, x, y = index.z, index.x, index.y
norm_factor = 2. ** z
norm_min_x = x / norm_factor
norm_max_x = (x + 1) / norm_factor
norm_min_y = 1 - (y + 1) / norm_factor
norm_max_y = 1 - y / norm_factor
envelope = Envelope(norm_min_x * scale + offset_x,
norm_min_y * scale + offset_y,
norm_max_x * scale + offset_x,
norm_max_y * scale + offset_y)
return envelope
def __repr__(self):
return '''GeographicSystem
projcs: %s
geogcs: %s
projbounds: %s
geogbounds: %s
)''' % (self._projcs.ExportToWkt(),
self._geogcs.ExportToWkt(),
self._projbounds.ExportToWkt(),
self._geogbounds.ExportToWkt())
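# --- Illustrative sketch (not part of the module) ---
# Round-trips an Envelope through OGR using only APIs already imported above: build a
# spatial reference, convert the envelope to an ogr.Geometry polygon, then read its
# envelope back. The EPSG code and coordinates are arbitrary example values.
def _example_envelope_roundtrip():
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:4326')
    env = Envelope(0.0, 0.0, 10.0, 10.0)   # (left, bottom, right, top)
    polygon = env.to_geometry(srs)
    # ogr's GetEnvelope() returns (minx, maxx, miny, maxy); from_ogr() reorders it
    return Envelope.from_ogr(polygon.GetEnvelope())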
|
mit
| 1,514,051,397,815,786,000
| 34.186495
| 118
| 0.607512
| false
| 3.905425
| false
| false
| false
|
elsantodel90/RAAGo
|
aago_ranking/users/admin.py
|
1
|
1070
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
error_message = UserCreationForm.error_messages.update(
{
'duplicate_username': 'This username has already been taken.'
}
)
class Meta(UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
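# --- Illustrative sketch (not part of the app) ---
# Exercises MyUserCreationForm's duplicate-username check directly. The field names
# follow the stock UserCreationForm contract (username, password1, password2); the
# values below are made up for the example.
def _example_duplicate_username_check(existing_name='taken_name'):
    form = MyUserCreationForm(data={
        'username': existing_name,
        'password1': 'not-a-real-password',
        'password2': 'not-a-real-password',
    })
    # is_valid() runs clean_username(); a taken username surfaces as a field error
    return form.is_valid(), form.errors.get('username')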
|
gpl-3.0
| -776,621,858,203,700,000
| 25.75
| 78
| 0.703738
| false
| 4.297189
| false
| false
| false
|
caktus/ibid
|
ibid/plugins/core.py
|
1
|
11725
|
# Copyright (c) 2008-2010, Michael Gorven, Stefano Rivera
# Released under terms of the MIT/X/Expat Licence. See COPYING for details.
import re
from datetime import datetime, timedelta
from random import choice
import logging
import ibid
from ibid.config import IntOption, ListOption, DictOption
from ibid.plugins import Processor, handler
from ibid.plugins.identity import identify
class Addressed(Processor):
priority = -1500
addressed = False
names = ListOption('names', 'Names to respond to', [ibid.config['botname']])
verbs = ListOption('verbs', u'Verbs to ignore', ('is', 'has', 'was', 'might', 'may', 'would', 'will', "isn't", "hasn't", "wasn't", "wouldn't", "won't", 'can', "can't", 'did', "didn't", 'said', 'says', 'should', "shouldn't", 'does', "doesn't"))
def setup(self):
names = '|'.join(re.escape(x) for x in self.names)
verbs = '|'.join(re.escape(x) for x in self.verbs)
self.patterns = [
re.compile(r'^\s*(?P<nick>%s)' % names
+ r'(?:\s*[:;.?>!,-]+\s+|\s+|\s*[,:]\s*)(?P<body>.*)',
re.I | re.DOTALL),
# "hello there, bot"-style addressing. But we want to be sure that
# there wasn't normal addressing too:
re.compile(r'^(?:\S+:.*|(?P<body>.*),\s*(?P<nick>%s))[\s?!.]*$' % names,
re.I | re.DOTALL)
]
self.verb_pattern = re.compile(r'^(?:%s)\s+(?:%s)\s+' % (names, verbs),
re.I | re.DOTALL)
@handler
def handle_addressed(self, event):
if 'addressed' not in event:
event.addressed = False
if self.verb_pattern.match(event.message['stripped']):
return
for pattern in self.patterns:
matches = pattern.search(event.message['stripped'])
if matches and matches.group('nick'):
new_message = matches.group('body')
event.addressed = matches.group('nick')
event.message['clean'] = new_message
event.message['deaddressed'] = \
pattern.search(event.message['raw']).group('body')
class Strip(Processor):
priority = -1600
addressed = False
event_types = (u'message', u'action', u'notice')
pattern = re.compile(r'^\s*(.*?)\s*[?!.]*\s*$', re.DOTALL)
@handler
def handle_strip(self, event):
if isinstance(event.message, basestring):
event.message = {'raw': event.message, 'deaddressed': event.message,}
event.message['clean'] = event.message['stripped'] \
= self.pattern.search(event.message['raw']).group(1)
class Ignore(Processor):
priority = -1500
addressed = False
event_types = (u'message', u'action', u'notice', u'invite')
nicks = ListOption('ignore', 'List of nicks to ignore', [])
@handler
def handle_ignore(self, event):
for who in self.nicks:
if event.sender['nick'] == who:
event.processed = True
class IgnorePublic(Processor):
priority = -1490
@handler
def ignore_public(self, event):
if event.public and not ibid.auth.authorise(event, u'publicresponse'):
event.addresponse(
u"Sorry, I'm not allowed to talk to you in public. "
'Ask me by private message.'
)
class Address(Processor):
priority = 1600
processed = True
addressed = False
event_types = (u'message', u'action', u'notice', u'state', u'invite')
acknowledgements = ListOption('acknowledgements', 'Responses for positive acknowledgements',
(u'Okay', u'Sure', u'Done', u'Righto', u'Alrighty', u'Yessir'))
refusals = ListOption('refusals', 'Responses for negative acknowledgements',
(u'No', u"I won't", u"Shan't", u"I'm sorry, but I can't do that"))
@handler
def address(self, event):
for response in event.responses:
if isinstance(response['reply'], bool):
if response:
response['reply'] = choice(self.acknowledgements)
else:
response['reply'] = choice(self.refusals)
if (response.get('address', False)
and not response.get('action', False)
and not response.get('notice', False)
and event.public):
response['reply'] = ('%s: %s' % (
event.sender['nick'], response['reply']))
class Timestamp(Processor):
priority = -1900
def process(self, event):
event.time = datetime.utcnow()
class Complain(Processor):
priority = 950
processed = True
event_types = (u'message', u'action', u'invite')
complaints = DictOption('complaints', 'Complaint responses', {
'nonsense': (
u'Huh?', u'Sorry...',
u'Excuse me?', u'*blink*', u'What?',
),
'notauthed': (
u"I'm not your bitch", u"Just do it yourself",
u"I'm not going to listen to you", u"You're not the boss of me",
),
'exception': (
u"I'm not feeling too well", u"That didn't go down very well. Burp.",
u"That didn't seem to agree with me",
),
'network': (
u'The tubes are clogged!', u"I can't reach that site",
u"That site seems to be down",
),
})
@handler
def complain(self, event):
if 'complain' in event and not event.responses:
event.addresponse(choice(self.complaints[event.complain]))
elif event.processed:
return
else:
event.addresponse(choice(self.complaints['nonsense']))
class RateLimit(Processor):
priority = -1000
event_types = (u'message', u'action', u'notice')
limit_time = IntOption('limit_time', 'Time period over which to measure messages', 10)
limit_messages = IntOption('limit_messages', 'Number of messages to allow during the time period', 5)
messages = {}
@handler
def ratelimit(self, event):
if event.identity not in self.messages:
self.messages[event.identity] = [event.time]
else:
self.messages[event.identity].append(event.time)
self.messages[event.identity] = filter(
lambda x: event.time - x < timedelta(seconds=self.limit_time),
self.messages[event.identity])
if len(self.messages[event.identity]) > self.limit_messages:
if event.public:
event.addresponse(u'Geez, give me some time to think!', address=False)
else:
event.processed = True
class Format(Processor):
priority = 2000
def _truncate(self, line, length):
if length is not None:
eline = line.encode('utf-8')
if len(eline) > length:
# horizontal ellipsis = 3 utf-8 bytes
return eline[:length-3].decode('utf-8', 'ignore') \
+ u'\N{horizontal ellipsis}'
return line
def process(self, event):
filtered = []
for response in event.responses:
source = response['source'].lower()
supports = ibid.sources[source].supports
maxlen = ibid.sources[source].truncation_point(response, event)
if response.get('action', False) and 'action' not in supports:
response['reply'] = u'*%s*' % response['reply']
conflate = response.get('conflate', True)
# Expand response into multiple single-line responses:
if (not conflate and 'multiline' not in supports):
for line in response['reply'].split('\n'):
r = {'reply': self._truncate(line, maxlen)}
for k in response.iterkeys():
                        if k not in ('reply',):
r[k] = response[k]
filtered.append(r)
# Expand response into multiple multi-line responses:
elif (not conflate and 'multiline' in supports
and maxlen is not None):
message = response['reply']
while len(message.encode('utf-8')) > maxlen:
splitpoint = len(message.encode('utf-8')[:maxlen] \
.decode('utf-8', 'ignore'))
parts = [message[:splitpoint].rstrip(),
message[splitpoint:].lstrip()]
for sep in u'\n.;:, ':
if sep in u'\n ':
search = message[:splitpoint+1]
else:
search = message[:splitpoint]
if sep in search:
splitpoint = search.rindex(sep)
parts = [message[:splitpoint+1].rstrip(),
message[splitpoint+1:]]
break
r = {'reply': parts[0]}
for k in response.iterkeys():
                        if k not in ('reply',):
r[k] = response[k]
filtered.append(r)
message = parts[1]
response['reply'] = message
filtered.append(response)
else:
line = response['reply']
# Remove any characters that make no sense on IRC-like sources:
if 'multiline' not in supports:
line = line.expandtabs(1) \
.replace('\n', conflate == True
and u' ' or conflate or u'')
response['reply'] = self._truncate(line, maxlen)
filtered.append(response)
event.responses = filtered
class UnicodeWarning(Processor):
priority = 1950
def setup(self):
self.log = logging.getLogger('plugins.unicode')
def process(self, object):
if isinstance(object, dict):
for value in object.values():
self.process(value)
elif isinstance(object, list):
for value in object:
self.process(value)
elif isinstance(object, str):
self.log.warning(u'Found a non-unicode string: %r' % object)
class ChannelTracker(Processor):
priority = -1550
addressed = False
event_types = (u'state', u'source')
@handler
def track(self, event):
if event.type == u'source':
if event.status == u'disconnected':
ibid.channels.pop(event.source, None)
elif event.status == u'left':
ibid.channels[event.source].pop(event.channel, None)
elif event.public:
if event.state == u'online' and hasattr(event, 'othername'):
oldid = identify(event.session, event.source, event.othername)
for channel in ibid.channels[event.source].values():
if oldid in channel:
channel.remove(oldid)
channel.add(event.identity)
elif event.state == u'online':
ibid.channels[event.source][event.channel].add(event.identity)
elif event.state == u'offline' and not hasattr(event, 'othername'):
if event.channel:
ibid.channels[event.source][event.channel].remove(event.identity)
else:
for channel in ibid.channels[event.source].values():
channel.discard(event.identity)
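# --- Illustrative sketch (not part of the plugin) ---
# The same byte-length-safe truncation idea used by Format._truncate, shown standalone.
# Python 2 semantics are assumed, matching the module above: line is a unicode string
# and max_bytes is the source's truncation point measured in UTF-8 bytes.
def _truncate_utf8(line, max_bytes):
    encoded = line.encode('utf-8')
    if len(encoded) > max_bytes:
        # reserve 3 bytes for the ellipsis and drop any partially-cut trailing character
        return encoded[:max_bytes - 3].decode('utf-8', 'ignore') + u'\N{horizontal ellipsis}'
    return line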
# vi: set et sta sw=4 ts=4:
|
gpl-3.0
| -4,911,617,517,799,839,000
| 37.316993
| 247
| 0.532367
| false
| 4.160752
| false
| false
| false
|
openaid-IATI/deprecated-version-OIPA-v2
|
iati/data/migrations/0035_auto__add_indicatorcitydata.py
|
1
|
35113
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IndicatorCityData'
db.create_table('data_indicatorcitydata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.Indicator'])),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.City'])),
('value', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('year', self.gf('django.db.models.fields.IntegerField')(max_length=5)),
))
db.send_create_signal('data', ['IndicatorCityData'])
def backwards(self, orm):
# Deleting model 'IndicatorCityData'
db.delete_table('data_indicatorcitydata')
models = {
'data.activitystatistics': {
'Meta': {'object_name': 'ActivityStatistics'},
'iati_identifier': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.IATIActivity']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_budget': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2', 'blank': 'True'})
},
'data.activitystatustype': {
'Meta': {'object_name': 'ActivityStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.aidtype': {
'Meta': {'object_name': 'AidType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.budget': {
'Meta': {'object_name': 'Budget'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
'data.city': {
'Meta': {'object_name': 'City'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'data.collaborationtype': {
'Meta': {'object_name': 'CollaborationType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'})
},
'data.country': {
'Meta': {'object_name': 'Country'},
'country_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dac_country_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dac_region_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dac_region_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'iso3': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'data.countrystatistics': {
'Meta': {'object_name': 'CountryStatistics'},
'country': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Country']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.currencytype': {
'Meta': {'object_name': 'CurrencyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.financetype': {
'Meta': {'object_name': 'FinanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.flowtype': {
'Meta': {'object_name': 'FlowType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.iatiactivity': {
'Meta': {'object_name': 'IATIActivity'},
'activity_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.ActivityStatusType']", 'null': 'True', 'blank': 'True'}),
'collaboration_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CollaborationType']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'default_aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'default_finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'default_flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'default_tied_status_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.TiedAidStatusType']", 'null': 'True', 'blank': 'True'}),
'end_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iati_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reporting_organisation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Organisation']"}),
'start_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitybudget': {
'Meta': {'object_name': 'IATIActivityBudget', '_ormbases': ['data.Budget']},
'budget_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Budget']", 'unique': 'True', 'primary_key': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"})
},
'data.iatiactivitycontact': {
'Meta': {'object_name': 'IATIActivityContact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailing_address': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'person_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitycountry': {
'Meta': {'object_name': 'IATIActivityCountry'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydescription': {
'Meta': {'object_name': 'IATIActivityDescription'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydocument': {
'Meta': {'object_name': 'IATIActivityDocument'},
'format': ('django.db.models.fields.CharField', [], {'max_length': '55', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatiactivitypolicymarker': {
'Meta': {'object_name': 'IATIActivityPolicyMarker'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'significance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.SignificanceType']", 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.iatiactivityregion': {
'Meta': {'object_name': 'IATIActivityRegion'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Region']"})
},
'data.iatiactivitysector': {
'Meta': {'object_name': 'IATIActivitySector'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sectors'", 'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Sector']"})
},
'data.iatiactivitytitle': {
'Meta': {'object_name': 'IATIActivityTitle'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.iatiactivitywebsite': {
'Meta': {'object_name': 'IATIActivityWebsite'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatitransaction': {
'Meta': {'object_name': 'IATITransaction', '_ormbases': ['data.Transaction']},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
'data.indicator': {
'Meta': {'object_name': 'Indicator'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'})
},
'data.indicatorcitydata': {
'Meta': {'object_name': 'IndicatorCityData'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.City']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Indicator']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
'data.indicatordata': {
'Meta': {'object_name': 'IndicatorData'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Indicator']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
'data.language': {
'Meta': {'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.organisation': {
'Meta': {'object_name': 'Organisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25', 'primary_key': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.organisationstatistics': {
'Meta': {'object_name': 'OrganisationStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organisation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Organisation']", 'unique': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.otheridentifier': {
'Meta': {'object_name': 'OtherIdentifier'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner_ref': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.participatingorganisation': {
'Meta': {'object_name': 'ParticipatingOrganisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.planneddisbursement': {
'Meta': {'object_name': 'PlannedDisbursement'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {})
},
'data.region': {
'Meta': {'object_name': 'Region'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.sector': {
'Meta': {'object_name': 'Sector'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.significancetype': {
'Meta': {'object_name': 'SignificanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.tiedaidstatustype': {
'Meta': {'object_name': 'TiedAidStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.transaction': {
'Meta': {'object_name': 'Transaction'},
'aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'disbursement_channel': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider_org': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provider_org'", 'to': "orm['data.Organisation']"}),
'receiver_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver_org'", 'null': 'True', 'to': "orm['data.Organisation']"}),
'tied_aid_status_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'transaction_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_date': ('django.db.models.fields.DateField', [], {})
},
'data.typedeprivationcity': {
'Meta': {'object_name': 'TypeDeprivationCity'},
'extra_type_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'four_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.UnHabitatIndicatorCity']"}),
'is_matrix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'non_slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'one_shelter_deprivation': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rural': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'three_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'two_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type_deprivation': ('django.db.models.fields.IntegerField', [], {}),
'urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'data.typedeprivationcountry': {
'Meta': {'object_name': 'TypeDeprivationCountry'},
'extra_type_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'four_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.UnHabitatIndicatorCountry']"}),
'is_matrix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'non_slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'one_shelter_deprivation': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rural': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'three_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'two_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type_deprivation': ('django.db.models.fields.IntegerField', [], {}),
'urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'data.unhabitatindicatorcity': {
'Meta': {'object_name': 'UnHabitatIndicatorCity'},
'avg_annual_rate_change_urban_agglomerations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bottle_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.City']"}),
'composting_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'connection_to_electricity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_4_dimensions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_5_dimensions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_environment_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_equity_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_infrastructure_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_productivity_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_quality_of_live_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'diarrhea_had_ari': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'diarrhea_last_two_weeks': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_female_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_male_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'fever_last_two_weeks': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'has_telephone': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'improved_floor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_flush_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_pit_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_spring_surface_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'perc_malnourished': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'perc_measles': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'piped_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_with_slab_or_covered_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_without_slab': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_agglomerations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'protected_well': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'public_tap_pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rainwater': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_proportion_living_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sufficient_living': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'under_five_mortality_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'urban_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'urban_slum_population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {}),
'year_plus_range': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.unhabitatindicatorcountry': {
'Meta': {'object_name': 'UnHabitatIndicatorCountry'},
'avg_annual_rate_change_percentage_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avg_annual_rate_change_total_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bottle_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'composting_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'connection_to_electricity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'enrollment_female_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_male_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'has_telephone': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'improved_floor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_flush_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_pit_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_spring_surface_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'piped_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_with_slab_or_covered_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_without_slab': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_rural_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_percentage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'protected_well': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'public_tap_pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rainwater': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_proportion_living_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sufficient_living': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'under_five_mortality_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'urban_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'urban_slum_population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {}),
'year_plus_range': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.vocabularytype': {
'Meta': {'object_name': 'VocabularyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'primary_key': 'True'})
}
}
complete_apps = ['data']
|
agpl-3.0
| -1,100,802,931,386,025,900
| 82.208531
| 182
| 0.552018
| false
| 3.540331
| false
| false
| false
|
ctu-geoforall-lab-sandbox/qgis-aerogen-plugin
|
aerogen.py
|
1
|
7702
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
AeroGen
A QGIS plugin
AeroGen Plugin
-------------------
begin : 2017-04-24
git sha : $Format:%H$
copyright : (C) 2017 by CTU GeoForAll Lab
email : martin.landa@fsv.cvut.cz
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from PyQt4.QtGui import QAction, QIcon, QToolButton
# Initialize Qt resources from file resources.py
import resources
# Import the code for the DockWidget
from aerogen_dockwidget import AeroGenDockWidget
import os.path
class AeroGen:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'AeroGen_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&AeroGen')
# add plugin icon into plugin toolbar
self.toolButton = QToolButton()
#print "** INITIALIZING AeroGen"
self.pluginIsActive = False
self.dockwidget = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('AeroGen', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolButton.setDefaultAction(action)
self.iface.addToolBarWidget(self.toolButton)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
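    # NOTE (added): a hedged sketch of how add_action can be called; the icon
    # path and option values below are illustrative assumptions only, not part
    # of the original plugin (initGui below shows the real call site).
    #
    #   self.add_action(
    #       ':/plugins/AeroGen/icon.png',
    #       text=self.tr(u'AeroGen'),
    #       callback=self.run,
    #       status_tip=self.tr(u'Open the AeroGen dock widget'),
    #       add_to_toolbar=True,
    #       parent=self.iface.mainWindow())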
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/AeroGen/icon.png'
self.add_action(
icon_path,
text=self.tr(u'AeroGen'),
callback=self.run,
parent=self.iface.mainWindow())
#--------------------------------------------------------------------------
def onClosePlugin(self):
"""Cleanup necessary items here when plugin dockwidget is closed"""
#print "** CLOSING AeroGen"
# disconnects
self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)
# remove this statement if dockwidget is to remain
# for reuse if plugin is reopened
        # Commented next statement since it causes QGIS crashes
# when closing the docked window:
# self.dockwidget = None
self.pluginIsActive = False
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
#print "** UNLOAD AeroGen"
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&AeroGen'),
action)
self.iface.removeToolBarIcon(action)
#--------------------------------------------------------------------------
def run(self):
"""Run method that loads and starts the plugin"""
if not self.pluginIsActive:
self.pluginIsActive = True
#print "** STARTING AeroGen"
# dockwidget may not exist if:
# first run of plugin
# removed on close (see self.onClosePlugin method)
            if self.dockwidget is None:
# Create the dockwidget (after translation) and keep reference
self.dockwidget = AeroGenDockWidget()
# connect to provide cleanup on closing of dockwidget
self.dockwidget.closingPlugin.connect(self.onClosePlugin)
# show the dockwidget
# TODO: fix to allow choice of dock location
self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
self.dockwidget.show()
|
gpl-3.0
| 1,069,905,357,386,615,600
| 32.198276
| 79
| 0.550247
| false
| 4.65659
| false
| false
| false
|
chiefenne/PyAero
|
src/SplineRefine.py
|
1
|
13180
|
import copy
import numpy as np
from scipy import interpolate
from PySide2 import QtGui, QtCore
from Utils import Utils
import GraphicsItemsCollection as gic
import GraphicsItem
import ContourAnalysis as ca
import logging
logger = logging.getLogger(__name__)
class SplineRefine:
def __init__(self):
# get MainWindow instance (overcomes handling parents)
self.mainwindow = QtCore.QCoreApplication.instance().mainwindow
def doSplineRefine(self, tolerance=172.0, points=150, ref_te=3,
ref_te_n=6, ref_te_ratio=3.0):
logger.debug('Arrived in doSplineRefine')
# get raw coordinates
x, y = self.mainwindow.airfoil.raw_coordinates
# interpolate a spline through the raw contour points
# constant point distribution used here
        # typically the nose radius is poorly resolved by that
self.spline_data = self.spline(x, y, points=points, degree=3)
# refine the contour in order to meet the tolerance
# this keeps the constant distribution but refines around the nose
spline_data = copy.deepcopy(self.spline_data)
self.refine(spline_data, tolerance=tolerance)
# redo spline on refined contour
# spline only evaluated at refined contour points (evaluate=True)
coo, u, t, der1, der2, tck = self.spline_data
x, y = coo
self.spline_data = self.spline(x, y, points=points, degree=3,
evaluate=True)
# refine the trailing edge of the spline
self.refine_te(ref_te, ref_te_n, ref_te_ratio)
# add spline data to airfoil object
self.mainwindow.airfoil.spline_data = self.spline_data
# add splined and refined contour to the airfoil contourGroup
# makeSplineMarkers call within makeContourSpline
self.mainwindow.airfoil.makeContourSpline()
# get LE radius, etc.
spline_data = self.mainwindow.airfoil.spline_data
curvature_data = ca.ContourAnalysis.getCurvature(spline_data)
rc, xc, yc, xle, yle, le_id = \
ca.ContourAnalysis.getLeRadius(spline_data, curvature_data)
self.makeLeCircle(rc, xc, yc, xle, yle)
logger.info('Leading edge radius: {:11.8f}'.format(rc))
logger.info('Leading edge circle tangent at point: {}'.format(le_id))
def makeLeCircle(self, rc, xc, yc, xle, yle):
        # delete existing LE circle ItemGroup from scene
if hasattr(self.mainwindow.airfoil, 'le_circle'):
self.mainwindow.scene.removeItem(self.mainwindow.airfoil.le_circle)
del self.mainwindow.airfoil.le_circle
# put LE circle, center and tangent point in a list
circles = list()
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(0, 150, 0, 255))
circle.pen.setWidthF(0.3)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(10, 200, 10, 150))
circle.Circle(xc, yc, rc)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(255, 0, 0, 255))
circle.pen.setWidthF(0.3)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(255, 0, 0, 255))
circle.Circle(xc, yc, 0.0002)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(255, 0, 0, 255))
circle.pen.setWidthF(1.6)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(255, 0, 0, 255))
circle.Circle(xle, yle, 0.0002)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
self.mainwindow.airfoil.le_circle = \
self.mainwindow.scene.createItemGroup(circles)
self.mainwindow.airfoil.le_circle.setZValue(110)
self.mainwindow.centralwidget.cb7.setChecked(True)
self.mainwindow.centralwidget.cb7.setEnabled(True)
def spline(self, x, y, points=200, degree=2, evaluate=False):
"""Interpolate spline through given points
Args:
            points (int, optional): Number of points on the spline
degree (int, optional): Degree of the spline
evaluate (bool, optional): If True, evaluate spline just at
the coordinates of the knots
"""
# interpolate B-spline through data points
# returns knots of control polygon
# tck ... tuple (t,c,k) containing the vector of knots,
# the B-spline coefficients, and the degree of the spline.
# u ... array of the parameters for each knot
# NOTE: s=0.0 is important as no smoothing should be done on the spline
# after interpolating it
tck, u = interpolate.splprep([x, y], s=0.0, k=degree)
# number of points on interpolated B-spline (parameter t)
t = np.linspace(0.0, 1.0, points)
# if True, evaluate spline just at the coordinates of the knots
if evaluate:
t = u
# evaluate B-spline at given parameters
# der=0: returns point coordinates
coo = interpolate.splev(t, tck, der=0)
# evaluate 1st derivative at given parameters
der1 = interpolate.splev(t, tck, der=1)
# evaluate 2nd derivative at given parameters
der2 = interpolate.splev(t, tck, der=2)
spline_data = [coo, u, t, der1, der2, tck]
return spline_data
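    # NOTE (added): minimal standalone sketch of the splprep/splev pattern used
    # by spline() above, assuming x and y are 1-D coordinate arrays (the names
    # and the point count are illustrative only):
    #
    #   tck, u = interpolate.splprep([x, y], s=0.0, k=3)
    #   t = np.linspace(0.0, 1.0, 200)
    #   coo = interpolate.splev(t, tck, der=0)   # (x, y) points on the spline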
def refine(self, spline_data, tolerance=170.0, recursions=0):
"""Recursive refinement with respect to angle criterion (tol).
If angle between two adjacent line segments is less than tol,
a recursive refinement of the contour is performed until
tol is met.
Args:
tol (float, optional): Angle between two adjacent contour segments
recursions (int, optional): NO USER INPUT HERE
Needed just for level information
during recursions
"""
# self.spline_data = [coo, u, t, der1, der2, tck]
xx, yy = spline_data[0]
t = spline_data[2]
tck = spline_data[5]
logger.debug('\nPoints before refining: {} \n'.format(len(xx)))
xn = copy.deepcopy(xx)
yn = copy.deepcopy(yy)
tn = copy.deepcopy(t)
j = 0
refinements = 0
first = True
refined = dict()
for i in range(len(xx) - 2):
refined[i] = False
# angle between two contour line segments
a = np.array([xx[i], yy[i]])
b = np.array([xx[i + 1], yy[i + 1]])
c = np.array([xx[i + 2], yy[i + 2]])
angle = Utils.angle_between(a - b, c - b, degree=True)
if angle < tolerance:
logger.debug('Refining between segments {} {},'
.format(i, i + 1))
logger.debug('Tol={0:5.1f}, Angle={1:05.1f}\n'
.format(tolerance, angle))
refined[i] = True
refinements += 1
# parameters for new points
t1 = (t[i] + t[i + 1]) / 2.
t2 = (t[i + 1] + t[i + 2]) / 2.
# coordinates of new points
p1 = interpolate.splev(t1, tck, der=0)
p2 = interpolate.splev(t2, tck, der=0)
# insert points and their parameters into arrays
if i > 0 and not refined[i - 1]:
xn = np.insert(xn, i + 1 + j, p1[0])
yn = np.insert(yn, i + 1 + j, p1[1])
tn = np.insert(tn, i + 1 + j, t1)
j += 1
xn = np.insert(xn, i + 2 + j, p2[0])
yn = np.insert(yn, i + 2 + j, p2[1])
tn = np.insert(tn, i + 2 + j, t2)
j += 1
if first and recursions > 0:
logger.debug('Recursion level: {} \n'.format(recursions))
first = False
logger.debug('Points after refining: {}'.format(len(xn)))
# update coordinate array, including inserted points
spline_data[0] = (xn, yn)
# update parameter array, including parameters of inserted points
spline_data[2] = tn
# this is the recursion :)
if refinements > 0:
self.refine(spline_data, tolerance, recursions + 1)
# stopping from recursion if no refinements done in this recursion
else:
# update derivatives, including inserted points
spline_data[3] = interpolate.splev(tn, tck, der=1)
spline_data[4] = interpolate.splev(tn, tck, der=2)
logger.debug('No more refinements.')
logger.debug('\nTotal number of recursions: {}'
.format(recursions - 1))
# due to recursive call to refine, here no object can be returned
# instead use self to transfer data to the outer world :)
self.spline_data = copy.deepcopy(spline_data)
return
def refine_te(self, ref_te, ref_te_n, ref_te_ratio):
"""Refine the airfoil contour at the trailing edge
Args:
ref_te (TYPE): Description
ref_te_n (TYPE): Description
ref_te_ratio (TYPE): Description
Returns:
TYPE: Description
"""
# get parameter of point to which refinement reaches
tref = self.spline_data[2][ref_te]
# calculate the new spacing at the trailing edge points
spacing = self.spacing(divisions=ref_te_n, ratio=ref_te_ratio,
thickness=tref)
# insert new points with the spacing into the airfoil contour data
x, y = self.spline_data[0]
t = self.spline_data[2]
tck = self.spline_data[5]
# remove points which will be refined
index = range(ref_te + 1)
x = np.delete(x, index)
y = np.delete(y, index)
t = np.delete(t, index)
index = range(len(x))[-(ref_te + 1):]
x = np.delete(x, index)
y = np.delete(y, index)
t = np.delete(t, index)
# add refined points
for s in spacing[::-1]:
# upper side
p = interpolate.splev(s, tck, der=0)
x = np.insert(x, 0, p[0])
y = np.insert(y, 0, p[1])
t = np.insert(t, 0, s)
# lower side
p = interpolate.splev(1. - s, tck, der=0)
x = np.append(x, p[0])
y = np.append(y, p[1])
t = np.append(t, 1. - s)
# update coordinate array, including inserted points
self.spline_data[0] = (x, y)
# update parameter array, including parameters of inserted points
self.spline_data[2] = t
# update derivatives, including inserted points
self.spline_data[3] = interpolate.splev(t, tck, der=1)
self.spline_data[4] = interpolate.splev(t, tck, der=2)
def spacing(self, divisions=10, ratio=1.0, thickness=1.0):
"""Calculate point distribution on a line
Args:
divisions (int, optional): Number of subdivisions
ratio (float, optional): Ratio of last to first subdivision size
thickness (float, optional): length of line
Returns:
TYPE: Description
"""
if divisions == 1:
sp = [0.0, 1.0]
return np.array(sp)
growth = ratio**(1.0 / (float(divisions) - 1.0))
if growth == 1.0:
growth = 1.0 + 1.0e-10
s0 = 1.0
s = [s0]
for i in range(1, divisions + 1):
app = s0 * growth**i
s.append(app)
sp = np.array(s)
sp -= sp[0]
sp /= sp[-1]
sp *= thickness
return sp
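    # NOTE (added): hedged worked example of the geometric spacing above. With
    # divisions=3, ratio=4.0, thickness=1.0 the growth factor is 4**(1/2) = 2,
    # the raw points are [1, 2, 4, 8], and after shifting and normalising the
    # returned distribution is [0, 1/7, 3/7, 1].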
def writeContour(self):
xr = self.raw_coordinates[0]
xc = self.coordinates[0]
yc = self.coordinates[1]
s = '# Spline with {0} points based on initial contour'.format(len(xc))
s1 = '({0} points)\n'.format(len(xr))
info = s + s1
with open(self.name + '_spline_' + str(len(xc)) + '.dat', 'w') as f:
f.write('#\n')
f.write('# Airfoil: ' + self.name + '\n')
f.write('# Created from ' + self.filename + '\n')
f.write(info)
f.write('#\n')
for i in range(len(xc)):
data = '{:10.8f} {:10.8f} \n'.format(xc[i], yc[i])
f.write(data)
|
mit
| -7,586,054,523,396,383,000
| 34.611111
| 79
| 0.547117
| false
| 3.789534
| false
| false
| false
|
emory-libraries/eulxml
|
eulxml/xmlmap/core.py
|
1
|
28298
|
# file eulxml/xmlmap/core.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import os
import warnings
import urllib
import time
from lxml import etree
from lxml.builder import ElementMaker
import six
from six.moves.urllib.request import urlopen
from eulxml.utils.compat import u
from eulxml.xmlmap.fields import Field
logger = logging.getLogger(__name__)
__all__ = ['XmlObject', 'parseUri', 'parseString', 'loadSchema',
'load_xmlobject_from_string', 'load_xmlobject_from_file',
'load_xslt']
# NB: When parsing XML in this module, we explicitly create a new parser
# each time. Without this, lxml 2.2.7 uses a global default parser. When
# parsing strings, lxml appears to set that parser into no-network mode,
# causing subsequent network-based parses to fail. Specifically, under
# lxml 2.2.7, the second call here fails::
#
# >>> etree.fromstring('<foo/>') # set global parser to no-network
# >>> etree.parse('http://www.w3.org/2001/xml.xsd') # fails in no-network mode
#
# If we simply construct a separate parser each time, parses will be
# marginally slower, but this lxml bug will not affect us.
#
# This lxml behavior has been logged as a bug:
# https://bugs.launchpad.net/lxml/+bug/673205
def parseUri(stream, uri=None):
"""Read an XML document from a URI, and return a :mod:`lxml.etree`
document."""
return etree.parse(stream, parser=_get_xmlparser(), base_url=uri)
def parseString(string, uri=None):
"""Read an XML document provided as a byte string, and return a
:mod:`lxml.etree` document. String cannot be a Unicode string.
Base_uri should be provided for the calculation of relative URIs."""
return etree.fromstring(string, parser=_get_xmlparser(), base_url=uri)
# internal cache for loaded schemas, so we only load each schema once
_loaded_schemas = {}
def loadSchema(uri, base_uri=None):
"""Load an XSD XML document (specified by filename or URL), and return a
:class:`lxml.etree.XMLSchema`.
"""
# uri to use for reporting errors - include base uri if any
if uri in _loaded_schemas:
return _loaded_schemas[uri]
error_uri = uri
if base_uri is not None:
error_uri += ' (base URI %s)' % base_uri
try:
logger.debug('Loading schema %s' % uri)
_loaded_schemas[uri] = etree.XMLSchema(etree.parse(uri,
parser=_get_xmlparser(),
base_url=base_uri))
return _loaded_schemas[uri]
except IOError as io_err:
# add a little more detail to the error message - but should still be an IO error
raise IOError('Failed to load schema %s : %s' % (error_uri, io_err))
except etree.XMLSchemaParseError as parse_err:
# re-raise as a schema parse error, but ensure includes details about schema being loaded
raise etree.XMLSchemaParseError('Failed to parse schema %s -- %s' % (error_uri, parse_err))
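# NOTE (added): hedged usage sketch of the schema cache above; the schema URL
# is illustrative (it also appears in the XmlObject.xmlschema docstring below).
# Repeated calls with the same uri return the same cached lxml.etree.XMLSchema:
#
#   schema = loadSchema('http://www.openarchives.org/OAI/2.0/oai_dc.xsd')
#   assert schema is loadSchema('http://www.openarchives.org/OAI/2.0/oai_dc.xsd')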
def load_xslt(filename=None, xsl=None):
'''Load and compile an XSLT document (specified by filename or string)
for repeated use in transforming XML.
'''
parser = _get_xmlparser()
if filename is not None:
xslt_doc = etree.parse(filename, parser=parser)
if xsl is not None:
xslt_doc = etree.fromstring(xsl, parser=parser)
return etree.XSLT(xslt_doc)
def _http_uri(uri):
return uri.startswith('http:') or uri.startswith('https:')
class _FieldDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, obj, objtype):
if obj is None:
# NOTE: return the *field* here rather than self;
# allows sphinx autodocumentation to inspect the type properly
return self.field
return self.field.get_for_node(obj.node, obj.context)
def __set__(self, obj, value):
return self.field.set_for_node(obj.node, obj.context, value)
def __delete__(self, obj):
return self.field.delete_for_node(obj.node, obj.context)
class XmlObjectType(type):
"""
A metaclass for :class:`XmlObject`.
Analogous in principle to Django's ``ModelBase``, this metaclass
functions rather differently. While it'll likely get a lot closer over
time, we just haven't been growing ours long enough to demand all of the
abstractions built into Django's models. For now, we do three things:
1. take any :class:`~eulxml.xmlmap.fields.Field` members and convert
them to descriptors,
2. store all of these fields and all of the base classes' fields in a
``_fields`` dictionary on the class, and
3. if any local (non-parent) fields look like self-referential
:class:`eulxml.xmlmap.NodeField` objects then patch them up
to refer to the newly-created :class:`XmlObject`.
"""
def __new__(cls, name, bases, defined_attrs):
use_attrs = {}
fields = {}
recursive_fields = []
# inherit base fields first; that way current class field defs will
# override parents. note that since the parents already added fields
# from *their* parents (because they were built from XmlObjectType),
# we don't have to recurse.
for base in bases:
base_fields = getattr(base, '_fields', None)
if base_fields:
fields.update(base_fields)
base_xsd = getattr(base, 'XSD_SCHEMA', None)
schema_obj = None
for attr_name, attr_val in defined_attrs.items():
# XXX: not a fan of isinstance here. maybe use something like
# django's contribute_to_class?
if isinstance(attr_val, Field):
if isinstance(attr_val, SchemaField):
# special case: schema field will look at the schema and return appropriate field type
if 'XSD_SCHEMA' in defined_attrs or base_xsd:
# load schema_obj the first time we need it
if schema_obj is None:
# if xsd schema is directly defined, use that
if 'XSD_SCHEMA' in defined_attrs:
schema_obj = load_xmlobject_from_file(defined_attrs['XSD_SCHEMA'],
XsdSchema)
# otherwise, use nearest parent xsd
else:
schema_obj = load_xmlobject_from_file(base_xsd, XsdSchema)
attr_val = attr_val.get_field(schema_obj)
field = attr_val
fields[attr_name] = field
use_attrs[attr_name] = _FieldDescriptor(field)
# collect self-referential NodeFields so that we can resolve
# them once we've created the new class
node_class = getattr(field, 'node_class', None)
if isinstance(node_class, six.string_types):
if node_class in ('self', name):
recursive_fields.append(field)
else:
msg = ('Class %s has field %s with node_class %s, ' +
'but the only supported class names are ' +
'"self" and %s.') % (name, attr_val,
repr(node_class),
repr(name))
raise ValueError(msg)
# if a field 'foo' has a 'create_for_node' method, then add
# a 'create_foo' method to call it. generally this isn't
# helpful, but NodeField uses it.
if hasattr(attr_val, 'create_for_node'):
create_method_name = 'create_' + attr_name
create_method = cls._make_create_field(create_method_name, attr_val)
use_attrs[create_method_name] = create_method
else:
use_attrs[attr_name] = attr_val
use_attrs['_fields'] = fields
super_new = super(XmlObjectType, cls).__new__
new_class = super_new(cls, name, bases, use_attrs)
# patch self-referential NodeFields (collected above) with the
# newly-created class
for field in recursive_fields:
assert field.node_class in ('self', name)
field.node_class = new_class
return new_class
@staticmethod
def _make_create_field(field_name, field):
def create_field(xmlobject):
field.create_for_node(xmlobject.node, xmlobject.context)
create_field.__name__ = str(field_name)
return create_field
@six.python_2_unicode_compatible
class XmlObject(six.with_metaclass(XmlObjectType, object)):
"""
A Python object wrapped around an XML node.
Typical programs will define subclasses of :class:`XmlObject` with
various field members. Some programs will use
:func:`load_xmlobject_from_string` and :func:`load_xmlobject_from_file`
to create instances of these subclasses. Other programs will create them
directly, passing a node argument to the constructor. If the
subclass defines a :attr:`ROOT_NAME` then this node argument is
optional: Programs may then create instances directly with no
constructor arguments.
Programs can also pass an optional dictionary to the constructor to
specify namespaces for XPath evaluation.
If keyword arguments are passed in to the constructor, they will be used to
set initial values for the corresponding fields on the :class:`XmlObject`.
(Only currently supported for non-list fields.)
Custom equality/non-equality tests: two instances of :class:`XmlObject` are
considered equal if they point to the same lxml element node.
"""
node = None
"""The top-level xml node wrapped by the object"""
ROOT_NAME = None
"""A default root element name (without namespace prefix) used when an object
of this type is created from scratch."""
ROOT_NS = None
"""The default namespace used when an object of this type is created from
scratch."""
ROOT_NAMESPACES = {}
"""A dictionary whose keys are namespace prefixes and whose values are
namespace URIs. These namespaces are used to create the root element when an
object of this type is created from scratch; should include the namespace
and prefix for the root element, if it has one. Any additional namespaces
will be added to the root element."""
XSD_SCHEMA = None
"""URI or file path to the XSD schema associated with this :class:`XmlObject`,
if any. If configured, will be used for optional validation when calling
:meth:`load_xmlobject_from_string` and :meth:`load_xmlobject_from_file`,
and with :meth:`is_valid`.
"""
schema_validate = True
'''Override for schema validation; if a schema must be defined for
the use of :class:`xmlmap.fields.SchemaField` for a sub-xmlobject
that should not be validated, set to False.'''
@property
def xmlschema(self):
"""A parsed XSD schema instance of
:class:`lxml.etree.XMLSchema`; will be loaded the first time
it is requested on any instance of this class if XSD_SCHEMA is
        set and xmlschema is None. If you wish to load and parse the
schema at class definition time, instead of at class instance
initialization time, you may want to define your schema in
your subclass like this::
XSD_SCHEMA = "http://www.openarchives.org/OAI/2.0/oai_dc.xsd"
xmlschema = xmlmap.loadSchema(XSD_SCHEMA)
"""
if self.XSD_SCHEMA:
return loadSchema(self.XSD_SCHEMA)
# NOTE: DTD and RNG validation could be handled similarly to XSD validation logic
def __init__(self, node=None, context=None, **kwargs):
if node is None:
node = self._build_root_element()
self.node = node
# FIXME: context probably needs work
# get namespaces from current node OR its parent (in case of an lxml 'smart' string)
if hasattr(node, 'nsmap'):
nsmap = node.nsmap
        elif hasattr(node, 'getparent'):
            # lxml 'smart' strings have no nsmap of their own, so fall back to
            # the parent element's namespace map
            nsmap = node.getparent().nsmap
else:
nsmap = {}
# xpath has no notion of a default namespace - omit any namespace with no prefix
self.context = {'namespaces': dict([(prefix, ns) for prefix, ns
in six.iteritems(nsmap) if prefix])}
if context is not None:
self.context.update(context)
if hasattr(self, 'ROOT_NAMESPACES'):
# also include any root namespaces to guarantee that expected prefixes are available
self.context['namespaces'].update(self.ROOT_NAMESPACES)
for field, value in six.iteritems(kwargs):
# TODO (maybe): handle setting/creating list fields
setattr(self, field, value)
def _build_root_element(self):
opts = {}
if hasattr(self, 'ROOT_NS'):
opts['namespace'] = self.ROOT_NS
if hasattr(self, 'ROOT_NAMESPACES'):
opts['nsmap'] = self.ROOT_NAMESPACES
E = ElementMaker(**opts)
root = E(self.ROOT_NAME)
return root
def xsl_transform(self, filename=None, xsl=None, return_type=None, **params):
"""Run an xslt transform on the contents of the XmlObject.
XSLT can be passed in as an XSLT object generated by :meth:`load_xslt`
or as filename or string. If a params dictionary is specified, its items
will be passed as parameters to the XSL transformation, and any string
values will automatically be encoded as XSL string parameters.
.. Note::
If XSL is being used multiple times, it is recommended to
use :meth`:load_xslt` to load and compile the XSLT once.
:param filename: xslt filename (optional, one of file and xsl is required)
:param xsl: xslt as string OR compiled XSLT object as returned by
:meth:`load_xslt` (optional)
:param return_type: type of object to return; optional, defaults to
:class:`XmlObject`; specify unicode or string for text output
:returns: an instance of :class:`XmlObject` or the return_type specified
"""
# NOTE: converting _XSLTResultTree to XmlObject because of a bug in its unicode method
# - to output xml result, use serialize instead of unicode
if return_type is None:
return_type = XmlObject
# automatically encode any string params as XSLT string parameters
for key, val in six.iteritems(params):
if isinstance(val, six.string_types):
params[key] = etree.XSLT.strparam(val)
parser = _get_xmlparser()
# if a compiled xslt object is passed in, use that first
if xsl is not None and isinstance(xsl, etree.XSLT):
result = xsl(self.node, **params)
else:
# otherwise, load the xslt
if filename is not None:
xslt_doc = etree.parse(filename, parser=parser)
if xsl is not None:
xslt_doc = etree.fromstring(xsl, parser=parser)
# NOTE: there is a memory bug that results in malloc errors and
# segfaults when using the parsed etree.XSLT approach here.
# As a workaround, using the document xslt method instead.
if self.node == self.node.getroottree().getroot():
# if current node is root node, use entire document for transform
xmltree = self.node.getroottree()
else:
# otherwise, construct a temporary partial document from this node
partial_doc = etree.fromstring(self.serialize(), parser=parser)
xmltree = partial_doc.getroottree()
result = xmltree.xslt(xslt_doc, **params)
# If XSLT returns nothing, transform returns an _XSLTResultTree
# with no root node. Log a warning, and don't generate an
# empty xmlobject which will behave unexpectedly.
# text output does not include a root node, so check separately
if issubclass(return_type, six.string_types):
if result is None:
logger.warning("XSL transform generated an empty result")
return
else:
return return_type(result)
if result is None or result.getroot() is None:
logger.warning("XSL transform generated an empty result")
else:
# pass in root node, rather than the result tree object
return return_type(result.getroot())
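    # NOTE (added): hedged usage sketch of xsl_transform; the stylesheet
    # filename is an illustrative assumption:
    #
    #   xslt = load_xslt(filename='mods_to_dc.xsl')      # compile once, reuse
    #   result_obj = obj.xsl_transform(xsl=xslt)          # returns an XmlObject
    #   as_text = obj.xsl_transform(xsl=xslt, return_type=six.text_type)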
def __str__(self):
if isinstance(self.node, six.string_types):
return self.node
return self.node.xpath("normalize-space(.)")
def __string__(self):
if isinstance(self.node, six.string_types):
return self.node
return u(self).encode('ascii', 'xmlcharrefreplace')
def __eq__(self, other):
# consider two xmlobjects equal if they are pointing to the same xml node
if hasattr(other, 'node') and self.node == other.node:
return True
# consider two xmlobjects equal if they serialize the same
if hasattr(other, 'serialize') and self.serialize() == other.serialize():
return True
# NOTE: does not address "equivalent" xml, which is potentially very complex
return False
def __ne__(self, other):
return not self.__eq__(other)
def serialize(self, stream=None, pretty=False):
"""Serialize the contents of the XmlObject to a stream. Serializes
current node only; for the entire XML document, use :meth:`serializeDocument`.
If no stream is specified, returns a string.
:param stream: stream or other file-like object to write content to (optional)
:param pretty: pretty-print the XML output; boolean, defaults to False
:rtype: stream passed in or an instance of :class:`cStringIO.StringIO`
"""
return self._serialize(self.node, stream=stream, pretty=pretty)
def serializeDocument(self, stream=None, pretty=False):
"""Serialize the contents of the entire XML document (including Doctype
declaration, if there is one), with an XML declaration, for the current
XmlObject to a stream.
If no stream is specified, returns a string.
:param stream: stream or other file-like object to write content to (optional)
:param pretty: pretty-print the XML output; boolean, defaults to False
:rtype: stream passed in or an instance of :class:`cStringIO.StringIO`
"""
return self._serialize(self.node.getroottree(), stream=stream, pretty=pretty,
xml_declaration=True)
def _serialize(self, node, stream=None, pretty=False, xml_declaration=False):
# actual logic of xml serialization
if stream is None:
string_mode = True
stream = six.BytesIO()
else:
string_mode = False
# NOTE: etree c14n doesn't seem to like fedora info: URIs
stream.write(etree.tostring(node, encoding='UTF-8', pretty_print=pretty,
xml_declaration=xml_declaration))
if string_mode:
data = stream.getvalue()
stream.close()
return data
return stream
def is_valid(self):
"""Determine if the current document is valid as far as we can determine.
If there is a schema associated, check for schema validity. Otherwise,
return True.
:rtype: boolean
"""
# valid if there are no validation errors
return self.validation_errors() == []
def validation_errors(self):
"""Return a list of validation errors. Returns an empty list
if the xml is schema valid or no schema is defined. If a
schema is defined but :attr:`schema_validate` is False, schema
validation will be skipped.
Currently only supports schema validation.
:rtype: list
"""
# if we add other types of validation (DTD, RNG), incorporate them here
if self.xmlschema and self.schema_validate and not self.schema_valid():
return self.schema_validation_errors()
return []
def schema_valid(self):
"""Determine if the current document is schema-valid according to the
configured XSD Schema associated with this instance of :class:`XmlObject`.
:rtype: boolean
:raises: Exception if no XSD schema is defined for this XmlObject instance
"""
if self.xmlschema is not None:
# clear out errors so they are not duplicated by repeated
# validations on the same schema object
self.xmlschema._clear_error_log()
# NOTE: _clear_error_log is technically private, but I can't find
# any public method to clear the validation log.
return self.xmlschema.validate(self.node)
else:
raise Exception('No XSD schema is defined, cannot validate document')
def schema_validation_errors(self):
"""
        Retrieve any validation errors that occurred during schema validation
done via :meth:`is_valid`.
:returns: a list of :class:`lxml.etree._LogEntry` instances
:raises: Exception if no XSD schema is defined for this XmlObject instance
"""
if self.xmlschema is not None:
return self.xmlschema.error_log
else:
raise Exception('No XSD schema is defined, cannot return validation errors')
def is_empty(self):
"""
Returns True if the root node contains no child elements, no
attributes, and no text. Returns False if any are present.
"""
return len(self.node) == 0 and len(self.node.attrib) == 0 \
and not self.node.text and not self.node.tail # regular text or text after a node
""" April 2016. Removing Urllib2Resolver so we can support
loading local copies of schema and skip validation in get_xml_parser """
def _get_xmlparser(xmlclass=XmlObject, validate=False, resolver=None):
"""Initialize an instance of :class:`lxml.etree.XMLParser` with appropriate
settings for validation. If validation is requested and the specified
instance of :class:`XmlObject` has an XSD_SCHEMA defined, that will be used.
Otherwise, uses DTD validation. Switched resolver to None to skip validation.
"""
if validate:
if hasattr(xmlclass, 'XSD_SCHEMA') and xmlclass.XSD_SCHEMA is not None:
# If the schema has already been loaded, use that.
# (since we accessing the *class*, accessing 'xmlschema' returns a property,
# not the initialized schema object we actually want).
xmlschema = getattr(xmlclass, '_xmlschema', None)
# otherwise, load the schema
if xmlschema is None:
xmlschema = loadSchema(xmlclass.XSD_SCHEMA)
opts = {'schema': xmlschema}
else:
# if configured XmlObject does not have a schema defined, assume DTD validation
opts = {'dtd_validation': True}
else:
# If validation is not requested, then the parsing should fail
# only for well-formedness issues.
#
# Therefore, we must turn off collect_ids, otherwise lxml will
# have a problem with duplicate IDs as it collects
# them. However, the XML spec declares ID uniqueness as a
# validation constraint, not a well-formedness
# constraint. (See https://www.w3.org/TR/xml/#id.)
opts = {"collect_ids": False}
parser = etree.XMLParser(**opts)
if resolver is not None:
parser.resolvers.add(resolver)
return parser
def load_xmlobject_from_string(string, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a string.
If an xmlclass is specified, construct an instance of that class instead
of :class:`~eulxml.xmlmap.XmlObject`. It should be a subclass of XmlObject.
The constructor will be passed a single node.
If validation is requested and the specified subclass of :class:`XmlObject`
has an XSD_SCHEMA defined, the parser will be configured to validate against
the specified schema. Otherwise, the parser will be configured to use DTD
validation, and expect a Doctype declaration in the xml content.
:param string: xml content to be loaded, as a string
:param xmlclass: subclass of :class:`~eulxml.xmlmap.XmlObject` to initialize
:param validate: boolean, enable validation; defaults to false
:rtype: instance of :class:`~eulxml.xmlmap.XmlObject` requested
"""
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
element = etree.fromstring(string, parser)
return xmlclass(element)
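# NOTE (added): hedged usage sketch; the XML snippet and the subclass name are
# illustrative assumptions only:
#
#   obj = load_xmlobject_from_string('<foo><bar>baz</bar></foo>')
#   obj = load_xmlobject_from_string(xml_bytes, xmlclass=MyXmlObject, validate=True)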
def load_xmlobject_from_file(filename, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a file.
See :meth:`load_xmlobject_from_string` for more details; behaves exactly the
same, and accepts the same parameters, except that it takes a filename
instead of a string.
:param filename: name of the file that should be loaded as an xmlobject.
:meth:`etree.lxml.parse` will accept a file name/path, a file object, a
file-like object, or an HTTP or FTP url, however file path and URL are
recommended, as they are generally faster for lxml to handle.
"""
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
tree = etree.parse(filename, parser)
return xmlclass(tree.getroot())
from eulxml.xmlmap.fields import *
# Import these for backward compatibility. Should consider deprecating these
# and asking new code to pull them from descriptor
# XSD schema xmlobjects - used in XmlObjectType to process SchemaFields
# FIXME: where should these actually go? depends on both XmlObject and fields
class XsdType(XmlObject):
ROOT_NAME = 'simpleType'
name = StringField('@name')
base = StringField('xs:restriction/@base')
restricted_values = StringListField('xs:restriction/xs:enumeration/@value')
def base_type(self):
# for now, only supports simple types - eventually, may want logic to
# traverse extended types to get to base XSD type
if ':' in self.base: # for now, ignore prefix (could be xsd, xs, etc. - how to know which?)
prefix, basetype = self.base.split(':')
else:
basetype = self.base
return basetype
class XsdSchema(XmlObject):
ROOT_NAME = 'schema'
ROOT_NS = 'http://www.w3.org/2001/XMLSchema'
ROOT_NAMESPACES = {'xs': ROOT_NS}
def get_type(self, name=None, xpath=None):
if xpath is None:
if name is None:
raise Exception("Must specify either name or xpath")
xpath = '//*[@name="%s"]' % name
result = self.node.xpath(xpath)
if len(result) == 0:
raise Exception("No Schema type definition found for xpath '%s'" % xpath)
elif len(result) > 1:
raise Exception("Too many schema type definitions found for xpath '%s' (found %d)" \
% (xpath, len(result)))
return XsdType(result[0], context=self.context) # pass in namespaces
|
apache-2.0
| 6,997,299,404,527,287,000
| 40.675994
| 106
| 0.63478
| false
| 4.28433
| false
| false
| false
|
citiususc/construe
|
construe/knowledge/abstraction_patterns/rhythm/patterns.py
|
1
|
5602
|
# -*- coding: utf-8 -*-
# pylint: disable-msg= E1002, E1101
"""
Created on Wed Nov 21 09:04:17 2012
This file contains the definition of a set of very simple abstraction patterns
in order to perform rhythm interpretation on an ECG signal.
@author: T. Teijeiro
"""
import copy
import construe.knowledge.observables as o
from construe.knowledge.constants import (PW_DURATION, ST_INTERVAL,
N_PR_INTERVAL, N_QT_INTERVAL,
ASYSTOLE_RR, PQ_INTERVAL, QRS_DUR)
from construe.model import Interval as Iv
from construe.model.automata import PatternAutomata, ABSTRACTED, ENVIRONMENT
from construe.utils.units_helper import msec2samples as ms2sp
def _rstart_tconst(pattern, qrs):
"""
Temporal constraints for the Rhythm Start abstraction pattern.
"""
pattern.tnet.set_equal(qrs.time, pattern.hypothesis.time)
def _p_qrs_tconst(pattern, pwave):
"""
Temporal constraints of the P Wave wrt the corresponding QRS complex
"""
obseq = pattern.obs_seq
idx = pattern.get_step(pwave)
if idx == 0 or not isinstance(obseq[idx-1], o.QRS):
return
qrs = obseq[idx-1]
pattern.tnet.add_constraint(pwave.start, pwave.end, PW_DURATION)
#PR interval
pattern.tnet.add_constraint(pwave.start, qrs.start, N_PR_INTERVAL)
pattern.tnet.set_before(pwave.end, qrs.start)
def _t_qrs_tconst(pattern, twave):
"""
Temporal constraints of the T waves with the corresponding QRS complex
"""
obseq = pattern.obs_seq
idx = pattern.get_step(twave)
    # We find the QRS observation preceding this T wave.
try:
qrs = next(obseq[i] for i in range(idx-1, -1, -1)
if isinstance(obseq[i], o.QRS))
tnet = pattern.tnet
if idx > 0 and isinstance(obseq[idx-1], o.PWave):
pwave = obseq[idx-1]
tnet.add_constraint(pwave.end, twave.start, Iv(ST_INTERVAL.start,
PQ_INTERVAL.end + QRS_DUR.end))
#ST interval
tnet.add_constraint(qrs.end, twave.start, ST_INTERVAL)
#QT duration
tnet.add_constraint(qrs.start, twave.end, N_QT_INTERVAL)
except StopIteration:
pass
def _prev_rhythm_tconst(pattern, rhythm):
"""Temporal constraints of a cardiac rhythm with the precedent one."""
pattern.tnet.set_equal(pattern.hypothesis.start, rhythm.end)
def _asyst_prev_rhythm_tconst(pattern, rhythm):
"""Temporal constraints of an asystole with the precedent rhythm."""
pattern.tnet.set_equal(pattern.hypothesis.start, rhythm.end)
pattern.tnet.add_constraint(pattern.hypothesis.start,
pattern.hypothesis.end, ASYSTOLE_RR)
def _qrs1_tconst(pattern, qrs):
"""Temporal constraints of the first QRS in the asystole."""
pattern.tnet.set_equal(pattern.hypothesis.start, qrs.time)
pattern.tnet.set_before(qrs.end, pattern.hypothesis.end)
def _qrs2_tconst(pattern, qrs):
"""Temporal constraints of the delayed QRS in the asystole."""
pattern.tnet.set_equal(qrs.time, pattern.hypothesis.end)
if len(pattern.evidence[o.QRS]) > 1:
prev = pattern.evidence[o.QRS][0]
pattern.tnet.add_constraint(prev.time, qrs.time, ASYSTOLE_RR)
def _rhythmstart_gconst(pattern, _):
"""General constraints of the rhythm start pattern."""
    # We assume a starting mean rhythm of 75 bpm, but the range allows from 65
    # to 85 bpm
pattern.hypothesis.meas = o.CycleMeasurements((ms2sp(800), ms2sp(200)),
(0, 0), (0, 0))
def _asystole_gconst(pattern, _):
"""General constraints of the asystole pattern."""
    # The rhythm information is copied from the preceding rhythm.
if pattern.evidence[o.Cardiac_Rhythm]:
rhythm = pattern.evidence[o.Cardiac_Rhythm][0]
pattern.hypothesis.meas = copy.copy(rhythm.meas)
RHYTHMSTART_PATTERN = PatternAutomata()
RHYTHMSTART_PATTERN.name = "Rhythm Start"
RHYTHMSTART_PATTERN.Hypothesis = o.RhythmStart
RHYTHMSTART_PATTERN.add_transition(0, 1, o.QRS, ABSTRACTED, _rstart_tconst,
_rhythmstart_gconst)
RHYTHMSTART_PATTERN.add_transition(1, 2, o.PWave, ABSTRACTED, _p_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(2, 3, o.TWave, ABSTRACTED, _t_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(1, 3, o.TWave, ABSTRACTED, _t_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(1, 3)
RHYTHMSTART_PATTERN.final_states.add(3)
RHYTHMSTART_PATTERN.abstractions[o.QRS] = (RHYTHMSTART_PATTERN.transitions[0],)
RHYTHMSTART_PATTERN.freeze()
ASYSTOLE_PATTERN = PatternAutomata()
ASYSTOLE_PATTERN.name = "Asystole"
ASYSTOLE_PATTERN.Hypothesis = o.Asystole
ASYSTOLE_PATTERN.add_transition(0, 1, o.Cardiac_Rhythm, ENVIRONMENT,
_asyst_prev_rhythm_tconst)
ASYSTOLE_PATTERN.add_transition(1, 2, o.QRS, ENVIRONMENT, _qrs1_tconst)
ASYSTOLE_PATTERN.add_transition(2, 3, o.QRS, ABSTRACTED, _qrs2_tconst,
_asystole_gconst)
ASYSTOLE_PATTERN.add_transition(3, 4, o.PWave, ABSTRACTED, _p_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(4, 5, o.TWave, ABSTRACTED, _t_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(3, 5, o.TWave, ABSTRACTED, _t_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(3, 5)
ASYSTOLE_PATTERN.final_states.add(5)
ASYSTOLE_PATTERN.abstractions[o.QRS] = (ASYSTOLE_PATTERN.transitions[2],)
ASYSTOLE_PATTERN.freeze()
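# NOTE (added): descriptive summary of the two automata built above.
# RHYTHMSTART_PATTERN abstracts a single QRS complex (with optional P and T
# waves) into a RhythmStart hypothesis with a default cycle of ~800 ms.
# ASYSTOLE_PATTERN takes the previous rhythm and one QRS as environment
# observations and abstracts the delayed QRS whose RR interval has to satisfy
# the ASYSTOLE_RR constraint.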
if __name__ == "__main__":
pass
|
agpl-3.0
| 8,645,814,896,490,964,000
| 40.80597
| 79
| 0.661014
| false
| 2.953084
| false
| false
| false
|
grow/pygrow
|
install.py
|
1
|
7591
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""Standalone Grow SDK installer. Downloads Grow SDK and sets up command aliases."""
import argparse
import datetime
import json
import os
import platform
import re
import sys
import tempfile
import urllib
import urllib2
import zipfile
DOWNLOAD_URL_FORMAT = 'https://github.com/grow/grow/releases/download/{version}/{name}'
RELEASES_API = 'https://api.github.com/repos/grow/grow/releases'
RC_FILES = ['.bashrc', '.zshrc', '.bash_profile', '.profile']
RC_FILE_DEFAULT = '.bashrc'
BIN_PATH = '~/bin/grow'
# TODO: Remove when no longer checking for alias.
ALIAS_FILES = ['.bash_aliases', '.bash_profile', '.profile', '.bashrc']
ALIAS_RE = re.compile(r'^alias grow\=([\'"])(.*)\1$', re.MULTILINE)
if 'Linux' in platform.system():
PLATFORM = 'linux'
elif 'Darwin' in platform.system():
PLATFORM = 'mac'
else:
print('{} is not a supported platform. Please file an issue at '
'https://github.com/grow/grow/issues'.format(sys.platform))
sys.exit(-1)
def hai(text, *args):
print text.format(*args, **{
'red': '\033[0;31m',
'/red': '\033[0;m',
'green': '\033[0;32m',
'/green': '\033[0;m',
'yellow': '\033[0;33m',
'/yellow': '\033[0;m',
'white': '\033[0;37m',
'/white': '\033[0;m',
})
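# NOTE (added): hedged example of the colour templating used by hai(); the
# message text is illustrative only:
#
#   hai('{green}[ok]{/green} installed Grow SDK {}', version)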
def orly(text, default=False):
resp = raw_input(text).strip().lower()
if resp == 'y':
return True
elif resp == 'n':
return False
return default
# TODO: Remove when no longer checking for alias.
def get_existing_aliases():
"""Find all existing aliases using the regex."""
files_to_alias = {}
for basename in ALIAS_FILES:
basepath = os.path.expanduser('~/{}'.format(basename))
if os.path.exists(basepath):
profile = open(basepath).read()
matches = re.findall(ALIAS_RE, profile)
if matches:
files_to_alias[basepath] = [x[1] for x in matches]
return files_to_alias
def get_rc_path():
for basename in RC_FILES:
basepath = os.path.expanduser('~/{}'.format(basename))
if os.path.exists(basepath):
return basepath
return os.path.expanduser('~/{}'.format(RC_FILE_DEFAULT))
def get_release_for_platform(releases, platform):
"""Find the latest release available for the platform."""
for release in releases:
for each_asset in release['assets']:
if platform in each_asset.get('name', '').lower():
return release
return None
def has_bin_in_path(bin_path):
"""Determine if the binary path is part of the system paths."""
return bin_path in os.environ['PATH'].split(':')
def install(rc_path=None, bin_path=None, force=False):
"""Download and install the binary."""
resp = json.loads(urllib.urlopen(RELEASES_API).read())
try:
release = get_release_for_platform(resp, PLATFORM)
except KeyError:
hai('{red}There was a problem accessing the GitHub Releases API.{/red}')
if 'message' in resp:
hai('{red}{}{/red}', resp['message'])
sys.exit(-1)
if release is None:
print 'Not available for platform: {}.'.format(platform.system())
sys.exit(-1)
version = release['tag_name']
asset = None
for each_asset in release['assets']:
if PLATFORM in each_asset.get('name', '').lower():
asset = each_asset
break
download_url = DOWNLOAD_URL_FORMAT.format(
version=version, name=asset['name'])
bin_path = os.path.expanduser(bin_path or BIN_PATH)
bin_dir = os.path.dirname(bin_path)
rc_comment = '# Added by Grow SDK Installer ({})'.format(
datetime.datetime.now())
rc_path = os.path.expanduser(rc_path or get_rc_path())
rc_path_append = 'export PATH={}:$PATH'.format(bin_dir)
hai('{yellow}Welcome to the installer for Grow SDK v{}{/yellow}', version)
hai('{yellow}Release notes: {/yellow}https://github.com/grow/grow/releases/tag/{}', version)
hai('{yellow}[ ]{/yellow} {green}This script will install:{/green} {}', bin_path)
bin_in_path = has_bin_in_path(bin_dir)
if bin_in_path:
hai(
'{green}[✓] You already have the binary directory in PATH:{/green} {}',
bin_dir)
else:
hai(
'{yellow}[ ]{/yellow} {green}{} will be added to the PATH in:{/green} {}',
bin_dir, rc_path)
if not force:
try:
result = orly('Continue installation? [Y]es / [n]o: ', default=True)
except KeyboardInterrupt:
result = False
if not result:
hai('\n\r{red}Aborted installation.{/red}')
sys.exit(-1)
try:
os.makedirs(bin_dir)
except OSError:
# If the directory already exists, let it go.
pass
remote = urllib2.urlopen(download_url)
try:
hai('Downloading from {}'.format(download_url))
local, temp_path = tempfile.mkstemp()
with os.fdopen(local, 'w') as local_file:
while True:
content = remote.read(1048576) # 1MB.
if not content:
sys.stdout.write(' done!\n')
sys.stdout.flush()
break
local_file.write(content)
sys.stdout.write('.')
sys.stdout.flush()
remote.close()
with open(temp_path, 'rb') as fp:
zp = zipfile.ZipFile(fp)
try:
zp.extract('grow', os.path.dirname(bin_path))
except IOError as e:
if 'Text file busy' in str(e):
hai('Unable to overwrite {}. Try closing Grow and installing again.'.format(
bin_path))
hai('You can use the installer by running: curl https://install.grow.io | bash')
sys.exit(-1)
raise
hai('{green}[✓] Installed Grow SDK to:{/green} {}', bin_path)
stat = os.stat(bin_path)
os.chmod(bin_path, stat.st_mode | 0111)
finally:
os.remove(temp_path)
if not bin_in_path:
with open(rc_path, 'a') as fp:
fp.write('\n' + rc_comment + '\n')
fp.write(rc_path_append)
hai('{green}[✓] Added {} to path in:{/green} {}',
bin_path, rc_path)
hai('{green}[✓] All done. Grow v{} successfully installed.{/green}', version)
if not bin_in_path:
hai(' To use Grow: reload your shell session OR use `source {}`,', rc_path)
hai(' then type `grow` and press enter.')
# TODO: Remove when no longer checking for alias.
aliases = get_existing_aliases()
if aliases:
hai('{red}Aliases for grow detected in: {}{/red}', ', '.join(aliases.keys()))
hai(' {red}please remove the old aliases to prevent version conflicts.{/red}')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--bin-path', default=None,
help='Where to install `grow` executable. Ex: ~/bin/grow')
parser.add_argument('--force', dest='force', action='store_true',
help='Whether to force install and bypass prompts.')
parser.add_argument('--rc-path', default=None,
help='Profile to update with PATH. Ex: ~/.bashrc')
parser.set_defaults(force=False)
return parser.parse_args()
def main():
args = parse_args()
install(rc_path=args.rc_path, bin_path=args.bin_path, force=args.force)
if __name__ == '__main__':
main()
|
mit
| -4,453,811,987,850,760,000
| 32.258772
| 100
| 0.576553
| false
| 3.635187
| false
| false
| false
|
Angeldude/csound
|
tests/commandline/test.py
|
1
|
9537
|
#!/usr/bin/python
# Csound Test Suite
# By Steven Yi <stevenyi at gmail dot com>
import os
import sys
from testUI import TestApplication
from Tkinter import *
parserType = ""
showUIatClose = False
##csoundExecutable = r"C:/Users/new/csound-csound6-git/csound.exe "
csoundExecutable =""
class Test:
def __init__(self, fileName, description, expected=True):
self.fileName = fileName
self.description = ""
self.expected = expected
def showUI(results):
root = Tk()
app = TestApplication(master=root)
app.setResults(results)
app.mainloop()
root.destroy()
def showHelp():
message = """Csound Test Suite by Steven Yi<stevenyi@gmail.com>
Runs tests using new parser and shows return values of tests. Results
are written to results.txt file. To show the results using a UI, pass
in the command "--show-ui" like so:
./test.py --show-ui
The test suite defaults to using the new parser. To use the old parser for
the tests, use "--old-parser" in the command like so:
./test.py --show-ui --old-parser
"""
print message
def runTest():
runArgs = "-Wdo test.wav"
if (parserType == "--old-parser"):
print "Testing with old parser"
else:
print "Testing with new parser"
tests = [
["test1.csd", "Simple Test, Single Channel"],
["test2.csd", "Simple Test, 2 Channel"],
["test3.csd", "Simple Test, using i-rate variables, 2 Channel"],
["test4.csd", "Simple Test, using k-rate variables, 2 Channel"],
["test5.csd", "Simple Test, using global i-rate variables, 2 Channel"],
["test6.csd", "Testing Pfields"],
["test7.csd", "Testing expressions, no functions"],
["test8.csd", "Testing multi-part expressions, no functions"],
["test9.csd", "Unused Label (to test labels get parsed)"],
["test10.csd", "kgoto going to a label"],
["test11.csd", "if-kgoto going to a label, boolean expressions"],
["test12.csd", "Simple if-then statement"],
["test13.csd", "function call"],
["test14.csd", "polymorphic test, 0xffff (init)"],
["test15.csd", "pluck test, 0xffff (init)"],
["test16.csd", "Simple if-then with multiple statements in body"],
["test17.csd", "Simple if-then-else with multiple statements in body"],
["test18.csd", "if-then-elseif with no else block"],
["test19.csd", "if-elseif-else"],
["test20.csd", "if-elseif-else with inner if-elseif-else blocks"],
["test21.csd", "if-elseif-else with multiple elseif blocks"],
["test22.csd", "simple UDO"],
["test23.csd", "named instrument"],
## ["test24.csd", "la_i opcodes"],
["test43.csd", "mixed numbered and named instruments"],
["test25.csd", "polymorphic test, 0xfffd (peak)"],
["test26.csd", "polymorphic test, 0xfffc (divz)"],
["test27.csd", "polymorphic test, 0xfffb (chnget)"],
["test28.csd", "label test"],
["test29.csd", "bit operations test"],
["test30.csd", "multi-numbered instrument test"],
["test31.csd", "i-rate conditional test"],
["test32.csd", "continuation lines test"],
["test33.csd", "using named instrument from score (testing score strings)"],
["test34.csd", "tertiary conditional expressions"],
["test35.csd", "test of passign"],
["test36.csd", "opcode with all input args optional (passign)"],
["test37.csd", "Testing in and out"],
["test38.csd", "Testing simple macro"],
["test39.csd", "Testing macro with argument"],
["test40.csd", "Testing i^j"],
["test41.csd", "if statement with = instead of =="],
["test42.csd", "extended string"],
["test44.csd", "expected failure with in-arg given to in opcode", 1],
["test45.csd", "if-goto with expression in boolean comparison"],
["test46.csd", "if-then with expression in boolean comparison"],
["test47.csd", "until loop and t-variables"],
["test48.csd", "expected failure with variable used before defined", 1],
["test_instr0_labels.csd", "test labels in instr0 space"],
["test_string.csd", "test string assignment and printing"],
["test_sprintf.csd", "test string assignment and printing"],
["test_sprintf2.csd", "test string assignment and printing that causes reallocation"],
["test_label_within_if_block.csd", "test label within if block"],
["test_arrays.csd", "test k-array with single dimension, assignment to expression value"],
["test_arrays2.csd", "test gk-array with single dimension, assignment to expression value"],
["test_arrays3.csd", "test k-array with single dimension, assignment with number"],
["test_arrays_multi.csd", "test multi-dimensionsl k-array, assigment to number and expression"],
["test_arrays_string.csd", "test string-array"],
["test_arrays_string2.csd", "test simple string-array assignment"],
["test_asig_as_array.csd", "test using a-sig with array get/set syntax"],
["test_arrays_negative_dimension_fail.csd",
"test expected failure with negative dimension size and array", 1],
["test_empty_conditional_branches.csd", "tests that empty branches do not cause compiler issues"],
["test_empty_instr.csd", "tests that empty instruments do not cause compiler issues"],
["test_empty_udo.csd", "tests that empty UDOs do not cause compiler issues"],
["test_semantics_undefined_var.csd", "test undefined var", 1],
["test_invalid_expression.csd", "test expression", 1],
["test_invalid_ternary.csd", "test expression", 1],
["test_opcode_as_function.csd", "test expression"],
["test_fsig_udo.csd", "UDO with f-sig arg"],
["test_karrays_udo.csd", "UDO with k[] arg"],
["test_arrays_addition.csd", "test array arithmetic (i.e. k[] + k[]"],
["test_arrays_fns.csd", "test functions on arrays (i.e. tabgen)"],
["test_polymorphic_udo.csd", "test polymorphic udo"],
["test_udo_a_array.csd", "test udo with a-array"],
["test_udo_2d_array.csd", "test udo with 2d-array"],
["test_udo_string_array_join.csd", "test udo with S[] arg returning S"],
["test_array_function_call.csd", "test synthesizing an array arg from a function-call"],
]
arrayTests = [["arrays/arrays_i_local.csd", "local i[]"],
["arrays/arrays_i_global.csd", "global i[]"],
["arrays/arrays_k_local.csd", "local k[]"],
["arrays/arrays_k_global.csd", "global k[]"],
["arrays/arrays_a_local.csd", "local a[]"],
["arrays/arrays_a_global.csd", "global a[]"],
["arrays/arrays_S_local.csd", "local S[]"],
["arrays/arrays_S_global.csd", "global S[]"],
]
udoTests = [["udo/fail_no_xin.csd", "fail due to no xin", 1],
["udo/fail_no_xout.csd", "fail due to no xout", 1],
["udo/fail_invalid_xin.csd", "fail due to invalid xin", 1],
["udo/fail_invalid_xout.csd", "fail due to invalid xout", 1],
]
tests += arrayTests
tests += udoTests
output = ""
tempfile = "/tmp/csound_test_output.txt"
if(os.sep == '/' and os.name == 'nt'):
tempfile = 'csound_test_output.txt'
counter = 1
retVals = []
testPass = 0
testFail = 0
for t in tests:
filename = t[0]
desc = t[1]
expectedResult = (len(t) == 3) and 1 or 0
if(os.sep == '\\' or os.name == 'nt'):
            executable = (csoundExecutable == "") and "..\\csound.exe" or csoundExecutable
command = "%s %s %s %s 2> %s"%(executable, parserType, runArgs, filename, tempfile)
print command
retVal = os.system(command)
else:
executable = (csoundExecutable == "") and "../../csound" or csoundExecutable
command = "%s %s %s %s &> %s"%(executable, parserType, runArgs, filename, tempfile)
#print command
retVal = os.system(command)
out = ""
if (retVal == 0) == (expectedResult == 0):
testPass += 1
out = "[pass] - "
else:
testFail += 1
out = "[FAIL] - "
out += "Test %i: %s (%s)\n\tReturn Code: %i\tExpected: %d\n"%(counter, desc, filename, retVal, expectedResult
)
print out
output += "%s\n"%("=" * 80)
output += "Test %i: %s (%s)\nReturn Code: %i\n"%(counter, desc, filename, retVal)
output += "%s\n\n"%("=" * 80)
f = open(tempfile, "r")
csOutput = ""
for line in f:
csOutput += line
output += csOutput
f.close()
retVals.append(t + [retVal, csOutput])
output += "\n\n"
counter += 1
# print output
print "%s\n\n"%("=" * 80)
print "Tests Passed: %i\nTests Failed: %i\n"%(testPass, testFail)
f = open("results.txt", "w")
f.write(output)
f.flush()
f.close()
return retVals
if __name__ == "__main__":
if(len(sys.argv) > 1):
for arg in sys.argv:
if (arg == "--help"):
showHelp()
sys.exit(0)
elif arg == "--show-ui":
showUIatClose = True
elif arg == "--old-parser":
parserType = "--old-parser"
elif arg.startswith("--csound-executable="):
csoundExecutable = arg[20:]
print csoundExecutable
elif arg.startswith("--opcode6dir64="):
os.environ['OPCODE6DIR64'] = arg[15:]
print os.environ['OPCODE6DIR64']
results = runTest()
if (showUIatClose):
showUI(results)
|
lgpl-2.1
| 6,264,556,847,959,010,000
| 37.301205
| 117
| 0.594527
| false
| 3.490849
| true
| false
| false
|
gsobczyk/hamster
|
src/hamster/widgets/facttree.py
|
1
|
23335
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2014 Toms Bauģis <toms.baugis at gmail.com>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import bisect
import cairo
from collections import defaultdict
from gi.repository import GObject as gobject
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import PangoCairo as pangocairo
from gi.repository import Pango as pango
from hamster.lib import datetime as dt
from hamster.lib import graphics
from hamster.lib import stuff
from hamster.lib.fact import Fact
class ActionRow(graphics.Sprite):
def __init__(self):
graphics.Sprite.__init__(self)
self.visible = False
self.restart = graphics.Icon("view-refresh-symbolic", size=18,
interactive=True,
mouse_cursor=gdk.CursorType.HAND1,
y=4)
self.add_child(self.restart)
self.width = 50 # Simon says
class TotalFact(Fact):
"""An extension of Fact that is used for daily totals.
Instances of this class are rendered differently than instances
of Fact.
A TotalFact doesn't have a meaningful start and an end, but a
total duration (delta).
FIXME: Ideally, we should have a common parent for Fact and Total Fact
so we don't need to have nonsensical start and end properties here.
"""
def __init__(self, activity, duration):
super().__init__(activity=activity, start=dt.datetime.now(), end=dt.datetime.now())
self.duration = duration
@property
def delta(self):
return self.duration
class Label(object):
"""a much cheaper label that would be suitable for cellrenderer"""
def __init__(self, x=0, y=0, color=None):
self.x = x
self.y = y
self.color = color
self._label_context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
self.layout = pangocairo.create_layout(self._label_context)
self.layout.set_font_description(pango.FontDescription(graphics._font_desc))
self.set_text("Hamster") # dummy
@property
def height(self):
"""Label height in pixels."""
return self.layout.get_pixel_size()[1]
def set_text(self, text):
self.text = text
self.layout.set_markup(text)
def get_text(self):
return self.text
def show(self, g, text=None, x=None, y=None):
"""Show the label.
If text is given, it overrides any previous set_text().
x and y can be passed to temporary override the position.
(self.x and self.y will not be changed)
"""
g.save_context()
# fallback to self.x
if x is None:
x = self.x
if y is None:
y = self.y
g.move_to(x, y)
if text is not None:
self.set_text(text)
if self.color:
g.set_color(self.color)
pangocairo.show_layout(g.context, self.layout)
g.restore_context()
class TagLabel(Label):
"""Tag label, with small text."""
def set_text(self, text):
Label.set_text(self, "<small>{}</small>".format(text))
class FactRow(object):
def __init__(self):
self.to_export = Label()
self.time_label = Label(x=30)
self.activity_label = Label(x=130)
self.category_label = Label()
self.description_label = Label()
self.tag_label = TagLabel()
self.duration_label = Label()
self.duration_label.layout.set_alignment(pango.Alignment.RIGHT)
self.duration_label.layout.set_width(90 * pango.SCALE)
self.width = 0
# margins (in pixels)
self.tag_row_margin_H = 2.5
self.tag_row_margin_V = 2.5
self.tag_inner_margin_H = 3
self.tag_inner_margin_V = 2
self.inter_tag_margin = 4
self.row_margin_H = 5
self.row_margin_V = 2
self.category_offset_V = self.category_label.height * 0.1
@property
def height(self):
res = self.activity_label.height + 2 * 3
if self.fact.description:
res += self.description_label.height
if self.fact.tags:
res += (self.tag_label.height
+ self.tag_inner_margin_V * 2
+ self.tag_row_margin_V * 2)
res += self.row_margin_V * 2
return res
def set_fact(self, fact):
"""Set current fact."""
self.fact = fact
time_label = fact.start_time.strftime("%H:%M -")
if fact.end_time:
time_label += fact.end_time.strftime(" %H:%M")
self.time_label.set_text(time_label)
self.to_export.set_text("🔸" if fact.exported else ("📤️" if fact.range.end else "⏳"))
self.activity_label.set_text(stuff.escape_pango(fact.activity))
category_text = " - {}".format(stuff.escape_pango(fact.category)) if fact.category else ""
self.category_label.set_text(category_text)
text = stuff.escape_pango(fact.description)
description_text = "<small><i>{}</i></small>".format(text) if fact.description else ""
self.description_label.set_text(description_text)
if fact.tags:
# for now, tags are on a single line.
# The first one is enough to determine the height.
self.tag_label.set_text(stuff.escape_pango(fact.tags[0]))
def _show_tags(self, g, color, bg):
label = self.tag_label
label.color = bg
g.save_context()
g.translate(self.tag_row_margin_H, self.tag_row_margin_V)
for tag in self.fact.tags:
label.set_text(stuff.escape_pango(tag))
w, h = label.layout.get_pixel_size()
rw = w + self.tag_inner_margin_H * 2
rh = h + self.tag_inner_margin_V * 2
g.rectangle(0, 0, rw, rh, 2)
g.fill(color, 0.5)
label.show(g, x=self.tag_inner_margin_H, y=self.tag_inner_margin_V)
g.translate(rw + self.inter_tag_margin, 0)
g.restore_context()
def show(self, g, colors, fact=None, is_selected=False):
"""Display the fact row.
If fact is given, the fact attribute is updated.
"""
g.save_context()
if fact is not None:
# before the selection highlight, to get the correct height
self.set_fact(fact)
color, bg = colors["normal"], colors["normal_bg"]
if is_selected:
color, bg = colors["selected"], colors["selected_bg"]
g.fill_area(0, 0, self.width, self.height, bg)
g.translate(self.row_margin_H, self.row_margin_V)
g.set_color(color)
# Do not show the start/end time for Totals
if not isinstance(self.fact, TotalFact):
self.time_label.show(g)
self.to_export.show(g)
self.activity_label.show(g, self.activity_label.get_text() if not isinstance(self.fact, TotalFact) else "<b>{}</b>".format(self.activity_label.get_text()))
if self.fact.category:
g.save_context()
category_color = graphics.ColorUtils.mix(bg, color, 0.57)
g.set_color(category_color)
x = self.activity_label.x + self.activity_label.layout.get_pixel_size()[0]
self.category_label.show(g, x=x, y=self.category_offset_V)
g.restore_context()
if self.fact.description or self.fact.tags:
g.save_context()
g.translate(self.activity_label.x, self.activity_label.height + 3)
if self.fact.tags:
self._show_tags(g, color, bg)
tag_height = (self.tag_label.height
+ self.tag_inner_margin_V * 2
+ self.tag_row_margin_V * 2)
g.translate(0, tag_height)
if self.fact.description:
self.description_label.show(g)
g.restore_context()
self.duration_label.show(g, self.fact.delta.format() if not isinstance(self.fact, TotalFact) else "<b>{}</b>".format(self.fact.delta.format()), x=self.width - 105)
g.restore_context()
class FactTree(graphics.Scene, gtk.Scrollable):
"""
The fact tree is a painter.
It does not change facts by itself, only sends signals.
Facts get updated only through `set_facts`.
It maintains scroll state and shows what we can see.
That means it does not show all the facts there are,
but rather only those that you can see.
    It's also a painter in the sense that it reuses labels.
    Caching is futile; we do all the painting every time.
ASCII Art!
| Weekday | Start - End | Activity - category [actions]| Duration |
| Month, Day | | tags, description | |
| | Start - End | Activity - category | Duration |
| | | Total | Total Duration |
Inline edit?
"""
__gsignals__ = {
# enter or double-click, passes in current day and fact
'on-activate-row': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
'on-delete-called': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
'on-toggle-exported-row': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
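    # A minimal usage sketch (not part of the original class); `on_activate`
    # and `facts` below are hypothetical names:
    #
    #   fact_tree = FactTree()
    #   fact_tree.connect("on-activate-row", on_activate)  # handler(tree, day, fact)
    #   fact_tree.set_facts(facts, scroll_to_top=True)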
hadjustment = gobject.property(type=gtk.Adjustment, default=None)
hscroll_policy = gobject.property(type=gtk.ScrollablePolicy, default=gtk.ScrollablePolicy.MINIMUM)
vadjustment = gobject.property(type=gtk.Adjustment, default=None)
vscroll_policy = gobject.property(type=gtk.ScrollablePolicy, default=gtk.ScrollablePolicy.MINIMUM)
def __init__(self):
graphics.Scene.__init__(self, style_class=gtk.STYLE_CLASS_VIEW)
self.date_label = Label(10, 3)
fontdesc = pango.FontDescription(graphics._font_desc)
fontdesc.set_weight(pango.Weight.BOLD)
self.date_label.layout.set_alignment(pango.Alignment.RIGHT)
self.date_label.layout.set_width(80 * pango.SCALE)
self.date_label.layout.set_font_description(fontdesc)
self.fact_row = FactRow()
self.action_row = ActionRow()
# self.add_child(self.action_row)
self.row_positions = []
self.row_heights = []
self.y = 0
self.day_padding = 20
self.hover_day = None
self.hover_fact = None
self.current_fact = None
self.style = self._style
self.visible_range = None
self.set_size_request(500, 400)
self.connect("on-mouse-scroll", self.on_scroll)
self.connect("on-mouse-move", self.on_mouse_move)
self.connect("on-mouse-down", self.on_mouse_down)
self.connect("on-resize", self.on_resize)
self.connect("on-key-press", self.on_key_press)
self.connect("notify::vadjustment", self._on_vadjustment_change)
self.connect("on-enter-frame", self.on_enter_frame)
self.connect("on-double-click", self.on_double_click)
self.clipboard = gtk.Clipboard.get(gdk.SELECTION_CLIPBOARD)
@property
def current_fact_index(self):
"""Current fact index in the self.facts list."""
facts_ids = [fact.id for fact in self.facts]
return facts_ids.index(self.current_fact.id)
def on_mouse_down(self, scene, event):
self.on_mouse_move(None, event)
self.grab_focus()
if self.hover_fact:
# match either content or id
if (self.hover_fact == self.current_fact
or (self.hover_fact
and self.current_fact
and self.hover_fact.id == self.current_fact.id)
):
self.unset_current_fact()
# Totals can't be selected
elif not isinstance(self.hover_fact, TotalFact):
self.set_current_fact(self.hover_fact)
def activate_row(self, day, fact):
self.emit("on-activate-row", day, fact)
def toggle_exported_row(self, day, fact):
self.emit("on-toggle-exported-row", fact)
def delete_row(self, fact):
self.emit("on-delete-called", fact)
def copy_fact(self, fact):
self.clipboard.set_text(fact.serialized(), -1)
def on_double_click(self, scene, event):
if self.hover_fact and not isinstance(self.hover_fact, TotalFact):
self.activate_row(self.hover_day, self.hover_fact)
def on_key_press(self, scene, event):
# all keys should appear also in the Overview.on_key_press
# to be forwarded here even without focus.
if event.keyval == gdk.KEY_Up:
if self.facts:
if self.current_fact:
idx = max(0, self.current_fact_index - 1)
else:
# enter from below
idx = len(self.facts) - 1
self.set_current_fact(self.facts[idx])
elif event.keyval == gdk.KEY_Down:
if self.facts:
if self.current_fact:
idx = min(len(self.facts) - 1, self.current_fact_index + 1)
else:
# enter from top
idx = 0
self.set_current_fact(self.facts[idx])
elif event.keyval == gdk.KEY_Home:
if self.facts:
self.set_current_fact(self.facts[0])
elif event.keyval == gdk.KEY_End:
if self.facts:
self.set_current_fact(self.facts[-1])
elif event.keyval == gdk.KEY_Page_Down:
self.y += self.height * 0.8
self.on_scroll()
elif event.keyval == gdk.KEY_Page_Up:
self.y -= self.height * 0.8
self.on_scroll()
elif event.keyval == gdk.KEY_x:
if self.current_fact:
self.toggle_exported_row(self.hover_day, self.current_fact)
elif event.keyval in (gdk.KEY_Return, gdk.KEY_e):
if self.current_fact:
self.activate_row(self.hover_day, self.current_fact)
elif event.keyval == gdk.KEY_Delete:
if self.current_fact:
self.delete_row(self.current_fact)
elif event.state & gdk.ModifierType.CONTROL_MASK and event.keyval == gdk.KEY_c:
if self.current_fact:
self.copy_fact(self.current_fact)
def set_current_fact(self, fact):
self.current_fact = fact
if fact.y < self.y:
self.y = fact.y
if (fact.y + fact.height) > (self.y + self.height):
self.y = fact.y + fact.height - self.height
self.on_scroll()
def unset_current_fact(self):
"""Deselect fact."""
self.current_fact = None
self.on_scroll()
def get_visible_range(self):
start, end = (bisect.bisect(self.row_positions, self.y) - 1,
bisect.bisect(self.row_positions, self.y + self.height))
y = self.y
return [{"i": start + i, "y": pos - y, "h": height, "day": day, "facts": facts}
for i, (pos, height, (day, facts)) in enumerate(zip(self.row_positions[start:end],
self.row_heights[start:end],
self.days[start:end]))]
def on_mouse_move(self, tree, event):
hover_day, hover_fact = None, None
for rec in self.visible_range:
if rec['y'] <= event.y <= (rec['y'] + rec['h']):
hover_day = rec
break
if hover_day != self.hover_day:
# Facts are considered equal if their content is the same,
# even if their id is different.
# redraw only cares about content, not id.
self.redraw()
# make sure it is always fully updated, including facts ids.
self.hover_day = hover_day
if self.hover_day:
for fact in self.hover_day.get('facts', []):
if (fact.y - self.y) <= event.y <= (fact.y - self.y + fact.height):
hover_fact = fact
break
if (hover_fact
and self.hover_fact
and hover_fact.id != self.hover_fact.id
):
self.move_actions()
# idem, always update hover_fact, not just if they appear different
self.hover_fact = hover_fact
def move_actions(self):
if self.hover_fact:
self.action_row.visible = True
self.action_row.x = self.width - 80 - self.action_row.width
self.action_row.y = self.hover_fact.y - self.y
else:
self.action_row.visible = False
def _on_vadjustment_change(self, scene, vadjustment):
if not self.vadjustment:
return
self.vadjustment.connect("value_changed", self.on_scroll_value_changed)
self.set_size_request(500, 300)
def set_facts(self, facts, scroll_to_top=False):
# FactTree adds attributes to its facts. isolate these side effects
# copy the id too; most of the checks are based on id here.
self.facts = [fact.copy(id=fact.id) for fact in facts]
        del facts  # make sure facts is not used inadvertently below.
# If we get an entirely new set of facts, scroll back to the top
if scroll_to_top:
self.y = 0
self.hover_fact = None
if self.vadjustment:
self.vadjustment.set_value(self.y)
if self.facts:
start = self.facts[0].date
end = self.facts[-1].date
else:
start = end = dt.hday.today()
by_date = defaultdict(list)
delta_by_date = defaultdict(dt.timedelta)
for fact in self.facts:
by_date[fact.date].append(fact)
delta_by_date[fact.date] += fact.delta
# Add a TotalFact at the end of each day if we are
# displaying more than one day.
if len(by_date) > 1:
for key in by_date:
total_by_date = TotalFact(_("Total"), delta_by_date[key])
by_date[key].append(total_by_date)
days = []
for i in range((end - start).days + 1):
current_date = start + dt.timedelta(days=i)
if current_date in by_date:
days.append((current_date, by_date[current_date]))
self.days = days
self.set_row_heights()
if (self.current_fact
and self.current_fact.id in (fact.id for fact in self.facts)
):
self.on_scroll()
else:
# will also trigger an on_scroll
self.unset_current_fact()
def set_row_heights(self):
"""
        the row height is determined by the following factors:
            * how many facts there are in the day
            * whether the fact has a description / tags
This func creates a list of row start positions to be able to
quickly determine what to display
"""
if not self.height:
return
y, pos, heights = 0, [], []
for date, facts in self.days:
height = 0
for fact in facts:
self.fact_row.set_fact(fact)
fact_height = self.fact_row.height
fact.y = y + height
fact.height = fact_height
height += fact.height
height += self.day_padding
height = max(height, 60)
pos.append(y)
heights.append(height)
y += height
self.row_positions, self.row_heights = pos, heights
maxy = max(y, 1)
if self.vadjustment:
self.vadjustment.set_lower(0)
self.vadjustment.set_upper(max(maxy, self.height))
self.vadjustment.set_page_size(self.height)
def on_resize(self, scene, event):
self.set_row_heights()
self.fact_row.width = self.width - 105
self.on_scroll()
def on_scroll_value_changed(self, scroll):
self.y = int(scroll.get_value())
self.on_scroll()
def on_scroll(self, scene=None, event=None):
if not self.height:
return
y_pos = self.y
direction = 0
if event and event.direction == gdk.ScrollDirection.UP:
direction = -1
elif event and event.direction == gdk.ScrollDirection.DOWN:
direction = 1
y_pos += 15 * direction
if self.vadjustment:
y_pos = max(0, min(self.vadjustment.get_upper() - self.height, y_pos))
self.vadjustment.set_value(y_pos)
self.y = y_pos
self.move_actions()
self.redraw()
self.visible_range = self.get_visible_range()
def on_enter_frame(self, scene, context):
has_focus = self.get_toplevel().has_toplevel_focus()
if has_focus:
colors = {
"normal": self.style.get_color(gtk.StateFlags.NORMAL),
"normal_bg": self.style.get_background_color(gtk.StateFlags.NORMAL),
"selected": self.style.get_color(gtk.StateFlags.SELECTED),
"selected_bg": self.style.get_background_color(gtk.StateFlags.SELECTED),
}
else:
colors = {
"normal": self.style.get_color(gtk.StateFlags.BACKDROP),
"normal_bg": self.style.get_background_color(gtk.StateFlags.BACKDROP),
"selected": self.style.get_color(gtk.StateFlags.BACKDROP),
"selected_bg": self.style.get_background_color(gtk.StateFlags.BACKDROP),
}
if not self.height:
return
g = graphics.Graphics(context)
g.set_line_style(1)
g.translate(0.5, 0.5)
date_bg_color = self.colors.mix(colors["normal_bg"], colors["normal"], 0.15)
g.fill_area(0, 0, 105, self.height, date_bg_color)
y = int(self.y)
for rec in self.visible_range:
g.save_context()
g.translate(0, rec['y'])
g.set_color(colors["normal"])
self.date_label.show(g, rec['day'].strftime("%A\n%b %d"))
g.translate(105, 0)
for fact in rec['facts']:
is_selected = (self.current_fact is not None
and fact.id == self.current_fact.id)
self.fact_row.set_fact(fact)
self.fact_row.show(g, colors, is_selected=is_selected)
g.translate(0, self.fact_row.height)
g.restore_context()
|
gpl-3.0
| -825,646,525,799,796,400
| 33.916168
| 171
| 0.572543
| false
| 3.71875
| false
| false
| false
|
Answeror/aip
|
aip/imfs/baidupcs.py
|
1
|
3088
|
from .base import NameMixin, guarded
from .error import ImfsError, NotFoundError
from .utils import thumbnail
from pprint import pformat
from .. import img
from datetime import datetime
BASE = '/apps/aip/cache/'
class PCSError(ImfsError):
pass
class BadResponse(PCSError):
def __init__(self, r):
self.status_code = r.status_code
try:
self.content = r.json()
except:
self.content = r.content
def __str__(self):
return pformat({
'status_code': self.status_code,
'content': self.content
})
def wrap(name):
return BASE + name
def error_code(r):
try:
d = r.json()
code = d.get('error_code', None)
if code is None:
code = d.get('content', {}).get('error_code', None)
return code
except:
return None
class BaiduPCS(NameMixin):
def __init__(self, access_token):
self.access_token = access_token
@guarded
def _load(self, name):
r = self.pcs.download(wrap(name))
if r.status_code == 404:
return None
if not r.ok:
raise BadResponse(r)
return r.content
@guarded
def _save(self, name, data):
r = self.pcs.upload(wrap(name), data, ondup='overwrite')
if not r.ok:
if r.status_code == 400 and error_code(r) == 31061:
pass
else:
raise BadResponse(r)
def _thumbnail(self, name, width, height):
data = self.load(name)
if data is None:
return None
kind = img.kind(data=data)
if kind is None:
raise PCSError('cannot detect image type')
return thumbnail(data, kind, width, height)
@guarded
def _has(self, name):
r = self.pcs.meta(wrap(name))
if r.status_code == 404:
return False
if not r.ok:
raise BadResponse(r)
return True
@guarded
def _remove(self, name):
r = self.pcs.delete(wrap(name))
if not r.ok and r.status_code not in (404,):
raise BadResponse(r)
@guarded
def _mtime(self, name):
r = self.pcs.meta(wrap(name))
if not r.ok:
if r.status_code == 404:
raise NotFoundError(name)
raise BadResponse(r)
return datetime.fromtimestamp(r.json()['list'][0]['mtime'])
def _cache_timeout(self, name):
return None
@property
def pcs(self):
if not hasattr(self, '_pcs'):
from baidupcs import PCS
self._pcs = PCS(self.access_token)
ensure_base(self._pcs, BASE)
return self._pcs
@guarded
def ensure_base(pcs, base):
r = pcs.mkdir(base)
if not r.ok:
if r.status_code == 400 and error_code(r) == 31061:
r = pcs.meta(base)
if not r.ok:
raise BadResponse(r)
if not r.json()['list'][0]['isdir']:
raise PCSError('%s is not dir' % base)
else:
raise BadResponse(r)
|
mit
| 4,479,358,954,816,668,000
| 23.507937
| 67
| 0.53886
| false
| 3.658768
| false
| false
| false
|
dwavesystems/dimod
|
dimod/reference/composites/roofduality.py
|
1
|
2936
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A composite that uses the roof duality algorithm [#bht]_ [#bh]_ to fix some
variables in the binary quadratic model before passing it on to its child
sampler.
.. [#bht] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstrained
Quadratic Binary Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [#bh] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied
Mathematics 123, (2002), pp. 155-225
"""
from dimod.reference.composites.fixedvariable import FixedVariableComposite
from dimod.roof_duality import fix_variables
__all__ = ['RoofDualityComposite']
class RoofDualityComposite(FixedVariableComposite):
"""Uses roof duality to assign some variables before invoking child sampler.
Uses the :func:`~dimod.roof_duality.fix_variables` function to determine
variable assignments, then fixes them before calling the child sampler.
Returned samples include the fixed variables.
Args:
child (:obj:`dimod.Sampler`):
A dimod sampler. Used to sample the binary quadratic model after
variables have been fixed.
"""
@property
def parameters(self):
params = self.child.parameters.copy()
params['sampling_mode'] = []
return params
def sample(self, bqm, sampling_mode=True, **parameters):
"""Sample from the provided binary quadratic model.
Uses the :func:`~dimod.roof_duality.fix_variables` function to determine
which variables to fix.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When
`sampling_mode` is false, strongly connected components are used
to fix more variables, but in some optimal solutions these
variables may take different values.
**parameters:
Parameters for the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
# use roof-duality to decide which variables to fix
parameters['fixed_variables'] = fix_variables(bqm, sampling_mode=sampling_mode)
return super(RoofDualityComposite, self).sample(bqm, **parameters)
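# A minimal usage sketch (not part of the original module). It assumes dimod's
# ExactSolver as the child sampler; the variable names are illustrative only:
#
#   import dimod
#   sampler = RoofDualityComposite(dimod.ExactSolver())
#   bqm = dimod.BinaryQuadraticModel.from_ising({'a': 1.0}, {('a', 'b'): -1.0})
#   sampleset = sampler.sample(bqm, sampling_mode=True)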
|
apache-2.0
| -527,168,984,225,831,500
| 36.641026
| 87
| 0.680858
| false
| 4.152758
| false
| false
| false
|
aewallin/openvoronoi
|
src/test/pytest_ttt_alphabet/ttt_alphabet.py
|
1
|
3568
|
import truetypetracer as ttt
import openvoronoi as ovd
import time
import sys
def translate(segs,x,y):
out = []
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] + x)
p2.append(p[1] + y)
seg2.append(p2)
#seg2.append(seg[3] + y)
out.append(seg2)
return out
def insert_polygon_points(vd, polygon):
pts=[]
for p in polygon:
pts.append( ovd.Point( p[0], p[1] ) )
id_list = []
print "inserting ",len(pts)," point-sites:"
m=0
for p in pts:
id_list.append( vd.addVertexSite( p ) )
print " ",m," added vertex ", id_list[ len(id_list) -1 ]
m=m+1
return id_list
def insert_polygon_segments(vd,id_list):
j=0
print "inserting ",len(id_list)," line-segments:"
for n in range(len(id_list)):
n_nxt = n+1
if n==(len(id_list)-1):
n_nxt=0
print " ",j,"inserting segment ",id_list[n]," - ",id_list[n_nxt]
vd.addLineSite( id_list[n], id_list[n_nxt])
j=j+1
def modify_segments(segs):
segs_mod =[]
for seg in segs:
first = seg[0]
last = seg[ len(seg)-1 ]
assert( first[0]==last[0] and first[1]==last[1] )
seg.pop()
seg.reverse()
segs_mod.append(seg)
#drawSegment(myscreen, seg)
return segs_mod
def insert_many_polygons(vd,segs):
polygon_ids =[]
t_before = time.time()
for poly in segs:
poly_id = insert_polygon_points(vd,poly)
polygon_ids.append(poly_id)
t_after = time.time()
pt_time = t_after-t_before
t_before = time.time()
for ids in polygon_ids:
insert_polygon_segments(vd,ids)
t_after = time.time()
seg_time = t_after-t_before
return [pt_time, seg_time]
def ttt_segments(text,scale,conic_subdiv):
wr = ttt.SEG_Writer()
# wr.scale = 3
wr.arc = False
wr.conic = False
wr.cubic = False
wr.conic_biarc_subdivision = 10 # this has no effect?
wr.conic_line_subdivision = conic_subdiv # this increases nr of points
wr.cubic_biarc_subdivision = 10 # no effect?
wr.cubic_line_subdivision = 10 # no effect?
wr.setFont(3)
wr.scale = float(1)/float(scale)
s3 = ttt.ttt(text,wr)
segs = wr.get_segments()
return segs
if __name__ == "__main__":
conic_subdiv = 200
if len(sys.argv) == 2:
conic_subdiv = int(sys.argv[1])
scale = 25000
segs = ttt_segments( "ABCDEFGHIJKLM", scale, conic_subdiv)
segs2 = ttt_segments( "NOPQRSTUVWXYZ", scale, conic_subdiv)
segs3 = ttt_segments( "abcdefghijklm", scale, conic_subdiv)
#segs3 = ttt_segments( "m", 6400)
segs4 = ttt_segments( "nopqrstuvwxyz", scale, conic_subdiv) # NOPQRSTUVWXYZ", 64000)
segs5 = ttt_segments( "0123456789+-*/", scale, conic_subdiv)
dx = float(50000)/float(scale)
xt=-0.3
segs = translate(segs, xt*dx, 0.05*dx)
segs = modify_segments(segs)
segs2 = translate(segs2, xt*dx, -0.05*dx)
segs2 = modify_segments(segs2)
segs3 = translate(segs3, xt*dx, -0.15*dx)
segs3 = modify_segments(segs3)
segs4 = translate(segs4, xt*dx, -0.22*dx)
segs4 = modify_segments(segs4)
segs5 = translate(segs5, xt*dx, -0.32*dx)
segs5 = modify_segments(segs5)
vd = ovd.VoronoiDiagram(1,120)
all_segs=segs+segs2 +segs3 +segs4+segs5
insert_many_polygons(vd,all_segs)
c = vd.check()
print " VD check: ", c
if c:
exit(0)
else:
exit(-1)
|
lgpl-2.1
| -4,697,356,656,404,354,000
| 26.658915
| 88
| 0.571749
| false
| 2.858974
| false
| false
| false
|
gannetson/sportschooldeopenlucht
|
apps/cowry/migrations/0001_initial.py
|
1
|
3506
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Payment'
db.create_table(u'cowry_payment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'polymorphic_cowry.payment_set', null=True, to=orm['contenttypes.ContentType'])),
('amount', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('currency', self.gf('django.db.models.fields.CharField')(default='', max_length=3)),
('fee', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('payment_method_id', self.gf('django.db.models.fields.CharField')(default='', max_length=20, blank=True)),
('payment_submethod_id', self.gf('django.db.models.fields.CharField')(default='', max_length=20, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=15, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'cowry', ['Payment'])
def backwards(self, orm):
# Deleting model 'Payment'
db.delete_table(u'cowry_payment')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cowry.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}),
'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['cowry']
|
bsd-3-clause
| 2,409,316,479,135,669,000
| 62.763636
| 196
| 0.596121
| false
| 3.706131
| false
| false
| false
|
gangadhar-kadam/nassimapp
|
stock/doctype/purchase_receipt/purchase_receipt.py
|
1
|
12927
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, flt, cint
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
import webnotes.defaults
from stock.utils import update_bin
from controllers.buying_controller import BuyingController
class DocType(BuyingController):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
self.tname = 'Purchase Receipt Item'
self.fname = 'purchase_receipt_details'
self.count = 0
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'prevdoc_detail_docname',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
}]
def onload(self):
billed_qty = webnotes.conn.sql("""select sum(ifnull(qty, 0)) from `tabPurchase Invoice Item`
where purchase_receipt=%s""", self.doc.name)
if billed_qty:
total_qty = sum((item.qty for item in self.doclist.get({"parentfield": "purchase_receipt_details"})))
self.doc.fields["__billing_complete"] = billed_qty[0][0] == total_qty
def validate(self):
super(DocType, self).validate()
self.po_required()
if not self.doc.status:
self.doc.status = "Draft"
import utilities
utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Cancelled"])
self.validate_with_previous_doc()
self.validate_rejected_warehouse()
self.validate_accepted_rejected_qty()
self.validate_inspection()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_challan_no()
pc_obj = get_obj(dt='Purchase Common')
pc_obj.validate_for_items(self)
self.check_for_stopped_status(pc_obj)
# sub-contracting
self.validate_for_subcontracting()
self.create_raw_materials_supplied("pr_raw_material_details")
self.update_valuation_rate("purchase_receipt_details")
def validate_rejected_warehouse(self):
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if flt(d.rejected_qty) and not d.rejected_warehouse:
d.rejected_warehouse = self.doc.rejected_warehouse
if not d.rejected_warehouse:
webnotes.throw(_("Rejected Warehouse is mandatory against regected item"))
# validate accepted and rejected qty
def validate_accepted_rejected_qty(self):
for d in getlist(self.doclist, "purchase_receipt_details"):
if not flt(d.received_qty) and flt(d.qty):
d.received_qty = flt(d.qty) - flt(d.rejected_qty)
elif not flt(d.qty) and flt(d.rejected_qty):
d.qty = flt(d.received_qty) - flt(d.rejected_qty)
elif not flt(d.rejected_qty):
d.rejected_qty = flt(d.received_qty) - flt(d.qty)
# Check Received Qty = Accepted Qty + Rejected Qty
if ((flt(d.qty) + flt(d.rejected_qty)) != flt(d.received_qty)):
msgprint("Sum of Accepted Qty and Rejected Qty must be equal to Received quantity. Error for Item: " + cstr(d.item_code))
raise Exception
def validate_challan_no(self):
"Validate if same challan no exists for same supplier in a submitted purchase receipt"
if self.doc.challan_no:
exists = webnotes.conn.sql("""select name from `tabPurchase Receipt`
where docstatus=1 and name!=%s and supplier=%s and challan_no=%s
and fiscal_year=%s""", (self.doc.name, self.doc.supplier,
self.doc.challan_no, self.doc.fiscal_year))
if exists:
webnotes.throw(_("Supplier delivery number duplicate in {0}").format(exists))
def validate_with_previous_doc(self):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["project_name", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True
}
})
if cint(webnotes.defaults.get_global_default('maintain_same_rate')):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["import_rate", "="]],
"is_child_table": True
}
})
def po_required(self):
if webnotes.conn.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in getlist(self.doclist,'purchase_receipt_details'):
if not d.prevdoc_docname:
msgprint("Purchse Order No. required against item %s"%d.item_code)
raise Exception
def update_stock(self):
sl_entries = []
stock_items = self.get_stock_items()
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.item_code in stock_items and d.warehouse:
pr_qty = flt(d.qty) * flt(d.conversion_factor)
if pr_qty:
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": flt(pr_qty),
"serial_no": cstr(d.serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
if flt(d.rejected_qty) > 0:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": d.rejected_warehouse,
"actual_qty": flt(d.rejected_qty) * flt(d.conversion_factor),
"serial_no": cstr(d.rejected_serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
self.bk_flush_supp_wh(sl_entries)
self.make_sl_entries(sl_entries)
def update_ordered_qty(self):
stock_items = self.get_stock_items()
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if d.item_code in stock_items and d.warehouse \
and cstr(d.prevdoc_doctype) == 'Purchase Order':
already_received_qty = self.get_already_received_qty(d.prevdoc_docname,
d.prevdoc_detail_docname)
po_qty, ordered_warehouse = self.get_po_qty_and_warehouse(d.prevdoc_detail_docname)
if not ordered_warehouse:
webnotes.throw(_("Warehouse is missing in Purchase Order"))
if already_received_qty + d.qty > po_qty:
ordered_qty = - (po_qty - already_received_qty) * flt(d.conversion_factor)
else:
ordered_qty = - flt(d.qty) * flt(d.conversion_factor)
update_bin({
"item_code": d.item_code,
"warehouse": ordered_warehouse,
"posting_date": self.doc.posting_date,
"ordered_qty": flt(ordered_qty) if self.doc.docstatus==1 else -flt(ordered_qty)
})
def get_already_received_qty(self, po, po_detail):
qty = webnotes.conn.sql("""select sum(qty) from `tabPurchase Receipt Item`
where prevdoc_detail_docname = %s and docstatus = 1
and prevdoc_doctype='Purchase Order' and prevdoc_docname=%s
and parent != %s""", (po_detail, po, self.doc.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = webnotes.conn.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
def bk_flush_supp_wh(self, sl_entries):
for d in getlist(self.doclist, 'pr_raw_material_details'):
# negative quantity is passed as raw material qty has to be decreased
# when PR is submitted and it has to be increased when PR is cancelled
sl_entries.append(self.get_sl_entries(d, {
"item_code": d.rm_item_code,
"warehouse": self.doc.supplier_warehouse,
"actual_qty": -1*flt(d.consumed_qty),
"incoming_rate": 0
}))
def validate_inspection(self):
for d in getlist(self.doclist, 'purchase_receipt_details'): #Enter inspection date for all items that require inspection
ins_reqd = webnotes.conn.sql("select inspection_required from `tabItem` where name = %s",
(d.item_code,), as_dict = 1)
ins_reqd = ins_reqd and ins_reqd[0]['inspection_required'] or 'No'
if ins_reqd == 'Yes' and not d.qa_no:
msgprint("Item: " + d.item_code + " requires QA Inspection. Please enter QA No or report to authorized person to create Quality Inspection")
# Check for Stopped status
def check_for_stopped_status(self, pc_obj):
check_list =[]
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.fields.has_key('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list:
check_list.append(d.prevdoc_docname)
pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname)
# on submit
def on_submit(self):
purchase_controller = webnotes.get_obj("Purchase Common")
# Check for Approving Authority
get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.company, self.doc.grand_total)
# Set status as Submitted
webnotes.conn.set(self.doc, 'status', 'Submitted')
self.update_prevdoc_status()
self.update_ordered_qty()
self.update_stock()
from stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "purchase_receipt_details")
purchase_controller.update_last_purchase_rate(self, 1)
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_rv:
msgprint("Purchase Invoice : " + cstr(self.submit_rv[0][0]) + " has already been submitted !")
raise Exception , "Validation Error."
def on_cancel(self):
pc_obj = get_obj('Purchase Common')
self.check_for_stopped_status(pc_obj)
# Check if Purchase Invoice has been submitted against current Purchase Order
# pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Invoice', docname = self.doc.name, detail_doctype = 'Purchase Invoice Item')
submitted = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % self.doc.name)
if submitted:
msgprint("Purchase Invoice : " + cstr(submitted[0][0]) + " has already been submitted !")
raise Exception
webnotes.conn.set(self.doc,'status','Cancelled')
self.update_ordered_qty()
self.update_stock()
self.update_prevdoc_status()
pc_obj.update_last_purchase_rate(self, 0)
self.make_cancel_gl_entries()
def get_current_stock(self):
for d in getlist(self.doclist, 'pr_raw_material_details'):
if self.doc.supplier_warehouse:
bin = webnotes.conn.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.doc.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_rate(self,arg):
return get_obj('Purchase Common').get_rate(arg,self)
def get_gl_entries(self, warehouse_account=None):
against_stock_account = self.get_company_default("stock_received_but_not_billed")
gl_entries = super(DocType, self).get_gl_entries(warehouse_account, against_stock_account)
return gl_entries
def get_invoiced_qty_map(purchase_receipt):
"""returns a map: {pr_detail: invoiced_qty}"""
invoiced_qty_map = {}
for pr_detail, qty in webnotes.conn.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
where purchase_receipt=%s and docstatus=1""", purchase_receipt):
if not invoiced_qty_map.get(pr_detail):
invoiced_qty_map[pr_detail] = 0
invoiced_qty_map[pr_detail] += qty
return invoiced_qty_map
@webnotes.whitelist()
def make_purchase_invoice(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
pi = webnotes.bean(target)
pi.run_method("set_missing_values")
pi.run_method("set_supplier_defaults")
pi.set_doclist(pi.doclist.get({"parentfield": ["!=", "entries"]}) +
pi.doclist.get({"parentfield": "entries", "qty": [">", 0]}))
if len(pi.doclist.get({"parentfield": "entries"})) == 0:
webnotes.msgprint(_("All items have already been invoiced."), raise_exception=True)
return pi.doclist
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
doclist = get_mapped_doclist("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"prevdoc_detail_docname": "po_detail",
"prevdoc_docname": "purchase_order",
"purchase_rate": "rate"
},
"postprocess": update_item
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doclist, set_missing_values)
return [d.fields for d in doclist]
|
agpl-3.0
| 7,571,372,898,239,984,000
| 35.724432
| 205
| 0.691034
| false
| 3.047383
| false
| false
| false
|
zvoase/django-relax
|
relax/templatetags/couchdb.py
|
1
|
2778
|
# -*- coding: utf-8 -*-
import re
from django import template
from django.template.defaultfilters import stringfilter
from relax import json, settings
register = template.Library()
class SettingNode(template.Node):
def __init__(self, setting_name, var_name=None, default_value=None):
# The variable name will be stored no matter what.
self.var_name = var_name
# If there is a default value, it will be added to the args for the
# relax.settings._ function; otherwise it will just be the setting
# name.
self.setting_args = ((setting_name, default_value) if default_value
else (setting_name,))
def render(self, context):
# We pre-stored these arguments in __init__, remember?
value = settings._(*self.setting_args)
# If a variable name was provided, use it.
if self.var_name:
context[self.var_name] = value
return ''
# Otherwise, render the setting as a string in the template.
else:
return str(value)
def get_setting(parser, token):
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires arguments' % (token.contents.split()[0],))
    # Here we match 4 different regexes. This deals with the optional presence
    # of both a default value and a variable name.
match = re.search(r'^([A-Za-z0-9_-]+)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) (.*?)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) (.*?) as ([A-Za-z0-9_]+)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) as ([A-Za-z0-9_]+)$', arg)
if not match:
# If all else fails, just raise an error.
raise template.TemplateSyntaxError('Invalid arguments for %r tag' %
(tag_name,))
setting_name, var_name = match.groups()
return SettingNode(setting_name, var_name=var_name)
setting_name, default_value, var_name = match.groups()
# The default value should be specified in JSON format. This makes
# things considerably more secure than just using eval().
default_value = json.loads(default_value)
return SettingNode(setting_name, var_name=var_name,
default_value=default_value)
setting_name, default_value = match.groups()
default_value = json.loads(default_value)
return SettingNode(setting_name, default_value=default_value)
setting_name = match.groups()[0]
return SettingNode(setting_name)
register.tag('setting', get_setting)
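# Template usage, inferred from the regexes above (a sketch; the setting name
# and default value are hypothetical):
#
#   {% setting COUCHDB_SERVER %}
#   {% setting COUCHDB_SERVER "http://localhost:5984/" %}
#   {% setting COUCHDB_SERVER "http://localhost:5984/" as server %}
#   {% setting COUCHDB_SERVER as server %}
#
# When a default is given it must be valid JSON, since it is parsed with
# json.loads() rather than eval().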
|
mit
| -2,214,768,308,070,269,200
| 39.275362
| 87
| 0.598632
| false
| 3.934844
| false
| false
| false
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/prepareTemplate.py
|
1
|
6722
|
"""load template into primary storage"""
from baseCmd import *
from baseResponse import *
class prepareTemplateCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "false"
"""template ID of the template to be prepared in primary storage(s)."""
"""Required"""
self.templateid = None
self.typeInfo['templateid'] = 'uuid'
"""zone ID of the template to be prepared in primary storage(s)."""
"""Required"""
self.zoneid = None
self.typeInfo['zoneid'] = 'uuid'
"""storage pool ID of the primary storage pool to which the template should be prepared. If it is not provided the template is prepared on all the available primary storage pools."""
self.storageid = None
self.typeInfo['storageid'] = 'uuid'
self.required = ["templateid", "zoneid", ]
class prepareTemplateResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the template ID"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account name to which the template belongs"""
self.account = None
self.typeInfo['account'] = 'string'
"""the account id to which the template belongs"""
self.accountid = None
self.typeInfo['accountid'] = 'string'
"""true if the ISO is bootable, false otherwise"""
self.bootable = None
self.typeInfo['bootable'] = 'boolean'
"""checksum of the template"""
self.checksum = None
self.typeInfo['checksum'] = 'string'
"""the date this template was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""true if the template is managed across all Zones, false otherwise"""
self.crossZones = None
self.typeInfo['crossZones'] = 'boolean'
"""additional key/value details tied with template"""
self.details = None
self.typeInfo['details'] = 'map'
"""the template display text"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""the name of the domain to which the template belongs"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain to which the template belongs"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the format of the template."""
self.format = None
self.typeInfo['format'] = 'imageformat'
"""the ID of the secondary storage host for the template"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the secondary storage host for the template"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""true if template contains XS tools inorder to support dynamic scaling of VM cpu/memory"""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""true if the template is extractable, false otherwise"""
self.isextractable = None
self.typeInfo['isextractable'] = 'boolean'
"""true if this template is a featured template, false otherwise"""
self.isfeatured = None
self.typeInfo['isfeatured'] = 'boolean'
"""true if this template is a public template, false otherwise"""
self.ispublic = None
self.typeInfo['ispublic'] = 'boolean'
"""true if the template is ready to be deployed from, false otherwise."""
self.isready = None
self.typeInfo['isready'] = 'boolean'
"""the template name"""
self.name = None
self.typeInfo['name'] = 'string'
"""the ID of the OS type for this template."""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'string'
"""the name of the OS type for this template."""
self.ostypename = None
self.typeInfo['ostypename'] = 'string'
"""true if the reset password feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the template"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the template"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""the date this template was removed"""
self.removed = None
self.typeInfo['removed'] = 'date'
"""the size of the template"""
self.size = None
self.typeInfo['size'] = 'long'
"""the template ID of the parent template if present"""
self.sourcetemplateid = None
self.typeInfo['sourcetemplateid'] = 'string'
"""true if template is sshkey enabled, false otherwise"""
self.sshkeyenabled = None
self.typeInfo['sshkeyenabled'] = 'boolean'
"""the status of the template"""
self.status = None
self.typeInfo['status'] = 'string'
"""the tag of this template"""
self.templatetag = None
self.typeInfo['templatetag'] = 'string'
"""the type of the template"""
self.templatetype = None
self.typeInfo['templatetype'] = 'string'
"""the ID of the zone for this template"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the zone for this template"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""the list of resource tags associated with tempate"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
|
apache-2.0
| 6,533,930,235,692,661,000
| 39.739394
| 190
| 0.591937
| false
| 4.303457
| false
| false
| false
|
stivalet/PHP-Vuln-test-suite-generator
|
bin/core.py
|
1
|
3062
|
from Classes.Manifest import *
from Flaws_generators.Generator_factory import *
from Flaws_generators.Generation_functions import *
import global_variables as g
# Standard-library imports for names used below (getopt, sys, time, ElementTree).
import getopt
import sys
import time
import xml.etree.ElementTree as ET
def main(argv):
# List of flaws
flaws = ["XSS", "IDOR", "Injection", "URF", "SM", "SDE"]
flaw_list = []
#Gets options & arguments
try:
opts, args = getopt.getopt(argv, "c:f:h", ["cwe=", "flaws=", "help"])
except getopt.GetoptError:
print('Invalid argument')
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-f", "--flaws"): #Select flaws
flaw_list = arg.split(',')
elif opt in ("-c", "--cwe"): #Select CWEs
g.cwe_list = arg.split(',')
elif opt in ("-h", "--help"): #Show usage
usage()
return 0
else: #Default
usage()
return 0
for flaw in flaw_list:
if flaw not in flaws:
usage()
return 0
date = time.strftime("%m-%d-%Y_%Hh%Mm%S")
root = ET.parse('output.xml').getroot()
if len(flaw_list) == 0 or len(g.cwe_list) > 0: #Select all flaws
flaw_list=flaws
for flaw in flaw_list:
if flaw == "XSS":
initialization(Generator_factory.makeXSS_Generator(date), root)
if flaw == "Injection":
initialization(Generator_factory.makeInjection_Generator(date), root)
if flaw == "IDOR":
initialization(Generator_factory.makeIDOR_Generator(date), root)
if flaw == "URF":
initialization(Generator_factory.makeURF_Generator(date), root)
if flaw == "SM":
for input in root.findall('input'):
root.remove(input)
initialization(Generator_factory.makeSM_Generator(date), root)
if flaw == "SDE":
for input in root.findall('input'):
root.remove(input)
initialization(Generator_factory.makeSDE_Generator(date), root)
def usage():
flaw = "-f flaws to generate (flaw1,flaw2,flaw3,...):\n\tIDOR :\tInsecure Direct Object Reference\n\tInjection :\tInjection (SQL,LDAP,XPATH)\n\tSDE :\tSensitive Data Exposure\n\tSM :\tSecurity Misconfiguration\n\tURF :\tURL Redirects and Forwards\n\tXSS :\tCross-site Scripting"
cweparam = "-c generate particular CWE:\n\t78 :\tCommand OS Injection\n\t79 :\tXSS\n\t89 :\tSQL Injection\n\t90 :\tLDAP Injection\n\t91 :\tXPath Injection\n\t95 :\tCode Injection\n\t98 :\tFile Injection\n\t209 :\tInformation Exposure Through an Error Message\n\t311 :\tMissing Encryption of Sensitive Data\n\t327 :\tUse of a Broken or Risky Cryptographic Algorithm\n\t601 :\tURL Redirection to Untrusted Site\n\t862 :\tInsecure Direct Object References"
example = "$py core.py -f Injection \t// generate test cases with Injection flaws\n $py core.py -c 79 \t\t// generate test cases with cross site scripting."
print("usage: [-f flaw | -c cwe ] [arg]\nOptions and arguments:\n", flaw, "\n", cweparam,"\n",example )
if __name__ == "__main__":
main(sys.argv[1:])
|
mit
| 526,869,589,661,282,600
| 43.376812
| 457
| 0.612998
| false
| 3.296017
| false
| false
| false
|
pjdufour/geonode
|
geonode/base/forms.py
|
1
|
13646
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from fields import MultiThesauriField
from widgets import MultiThesauriWidget
from autocomplete_light.contrib.taggit_field import TaggitField, TaggitWidget
from django import forms
from django.forms import models
from django.forms.fields import ChoiceField
from django.forms.utils import flatatt
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.utils.encoding import (
force_text,
)
from bootstrap3_datetime.widgets import DateTimePicker
from modeltranslation.forms import TranslationModelForm
from geonode.base.models import TopicCategory, Region
from geonode.people.models import Profile
def get_tree_data():
def rectree(parent, path):
children_list_of_tuples = list()
c = Region.objects.filter(parent=parent)
for child in c:
children_list_of_tuples.append(
tuple((path + parent.name, tuple((child.id, child.name))))
)
childrens = rectree(child, parent.name + '/')
if childrens:
children_list_of_tuples.extend(childrens)
return children_list_of_tuples
data = list()
try:
t = Region.objects.filter(Q(level=0) | Q(parent=None))
for toplevel in t:
data.append(
tuple((toplevel.id, toplevel.name))
)
childrens = rectree(toplevel, '')
if childrens:
data.append(
tuple((toplevel.name, childrens))
)
except:
pass
return tuple(data)
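# Illustrative shape of the value returned above (region names and ids are
# hypothetical): the tuple mixes (id, name) pairs for top-level regions with
# (name, children) groups, where each child entry is (parent_path, (child_id,
# child_name)), e.g.
#
#   ((1, 'Africa'), ('Africa', [('Africa', (2, 'Kenya'))]))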
class AdvancedModelChoiceIterator(models.ModelChoiceIterator):
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj), obj)
class CategoryChoiceField(forms.ModelChoiceField):
def _get_choices(self):
if hasattr(self, '_choices'):
return self._choices
return AdvancedModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def label_from_instance(self, obj):
return '<i class="fa '+obj.fa_class+' fa-2x unchecked"></i>' \
'<i class="fa '+obj.fa_class+' fa-2x checked"></i>' \
'<span class="has-popover" data-container="body" data-toggle="popover" data-placement="top" ' \
'data-content="' + obj.description + '" trigger="hover">' \
'<br/><strong>' + obj.gn_description + '</strong></span>'
class TreeWidget(forms.TextInput):
input_type = 'text'
def __init__(self, attrs=None):
super(TreeWidget, self).__init__(attrs)
def render(self, name, values, attrs=None):
if isinstance(values, basestring):
vals = values
else:
vals = ','.join([str(i.tag.name) for i in values])
output = ["""<input class='form-control' id='id_resource-keywords' name='resource-keywords'
value='%s'><br/>""" % (vals)]
output.append('<div id="treeview" class=""></div>')
return mark_safe(u'\n'.join(output))
class RegionsMultipleChoiceField(forms.MultipleChoiceField):
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'], code='required')
class RegionsSelect(forms.Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def render_option_value(self, selected_choices, option_value, option_label, data_section=None):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
label = force_text(option_label)
if data_section is None:
data_section = ''
else:
data_section = force_text(data_section)
if '/' in data_section:
label = format_html('{} [{}]', label, data_section.rsplit('/', 1)[1])
return format_html('<option data-section="{}" value="{}"{}>{}</option>',
data_section,
option_value,
selected_html,
label)
def render_options(self, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
output.append(format_html('<optgroup label="{}">', 'Global'))
for option_value, option_label in self.choices:
if not isinstance(option_label, (list, tuple)) and isinstance(option_label, basestring):
output.append(self.render_option_value(selected_choices, option_value, option_label))
output.append('</optgroup>')
for option_value, option_label in self.choices:
if isinstance(option_label, (list, tuple)) and not isinstance(option_label, basestring):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
if isinstance(option, (list, tuple)) and not isinstance(option, basestring):
if isinstance(option[1][0], (list, tuple)) and not isinstance(option[1][0], basestring):
for option_child in option[1][0]:
output.append(self.render_option_value(selected_choices,
*option_child,
data_section=force_text(option[1][0][0])))
else:
output.append(self.render_option_value(selected_choices,
*option[1],
data_section=force_text(option[0])))
else:
output.append(self.render_option_value(selected_choices,
*option,
data_section=force_text(option_value)))
output.append('</optgroup>')
return '\n'.join(output)
class CategoryForm(forms.Form):
category_choice_field = CategoryChoiceField(required=False,
label='*' + _('Category'),
empty_label=None,
queryset=TopicCategory.objects.filter(is_choice=True)
.extra(order_by=['description']))
def clean(self):
cleaned_data = self.data
ccf_data = cleaned_data.get("category_choice_field")
if not ccf_data:
msg = _("Category is required.")
self._errors = self.error_class([msg])
# Always return the full collection of cleaned data.
return cleaned_data
class TKeywordForm(forms.Form):
tkeywords = MultiThesauriField(
label=_("Keywords from Thesauri"),
required=False,
help_text=_("List of keywords from Thesauri"),
widget=MultiThesauriWidget())
def clean(self):
cleaned_data = None
if self.data:
try:
cleaned_data = [{key: self.data.getlist(key)} for key, value
in self.data.items()
if 'tkeywords-tkeywords' in key.lower() and 'autocomplete' not in key.lower()]
except:
pass
return cleaned_data
class ResourceBaseForm(TranslationModelForm):
"""Base form for metadata, should be inherited by childres classes of ResourceBase"""
owner = forms.ModelChoiceField(
empty_label="Owner",
label=_("Owner"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
_date_widget_options = {
"icon_attrs": {"class": "fa fa-calendar"},
"attrs": {"class": "form-control input-sm"},
# "format": "%Y-%m-%d %I:%M %p",
"format": "%Y-%m-%d",
# Options for the datetimepickers are not set here on purpose.
# They are set in the metadata_form_js.html template because
# bootstrap-datetimepicker uses jquery for its initialization
# and we need to ensure it is available before trying to
# instantiate a new datetimepicker. This could probably be improved.
"options": False,
}
date = forms.DateTimeField(
label=_("Date"),
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_start = forms.DateTimeField(
label=_("temporal extent start"),
required=False,
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_end = forms.DateTimeField(
label=_("temporal extent end"),
required=False,
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
poc = forms.ModelChoiceField(
empty_label=_("Person outside GeoNode (fill form)"),
label=_("Point of Contact"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
metadata_author = forms.ModelChoiceField(
empty_label=_("Person outside GeoNode (fill form)"),
label=_("Metadata Author"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
keywords = TaggitField(
label=_("Free-text Keywords"),
required=False,
help_text=_("A space or comma-separated list of keywords. Use the widget to select from Hierarchical tree."),
widget=TaggitWidget('HierarchicalKeywordAutocomplete'))
"""
regions = TreeNodeMultipleChoiceField(
label=_("Regions"),
required=False,
queryset=Region.objects.all(),
level_indicator=u'___')
"""
regions = RegionsMultipleChoiceField(
label=_("Regions"),
required=False,
choices=get_tree_data(),
widget=RegionsSelect)
regions.widget.attrs = {"size": 20}
def __init__(self, *args, **kwargs):
super(ResourceBaseForm, self).__init__(*args, **kwargs)
for field in self.fields:
help_text = self.fields[field].help_text
self.fields[field].help_text = None
if help_text != '':
self.fields[field].widget.attrs.update(
{
'class': 'has-popover',
'data-content': help_text,
'data-placement': 'right',
'data-container': 'body',
'data-html': 'true'})
class Meta:
exclude = (
'contacts',
'name',
'uuid',
'bbox_x0',
'bbox_x1',
'bbox_y0',
'bbox_y1',
'srid',
'category',
'csw_typename',
'csw_schema',
'csw_mdsource',
'csw_type',
'csw_wkt_geometry',
'metadata_uploaded',
'metadata_xml',
'csw_anytext',
'popular_count',
'share_count',
'thumbnail',
'charset',
'rating',
'detail_url'
)
|
gpl-3.0
| -3,462,400,340,119,322,600
| 36.081522
| 117
| 0.555767
| false
| 4.485865
| false
| false
| false
|
whitews/FlowIO
|
examples/print_channels.py
|
1
|
1358
|
import flowio
import os
import sys
if len(sys.argv) > 1:
flow_dir = sys.argv[1]
else:
flow_dir = os.getcwd()
files = os.listdir(flow_dir)
for file in files:
try:
flow_data = flowio.FlowData("/".join([flow_dir,file]))
except:
continue
print file + ':'
for key in sorted(flow_data.channels.keys()):
line = key + '\t' + \
flow_data.channels[key]['PnN'] + '\t'
if 'PnS' in flow_data.channels[key]:
line += flow_data.channels[key]['PnS']
print '\t' + line
if 'creator' in flow_data.text:
print '\t' + 'Creator: ' + flow_data.text['creator']
if 'export time' in flow_data.text:
print '\t' + 'Export time: ' + flow_data.text['export time']
if 'experiment name' in flow_data.text:
print '\t' + 'Experiment name: ' + flow_data.text['experiment name']
if 'patient id' in flow_data.text:
print '\t' + 'Patient ID: ' + flow_data.text['patient id']
if 'tube name' in flow_data.text:
print '\t' + 'Tube name: ' + flow_data.text['tube name']
if 'src' in flow_data.text:
print '\t' + 'Source: ' + flow_data.text['src']
if 'sample id' in flow_data.text:
print '\t' + 'Sample ID: ' + flow_data.text['sample id']
if 'tot' in flow_data.text:
print '\t' + 'Total: ' + flow_data.text['tot']
|
bsd-3-clause
| -1,600,365,581,725,694,500
| 31.357143
| 76
| 0.562592
| false
| 3.079365
| false
| false
| false
|
scalyr/scalyr-agent-2
|
scalyr_agent/compat.py
|
1
|
7997
|
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
if False: # NOSONAR
from typing import Union, Tuple, Any, Generator, Iterable, Optional
import sys
import struct
import os
import subprocess
import six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
PY27 = sys.version_info[0] == 2 and sys.version_info[1] == 7
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY_post_equal_279 = sys.version_info >= (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)
PY3_post_equal_37 = PY3 and sys.version_info >= (3, 7)
# NOTE: ssl.match_hostname was added in Python 2.7.9 so for earlier versions, we need to use
# version from backports package
if PY2_pre_279 or PY3_pre_32:
try:
from backports.ssl_match_hostname import (
match_hostname as ssl_match_hostname,
) # NOQA
from backports.ssl_match_hostname import CertificateError # NOQA
except ImportError:
# NOTE: We should never come here in real life. If we do, it indicates we messed up package
# creation and / or path mangling in scalyr_init().
raise Exception(
"Missing backports.ssl_match_hostname module, hostname verification can't "
"be performed"
)
else:
# ssl module in Python 2 >= 2.7.9 and Python 3 >= 3.2 includes match hostname function
from ssl import match_hostname as ssl_match_hostname # NOQA
from ssl import CertificateError # type: ignore # NOQA
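# Minimal usage sketch for the names imported above (the certificate dict is
# hypothetical and truncated): ssl_match_hostname() raises CertificateError
# when the peer certificate does not cover the requested hostname.
#
#   cert = {"subject": ((("commonName", "example.com"),),)}
#   try:
#       ssl_match_hostname(cert, "example.com")
#   except CertificateError:
#       pass  # hostname mismatch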
def custom_any(iterable):
if sys.version_info[:2] > (2, 4):
return any(iterable)
else:
for element in iterable:
if element:
return True
return False
def custom_all(iterable):
if sys.version_info[:2] > (2, 4):
return all(iterable)
else:
for element in iterable:
if not element:
return False
return True
def custom_defaultdict(default_type):
if sys.version_info[:2] > (2, 4):
from collections import defaultdict
return defaultdict(default_type)
else:
class DefaultDict(dict):
def __getitem__(self, key):
if key not in self:
dict.__setitem__(self, key, default_type())
return dict.__getitem__(self, key)
return DefaultDict()
if six.PY2:
class EnvironUnicode(object):
"""Just a wrapper for os.environ, to convert its items to unicode in python2."""
def __getitem__(self, item):
value = os.environ[item]
return six.ensure_text(value)
def get(self, item, default=None):
value = os.environ.get(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def pop(self, item, default=None):
value = os.environ.pop(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def __setitem__(self, key, value):
key = six.ensure_text(key)
value = six.ensure_text(value)
os.environ[key] = value
@staticmethod
def _iterable_elements_to_unicode_generator(iterable):
# type: (Iterable) -> Generator[Union[Tuple, Any]]
"""Generator that gets values from original iterable and converts its 'str' values to 'unicode'"""
for element in iterable:
if type(element) is tuple:
yield tuple(
v.decode("utf-8", "replace")
if type(v) is six.binary_type
else v
for v in element
)
else:
yield six.ensure_text(element)
def iteritems(self):
return self._iterable_elements_to_unicode_generator(
six.iteritems(os.environ)
)
def items(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.items())
)
def iterkeys(self):
return self._iterable_elements_to_unicode_generator(
six.iterkeys(os.environ)
)
def keys(self):
return list(self._iterable_elements_to_unicode_generator(os.environ.keys()))
def itervalues(self):
return self._iterable_elements_to_unicode_generator(
six.itervalues(os.environ)
)
def values(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.values())
)
def copy(self):
return dict(self.items())
def __iter__(self):
return self.iterkeys()
def os_getenv_unicode(name, default=None):
"""The same logic as in os.environ, but with None check."""
result = os.getenv(name, default)
if result is not None:
result = six.ensure_text(result)
return result
os_environ_unicode = EnvironUnicode()
else:
os_environ_unicode = os.environ
os_getenv_unicode = os.getenv
# 2->TODO struct.pack|unpack, does not accept unicode as format string.
# see more: https://python-future.org/stdlib_incompatibilities.html#struct-pack
# to avoid conversion of format string on every struct.pack call, we can monkey patch it here.
if sys.version_info[:3] < (2, 7, 7):
def python_unicode_pack_unpack_wrapper(f):
def _pack_unpack(format_str, *args):
"""wrapper for struct.pack function that converts unicode format string to 'str'"""
binary_format_str = six.ensure_binary(format_str)
return f(binary_format_str, *args)
return _pack_unpack
struct_pack_unicode = python_unicode_pack_unpack_wrapper(struct.pack)
struct_unpack_unicode = python_unicode_pack_unpack_wrapper(struct.unpack)
else:
struct_pack_unicode = struct.pack
struct_unpack_unicode = struct.unpack
def which(executable):
# type: (str) -> Optional[str]
"""
Search for the provided executable in PATH and return path to it if found.
"""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
full_path = os.path.join(path, executable)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
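# Usage sketch (the returned path is illustrative):
#
#   python_path = which("python")   # e.g. "/usr/bin/python", or None if absent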
def find_executable(executable):
# type: (str) -> Optional[str]
"""
Wrapper around distutils.spawn.find_executable which is not available in some default Python 3
installations where the full python3-distutils package is not installed.
"""
try:
from distutils.spawn import find_executable as distutils_find_executable
except ImportError:
# Likely Ubuntu 18.04 where python3-distutils package is not present (default behavior)
return which(executable)
return distutils_find_executable(executable)
def subprocess_check_output(cmd, *args, **kwargs):
"""
Wrapper around subprocess.check_output which is not available under Python 2.6.
"""
if sys.version_info < (2, 7, 0):
output = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs
).communicate()[0]
else:
output = subprocess.check_output(cmd, *args, **kwargs)
return output
|
apache-2.0
| 6,758,009,555,331,464,000
| 31.909465
| 110
| 0.615481
| false
| 4.034813
| false
| false
| false
|
wiki2014/Learning-Summary
|
alps/cts/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
|
1
|
3382
|
# Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.device
import its.objects
import its.caps
import os.path
import numpy
import pylab
import matplotlib
import matplotlib.pyplot
def main():
"""Take long bursts of images and check that they're all identical.
Assumes a static scene. Can be used to identify if there are sporadic
frames that are processed differently or have artifacts, or if 3A isn't
stable, since this test converges 3A at the start but doesn't lock 3A
throughout capture.
"""
NAME = os.path.basename(__file__).split(".")[0]
BURST_LEN = 6
BURSTS = 2
FRAMES = BURST_LEN * BURSTS
DELTA_THRESH = 0.1
with its.device.ItsSession() as cam:
# Capture at full resolution.
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.manual_sensor(props) and
its.caps.awb_lock(props))
w,h = its.objects.get_available_output_sizes("yuv", props)[0]
# Converge 3A prior to capture.
cam.do_3a(lock_ae=True, lock_awb=True)
# After 3A has converged, lock AE+AWB for the duration of the test.
req = its.objects.fastest_auto_capture_request(props)
req["android.blackLevel.lock"] = True
req["android.control.awbLock"] = True
req["android.control.aeLock"] = True
# Capture bursts of YUV shots.
# Build a 4D array, which is an array of all RGB images after down-
# scaling them by a factor of 4x4.
imgs = numpy.empty([FRAMES,h/4,w/4,3])
for j in range(BURSTS):
caps = cam.do_capture([req]*BURST_LEN)
for i,cap in enumerate(caps):
n = j*BURST_LEN + i
imgs[n] = its.image.downscale_image(
its.image.convert_capture_to_rgb_image(cap), 4)
# Dump all images.
print "Dumping images"
for i in range(FRAMES):
its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
# The mean image.
img_mean = imgs.mean(0)
its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
# Compute the deltas of each image from the mean image; this test
# passes if none of the deltas are large.
print "Computing frame differences"
delta_maxes = []
for i in range(FRAMES):
deltas = (imgs[i] - img_mean).reshape(h*w*3/16)
delta_max_pos = numpy.max(deltas)
delta_max_neg = numpy.min(deltas)
delta_maxes.append(max(abs(delta_max_pos), abs(delta_max_neg)))
max_delta_max = max(delta_maxes)
print "Frame %d has largest diff %f" % (
delta_maxes.index(max_delta_max), max_delta_max)
assert(max_delta_max < DELTA_THRESH)
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,365,159,712,139,811,000
| 34.978723
| 75
| 0.630692
| false
| 3.563751
| false
| false
| false
|
shayn1234/apps-catalog
|
deployment/catalog-ci-jenkins/modules/catalog_ci/files/scripts/generate_names.py
|
1
|
1579
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import re
from sys import argv
import yaml
def yaml_to_dict(infile, k):
stream = open(infile, 'r')
rdict = yaml.load(stream)[k]
return rdict
def diff_images_config(images1, images2):
if images1 == images2:
return ''
intersec = [item for item in images1 if item in images2]
sym_diff = [item for item in itertools.chain(
images1, images2) if item not in intersec]
name = ''
d_size = len(sym_diff)
if d_size <= 2:
i = d_size - 1
else:
return ''
if 'name' in sym_diff[i].keys() and 'format' in sym_diff[i].keys():
i_name = re.sub('[(){}<>]', '', sym_diff[i]['name'])
i_type = sym_diff[i]['format']
name = i_name + '.' + i_type
name = name.lower().replace(" ", "_")
return name
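# Minimal sketch with hypothetical image lists: a single differing image is
# reported as a normalized "<name>.<format>" file name.
#
#   images1 = [{'name': 'Cirros 0.3', 'format': 'qcow2'}]
#   images2 = []
#   diff_images_config(images1, images2)   # -> 'cirros_0.3.qcow2'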
if __name__ == '__main__':
if argv[1] == 'glance':
images1 = yaml_to_dict(argv[2], 'images')
images2 = yaml_to_dict(argv[3], 'images')
print(diff_images_config(images1, images2))
|
apache-2.0
| -7,187,242,229,561,227,000
| 30.58
| 75
| 0.631412
| false
| 3.462719
| false
| false
| false
|
aeppert/py-cifsdk
|
test/email/t1.py
|
1
|
2367
|
# -*- coding: utf-8 -*-
msg = """
Delivered-To: support@barely3am.com
Received: by 10.112.40.50 with SMTP id u18csp916705lbk;
Sun, 19 Apr 2015 05:50:04 -0700 (PDT)
X-Received: by 10.42.151.4 with SMTP id c4mr13784232icw.77.1429447803846;
Sun, 19 Apr 2015 05:50:03 -0700 (PDT)
Return-Path: <advertisebz09ua@gmail.com>
Received: from gmail.com ([61.72.137.254])
by mx.google.com with SMTP id s93si13575887ioe.52.2015.04.19.05.50.00
for <support@barely3am.com>;
Sun, 19 Apr 2015 05:50:03 -0700 (PDT)
Received-SPF: softfail (google.com: domain of transitioning advertisebz09ua@gmail.com does not designate 61.72.137.254 as permitted sender) client-ip=61.72.137.254;
Authentication-Results: mx.google.com;
spf=softfail (google.com: domain of transitioning advertisebz09ua@gmail.com does not designate 61.72.137.254 as permitted sender) smtp.mail=advertisebz09ua@gmail.com;
dmarc=fail (p=NONE dis=NONE) header.from=gmail.com
Message-ID: <BE5B7E8D.883B43A2@gmail.com>
Date: Sun, 19 Apr 2015 05:24:33 -0700
Reply-To: "HENRY" <advertisebz09ua@gmail.com>
From: "HENRY" <advertisebz09ua@gmail.com>
User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.19) Gecko/20081209 Thunderbird/2.0.0.19
MIME-Version: 1.0
To: <support@barely3am.com>
Subject: Boost Social Presence with FB posts likes
Content-Type: text/plain;
charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hello,
Boost your Facebook posts with a massive promotion
and gain over 10.000 likes in total towards all your posts.
We can promote up to 20 posts links at a time.
Increase exposure with guaranteed promotion service.
Use this coupon and get another 10% discount on your purchase
==================
10% Coupon = EB2CA
==================
Order today, cheap and guaranteed service:
http://www.socialservices.cn/detail.php?id=9
Regards
HENRY
Â
Unsubscribe option is available on the footer of our website
"""
from cifsdk.email import parse_message
from cifsdk.urls import extract_urls
from pprint import pprint
def test_parse_message():
body = parse_message(msg)
assert type(body) is list
assert body[0].startswith(b'Delivered-To: support@barely3am.com')
def test_email_urls():
body = parse_message(msg)
urls = extract_urls(body[0])
assert 'http://www.socialservices.cn/detail.php?id=9' in urls
|
lgpl-3.0
| -4,987,111,927,963,269,000
| 30.546667
| 173
| 0.724736
| false
| 2.880633
| false
| false
| false
|
lmorillas/catalogoprogramaseducativos
|
lib/extrae_info_wikispaces.py
|
1
|
4799
|
# -*- coding: utf-8 -*-
import urlparse
import re
import json
from amara.bindery import html
from amara.lib import U
import shelve
catsh = shelve.open('catalogo.dat')
def quita_punto(texto):
if texto.endswith('.'):
return texto[:-1]
else: return texto
def dame_texto(texto, inicio, fin):
_ini = texto.find(inicio)
if _ini != -1:
i = _ini +len(inicio)
f = texto[i:].find(fin)
f1 = texto[i:].find('\n\n')
_fin = min(f, f1)
if _fin == -1:
_fin = max(f, f1)
texto = texto[i:i+_fin]
texto = texto.replace (u'\u200d', '').strip()
return texto
def parse_proyecto(url):
item = {}
item['url'] = url
#doc = html.parse(url)
#texto = U(doc.xml_select(u'//div[@class="ws-theme-content-inner"]'))
#catsh[url.encode('utf-8')] = texto
texto = catsh.get(url.encode('utf-8'))
#texto = texto.decode('utf-8')
texto = texto[texto.find("NOMBRE DE LA"):]
nombre = dame_texto(texto, u'NOMBRE DE LA ACTUACIÓN', u'ÓRGANO GESTOR')
item ['label'] = nombre
gestor = dame_texto(texto, u'ÓRGANO GESTOR', u'DESCRIPCIÓN')
if gestor:
gestor = quita_punto(gestor)
if '\n' in gestor:
gestor = gestor.split('\n')
gestor = map(quita_punto, gestor)
item['gestor'] = gestor
descripcion = dame_texto(texto, u'DESCRIPCIÓN', 'DESTINATARIOS')
if descripcion:
item['descripcion'] = descripcion
destinatarios = dame_texto(texto, 'DESTINATARIOS', 'SOLICITUD')
item['destinatarios'] = destinatarios
solicitud = dame_texto(texto, 'SOLICITUD', 'FECHAS' )
item['solicitud'] = solicitud
fechas = dame_texto(texto, 'FECHAS' , u'FINANCIACIÓN')
item['fechas'] = fechas
financiacion = dame_texto(texto, u'FINANCIACIÓN', '\n\n')
if financiacion:
item['financiacion'] = financiacion
masinfo = dame_texto(texto, u'MÁS INFORMACIÓN', '\n\n')
if masinfo:
mas_url = re.search("(?P<url>https?://[^\s]+)", masinfo)
if mas_url:
url = mas_url.group("url")
masinfo = masinfo.replace(url, '<a href="{}" target="_blank">{}</a>'.format(url, url) )
item['masinfo'] = masinfo
return item
f = json.load(open('catacata.json'))
items = f.get('items')
nitems = []
errores = []
for it in items:
url = it.get('url')
if url:
#try:
print '-->', url
res = parse_proyecto(url)
nitems.append(res)
#except:
# print '***', url
# errores.append(url)
#catsh.sync()
catsh.close()
import json
cat = json.load(open('catalogoprog.json'))
items = cat.get('items')
ld = {}
for n in nitems:
ld[n.get('url')] = n
for it in items:
if it.get('type') == 'ta':
n = ld.get(it.get('url'))
if not n:
print '***', it
else:
for k in 'destinatarios fechas gestor solicitud descripcion masinfo'.split():
it[k] = n.get(k)
json.dump(cat, open('catalogoprog.json', 'w'))
'''
gestor = re.compile('RGANO GESTOR</h2>\W*([^<]*)', re.DOTALL)
item['nombre'] = sel.xpath('//h1[@class="pageTitle"]//text()').extract()
item['gestor'] = sel.re(gestor)
item['programa'] = sel.xpath('//h1[@id="toc0"]//text()').extract()
item['gestor'] = sel.xpath()
item['name'] = site.xpath('a/text()').extract()
item['url'] = site.xpath('a/@href').extract()
item['description'] = site.xpath('text()').re('-\s([^\n]*?)\\n')
items.append(item)
yield item
nombre = scrapy.Field()
gestor = scrapy.Field()
descripcion = scrapy.Field()
destinatarios = scrapy.Field()
solicitud = scrapy.Field()
fechas = scrapy.Field()
financiacion = scrapy.Field()
masinfo = scrapy.Field()
apartado = scrapy.Field()
return items
def parse(self, response):
for h3 in response.xpath('//h3').extract():
yield MyItem(title=h3)
for url in response.xpath('//a/@href').extract():
yield scrapy.Request(url, callback=self.parse)
http://catalogo00.wikispaces.com/Reconocimiento+de+buenas+pr%C3%A1cticas+de+educaci%C3%B3n+inclusiva+y+de+convivencia.+Centros+p%C3%BAblicos+-+concertados
http://catalogo00.wikispaces.com/Reconocimiento+de+buenas+pr%C3%A1cticas+de+educaci%C3%B3n+inclusiva+y+de+convivencia.+Centros+p%C3%BAblicos+-+concertados
"http://catalogo1.wikispaces.com/Indice+Educaci%C3%B3n+Inclusiva",
"http://catalogo2.wikispaces.com/Indice+aprender+a+aprender",
"http://catalogo3.wikispaces.com/Indice+Convive+y+Concilia",
"http://catalogo4.wikispaces.com/Indice+excelencia+acad%C3%A9mica",
"http://catalogo5.wikispaces.com/Indice+actuaciones+otros+departamentos",
"http://catalogo6.wikispaces.com/Indice+entidades+privadas",
]
'''
|
apache-2.0
| 9,024,883,634,799,330,000
| 27.855422
| 154
| 0.603549
| false
| 2.686483
| false
| false
| false
|
tzuhsienli/resume-site
|
resume/settings.py
|
1
|
6235
|
# Django settings for resume project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Administrator', 'li.tzuhsien@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'resumedb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'ligang',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '3306', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8ybb^f*xwt6s=5=q5$b$qlrw=yg2y=i)3o4yexun&(&k2jv)*5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'resume.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'resume.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
# RESTful service framework supports
'rest_framework',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# A RESTful API configuration
# Any global settings for a REST framework API are kept in a
# single configuration dictionary named REST_FRAMEWORK.
# Start off by adding the following to your settings.py module:
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
'rest_framework.permissions.IsAdminUser'
],
'PAGINATE_BY': 10
}
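# The defaults above can be overridden per view; a minimal sketch (view and
# import names are illustrative, not part of this project):
#
#   from rest_framework import permissions, viewsets
#
#   class ExampleViewSet(viewsets.ModelViewSet):
#       permission_classes = [permissions.IsAdminUser]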
|
mit
| 4,027,110,781,063,915,000
| 33.832402
| 136
| 0.691259
| false
| 3.760555
| false
| false
| false
|
nehudesi/MSim
|
module/tsutil.py
|
1
|
32434
|
'''
Version: MRT v3.0
Type: Library
Location: C:\MRT3.0\module
Author: Chintan Patel
Email: chintanlike@gmail.com
'''
import math
import datetime as dt
#import numpy as np
import module.qsdateutil as qsdateutil
from math import sqrt
import pandas as pd
from copy import deepcopy
import matplotlib.pyplot as plt
#import random as rand
#import module.DataAccess as da
import module.qsdateutil as du
import numpy as np
def daily(lfFunds):
"""
@summary Computes daily returns centered around 0
@param funds: A time series containing daily fund values
@return an array of daily returns
"""
if type(lfFunds) == type(pd.Series()):
ldt_timestamps = du.getNYSEdays(lfFunds.index[0], lfFunds.index[-1], dt.timedelta(hours=16))
lfFunds = lfFunds.reindex(index=ldt_timestamps, method='ffill')
nds = np.asarray(deepcopy(lfFunds))
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
returnize0(nds)
return(nds)
# def daily1(lfFunds):
# """
# @summary Computes daily returns centered around 1
# @param funds: A time series containing daily fund values
# @return an array of daily returns
# """
# nds = np.asarray(deepcopy(lfFunds))
# s= np.shape(nds)
# if len(s)==1:
# nds=np.expand_dims(nds,1)
# returnize1(nds)
# return(nds)
# def monthly(funds):
# """
# @summary Computes monthly returns centered around 0
# @param funds: A time series containing daily fund values
# @return an array of monthly returns
# """
# funds2 = []
# last_last_month = -1
# years = qsdateutil.getYears(funds)
# for year in years:
# months = qsdateutil.getMonths(funds, year)
# for month in months:
# last_this_month = qsdateutil.getLastDay(funds, year, month)
# if last_last_month == -1 :
# last_last_month=qsdateutil.getFirstDay(funds, year, month)
# if type(funds).__name__=='TimeSeries':
# funds2.append(funds[last_this_month]/funds[last_last_month]-1)
# else:
# funds2.append(funds.xs(last_this_month)/funds.xs(last_last_month)-1)
# last_last_month = last_this_month
# return(funds2)
def average_monthly(funds):
"""
@summary Computes average monthly returns centered around 0
@param funds: A time series containing daily fund values
@return an array of average monthly returns
"""
rets = daily(funds)
ret_i = 0
years = qsdateutil.getYears(funds)
averages = []
for year in years:
months = qsdateutil.getMonths(funds, year)
for month in months:
avg = 0
count = 0
days = qsdateutil.getDays(funds, year, month)
for day in days:
avg += rets[ret_i]
ret_i += 1
count += 1
averages.append(float(avg) / count)
return(averages)
def fillforward(nds):
"""
@summary Removes NaNs from a 2D array by scanning forward in the
1st dimension. If a cell is NaN, the value above it is carried forward.
@param nds: the array to fill forward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(1, nds.shape[0]):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row-1, col]
def fillbackward(nds):
"""
@summary Removes NaNs from a 2D array by scanning backward in the
1st dimension. If a cell is NaN, the value below it is carried backward.
@param nds: the array to fill backward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(nds.shape[0] - 2, -1, -1):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row+1, col]
def returnize0(nds):
"""
@summary Computes stepwise (usually daily) returns relative to 0, where
0 implies no change in value.
@return the array is revised in place
"""
if type(nds) == type(pd.DataFrame()):
nds = (nds / nds.shift(1)) - 1.0
nds = nds.fillna(0.0)
return nds
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
nds[1:, :] = (nds[1:, :] / nds[0:-1]) - 1
nds[0, :] = np.zeros(nds.shape[1])
return nds
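# Worked sketch on hypothetical prices: [100, 110, 99] becomes
# [0.0, 0.10, -0.10], i.e. a 10% gain followed by a 10% loss.
#
#   na_prices = np.array([[100.0], [110.0], [99.0]])
#   returnize0(na_prices)   # the ndarray is revised in place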
# def returnize1(nds):
# """
# @summary Computes stepwise (usually daily) returns relative to 1, where
# 1 implies no change in value.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
# if type(nds) == type(pd.DataFrame()):
# nds = nds / nds.shift(1)
# nds = nds.fillna(1.0)
# return nds
#
# s= np.shape(nds)
# if len(s)==1:
# nds=np.expand_dims(nds,1)
# nds[1:, :] = (nds[1:, :]/nds[0:-1])
# nds[0, :] = np.ones(nds.shape[1])
# return nds
# def priceize1(nds):
# """
# @summary Computes stepwise (usually daily) returns relative to 1, where
# 1 implies no change in value.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
#
# nds[0, :] = 100
# for i in range(1, nds.shape[0]):
# nds[i, :] = nds[i-1, :] * nds[i, :]
#
# def logreturnize(nds):
# """
# @summary Computes stepwise (usually daily) logarithmic returns.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
# returnize1(nds)
# nds = np.log(nds)
# return nds
# def get_winning_days( rets):
# """
# @summary Returns the percentage of winning days of the returns.
# @param rets: 1d numpy array or fund list of daily returns (centered on 0)
# @return Percentage of winning days
# """
# negative_rets = []
# for i in rets:
# if(i<0):
# negative_rets.append(i)
# return 100 * (1 - float(len(negative_rets)) / float(len(rets)))
# def get_max_draw_down(ts_vals):
# """
# @summary Returns the max draw down of the returns.
# @param ts_vals: 1d numpy array or fund list
# @return Max draw down
# """
# MDD = 0
# DD = 0
# peak = -99999
# for value in ts_vals:
# if (value > peak):
# peak = value
# else:
# DD = (peak - value) / peak
# if (DD > MDD):
# MDD = DD
# return -1*MDD
def get_sortino_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sortino ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free return, default is 0%
@return Sortino Ratio, computed off daily returns
"""
rets = np.asarray(rets)
f_mean = np.mean( rets, axis=0 )
negative_rets = rets[rets < 0]
f_dev = np.std( negative_rets, axis=0 )
f_sortino = (f_mean*252 - risk_free) / (f_dev * np.sqrt(252))
return f_sortino
def get_sharpe_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sharpe ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free returns, default is 0%
@return Annualized rate of return, not converted to percent
"""
f_dev = np.std( rets, axis=0 )
f_mean = np.mean( rets, axis=0 )
f_sharpe = (f_mean *252 - risk_free) / ( f_dev * np.sqrt(252) )
return f_sharpe
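# Minimal usage sketch for the two ratio helpers above, assuming `fund_values`
# is a pandas Series of daily fund values (hypothetical data):
#
#   na_rets = daily(fund_values)      # daily returns centered on 0
#   get_sharpe_ratio(na_rets)         # annualized, uses total volatility
#   get_sortino_ratio(na_rets)        # annualized, penalizes downside only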
# def get_ror_annual( rets ):
# """
# @summary Returns the rate of return annualized. Assumes len(rets) is number of days.
# @param rets: 1d numpy array or list of daily returns
# @return Annualized rate of return, not converted to percent
# """
#
# f_inv = 1.0
# for f_ret in rets:
# f_inv = f_inv * f_ret
#
# f_ror_ytd = f_inv - 1.0
#
# #print ' RorYTD =', f_inv, 'Over days:', len(rets)
#
# return ( (1.0 + f_ror_ytd)**( 1.0/(len(rets)/252.0) ) ) - 1.0
# def getPeriodicRets( dmPrice, sOffset ):
# """
# @summary Reindexes a DataMatrix price array and returns the new periodic returns.
# @param dmPrice: DataMatrix of stock prices
# @param sOffset: Offset string to use, choose from _offsetMap in pandas/core/datetools.py
# e.g. 'EOM', 'WEEKDAY', 'W@FRI', 'A@JAN'. Or use a pandas DateOffset.
# """
#
# # Could possibly use DataMatrix.asfreq here """
# # Use pandas DateRange to create the dates we want, use 4:00 """
# drNewRange = DateRange(dmPrice.index[0], dmPrice.index[-1], timeRule=sOffset)
# drNewRange += DateOffset(hours=16)
#
# dmPrice = dmPrice.reindex( drNewRange, method='ffill' )
#
# returnize1( dmPrice.values )
#
# # Do not leave return of 1.0 for first time period: not accurate """
# return dmPrice[1:]
def getReindexedRets( rets, l_period ):
"""
@summary Reindexes returns using the cumulative product. E.g. if returns are 1.5 and 1.5, a period of 2 will
produce a 2-day return of 2.25. Note, these must be returns centered around 1.
@param rets: Daily returns of the various stocks (using returnize1)
@param l_period: New target period.
@note: This function does not track actual weeks or months; it only approximates with trading days.
You can use 5 for week, or 21 for month, etc.
"""
naCumData = np.cumprod(rets, axis=0)
lNewRows =(rets.shape[0]-1) / (l_period)
# We compress data into height / l_period + 1 new rows """
for i in range( lNewRows ):
lCurInd = -1 - i*l_period
# Just hold new data in same array"""
# new return is cumprod on day x / cumprod on day x-l_period """
start=naCumData[lCurInd - l_period, :]
naCumData[-1 - i, :] = naCumData[lCurInd, :] / start
# Select new returns from end of cumulative array """
return naCumData[-lNewRows:, ]
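# Worked sketch (hypothetical returnize1-style data): three daily returns of
# 1.5 with l_period=2 compress to a single 2-day return of 2.25, the product
# of the last two daily returns.
#
#   rets = np.array([[1.5], [1.5], [1.5]])
#   getReindexedRets(rets, 2)   # -> array([[ 2.25]])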
def getOptPort(rets, f_target, l_period=1, naLower=None, naUpper=None, lNagDebug=0):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param rets: Daily returns of the various stocks (using returnize1)
@param f_target: Target return, i.e. 0.04 = 4% per period
@param l_period: Period to compress the returns to, e.g. 7 = weekly
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
@return tuple: (weights of portfolio, min possible return, max possible return)
"""
# Attempt to import library """
try:
pass
import nagint as nag
except ImportError:
print 'Could not import NAG library'
print 'make sure nagint.so is in your python path'
return ([], 0, 0)
# Get number of stocks """
lStocks = rets.shape[1]
# If period != 1 we need to restructure the data """
if( l_period != 1 ):
rets = getReindexedRets( rets, l_period)
# Calculate means and covariance """
naAvgRets = np.average( rets, axis=0 )
naCov = np.cov( rets, rowvar=False )
# Special case for None == f_target"""
# simply return average returns and cov """
if( f_target is None ):
return naAvgRets, np.std(rets, axis=0)
# Calculate upper and lower limits of variables as well as constraints """
if( naUpper is None ):
naUpper = np.ones( lStocks ) # max portfolio % is 1
if( naLower is None ):
naLower = np.zeros( lStocks ) # min is 0, set negative for shorting
# Two extra constraints for linear conditions"""
# result = desired return, and sum of weights = 1 """
naUpper = np.append( naUpper, [f_target, 1.0] )
naLower = np.append( naLower, [f_target, 1.0] )
# Initial estimate of portfolio """
naInitial = np.array([1.0/lStocks]*lStocks)
# Set up constraints matrix"""
# composed of expected returns in row one, unity row in row two """
naConstraints = np.vstack( (naAvgRets, np.ones(lStocks)) )
# Get portfolio weights, last entry in array is actually variance """
try:
naReturn = nag.optPort( naConstraints, naLower, naUpper, \
naCov, naInitial, lNagDebug )
except RuntimeError:
print 'NAG Runtime error with target: %.02lf'%(f_target)
return ( naInitial, sqrt( naCov[0][0] ) )
#return semi-junk to not mess up the rest of the plot
# Calculate stdev of entire portfolio to return"""
# what NAG returns is slightly different """
fPortDev = np.std( np.dot(rets, naReturn[0,0:-1]) )
# Show difference between above stdev and sqrt NAG covariance"""
# possibly not taking correlation into account """
#print fPortDev / sqrt(naReturn[0, -1])
# Return weights and stdDev of portfolio."""
# note again the last value of naReturn is NAG's reported variance """
return (naReturn[0, 0:-1], fPortDev)
def OptPort( naData, fTarget, naLower=None, naUpper=None, naExpected=None, s_type = "long"):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param naData: Daily returns of the various stocks (using returnize1)
@param fTarget: Target return, i.e. 0.04 = 4% per period
@param lPeriod: Period to compress the returns to, e.g. 7 = weekly
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
@return tuple: (weights of portfolio, min possible return, max possible return)
"""
''' Attempt to import library '''
try:
pass
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt.solvers import qp, options
except ImportError:
print 'Could not import CVX library'
raise
''' Get number of stocks '''
length = naData.shape[1]
b_error = False
naLower = deepcopy(naLower)
naUpper = deepcopy(naUpper)
naExpected = deepcopy(naExpected)
# Assuming AvgReturns as the expected returns if parameter is not specified
if (naExpected==None):
naExpected = np.average( naData, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
naData = na_signs*naData
naExpected = na_signs*naExpected
# Covariance matrix of the Data Set
naCov=np.cov(naData, rowvar=False)
# If length is one, just return 100% single symbol
if length == 1:
return (list(na_signs), np.std(naData, axis=0)[0], False)
if length == 0:
return ([], [0], False)
# If we have 0/1 "free" equity we can't optimize
# We just use limits since we are stuck with 0 degrees of freedom
''' Special case for None == fTarget, simply return average returns and cov '''
if( fTarget is None ):
return (naExpected, np.std(naData, axis=0), b_error)
# Upper bound of the Weights of a equity, If not specified, assumed to be 1.
if(naUpper is None):
naUpper= np.ones(length)
# Lower bound of the Weights of a equity, If not specified assumed to be 0 (No shorting case)
if(naLower is None):
naLower= np.zeros(length)
if sum(naLower) == 1:
fPortDev = np.std(np.dot(naData, naLower))
return (naLower, fPortDev, False)
if sum(naUpper) == 1:
fPortDev = np.std(np.dot(naData, naUpper))
return (naUpper, fPortDev, False)
naFree = naUpper != naLower
if naFree.sum() <= 1:
lnaPortfolios = naUpper.copy()
# If there is 1 free we need to modify it to make the total
# Add up to 1
if naFree.sum() == 1:
f_rest = naUpper[~naFree].sum()
lnaPortfolios[naFree] = 1.0 - f_rest
lnaPortfolios = na_signs * lnaPortfolios
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, False)
# Double the diagonal elements of the covariance matrix for calculating risk.
for i in range(length):
naCov[i][i]=2*naCov[i][i]
# Note, returns are modified to all be long from here on out
(fMin, fMax) = getRetRange(False, naLower, naUpper, naExpected, "long")
#print (fTarget, fMin, fMax)
if fTarget<fMin or fTarget>fMax:
print "<<<(i) Target not achievable..", fTarget, fMin, fMax
b_error = True
naLower = naLower*(-1)
# Setting up the parameters for the CVXOPT Library, it takes inputs in Matrix format.
'''
The Risk minimization problem is a standard Quadratic Programming problem according to the Markowitz Theory.
'''
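# In the notation below this is the standard Markowitz QP (a sketch of the
# formulation, matching the qp() call further down):
#   minimize  w' S w
#   subject to  naExpected . w = fTarget,  sum(w) = 1,  naLower <= w <= naUpper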
S=matrix(naCov)
#pbar=matrix(naExpected)
naLower.shape=(length,1)
naUpper.shape=(length,1)
naExpected.shape = (1,length)
zeo=matrix(0.0,(length,1))
I = np.eye(length)
minusI=-1*I
G=matrix(np.vstack((I, minusI)))
h=matrix(np.vstack((naUpper, naLower)))
ones=matrix(1.0,(1,length))
A=matrix(np.vstack((naExpected, ones)))
b=matrix([float(fTarget),1.0])
# Optional Settings for CVXOPT
options['show_progress'] = False
options['abstol']=1e-25
options['reltol']=1e-24
options['feastol']=1e-25
# Optimization Calls
# Optimal Portfolio
try:
lnaPortfolios = qp(S, -zeo, G, h, A, b)['x']
except:
b_error = True
if b_error == True:
print "<<<(i) Optimization not Possible"
na_port = naLower*-1
if sum(na_port) < 1:
if sum(naUpper) == 1:
na_port = naUpper
else:
i=0
while(sum(na_port)<1 and i<25):
naOrder = naUpper - na_port
i = i+1
indices = np.where(naOrder > 0)
na_port[indices]= na_port[indices] + (1-sum(na_port))/len(indices[0])
naOrder = naUpper - na_port
indices = np.where(naOrder < 0)
na_port[indices]= naUpper[indices]
lnaPortfolios = matrix(na_port)
lnaPortfolios = (na_signs.reshape(-1,1) * lnaPortfolios).reshape(-1)
# Expected Return of the Portfolio
# lfReturn = dot(pbar, lnaPortfolios)
# Risk of the portfolio
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, b_error)
def getRetRange( rets, naLower, naUpper, naExpected = "False", s_type = "long"):
"""
@summary Returns the range of possible returns with upper and lower bounds on the portfolio participation
@param rets: Expected returns
@param naLower: List of lower percentages by stock
@param naUpper: List of upper percentages by stock
@return tuple containing (fMin, fMax)
"""
# Calculate theoretical minimum and maximum theoretical returns """
fMin = 0
fMax = 0
rets = deepcopy(rets)
if naExpected == "False":
naExpected = np.average( rets, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
rets = na_signs*rets
naExpected = na_signs*naExpected
naSortInd = naExpected.argsort()
# First add the lower bounds on portfolio participation """
for i, fRet in enumerate(naExpected):
fMin = fMin + fRet*naLower[i]
fMax = fMax + fRet*naLower[i]
# Now calculate minimum returns"""
# allocate the max possible in worst performing equities """
# Subtract min since we have already counted it """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMin = fMin + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMin = fMin - naExpected[lInd] * (fTotalPercent - 1.0)
break
# Repeat for max, just reverse the sort, i.e. high to low """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd[::-1]):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMax = fMax + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMax = fMax - naExpected[lInd] * (fTotalPercent - 1.0)
break
return (fMin, fMax)
def _create_dict(df_rets, lnaPortfolios):
allocations = {}
for i, sym in enumerate(df_rets.columns):
allocations[sym] = lnaPortfolios[i]
return allocations
def optimizePortfolio(df_rets, list_min, list_max, list_price_target,
target_risk, direction="long"):
naLower = np.array(list_min)
naUpper = np.array(list_max)
naExpected = np.array(list_price_target)
b_same_flag = np.all( naExpected == naExpected[0])
if b_same_flag and (naExpected[0] == 0):
naExpected = naExpected + 0.1
if b_same_flag:
na_randomness = np.ones(naExpected.shape)
target_risk = 0
for i in range(len(na_randomness)):
if i%2 ==0:
na_randomness[i] = -1
naExpected = naExpected + naExpected*0.0000001*na_randomness
(fMin, fMax) = getRetRange( df_rets.values, naLower, naUpper,
naExpected, direction)
# Try to avoid intractable endpoints due to rounding errors """
fMin += abs(fMin) * 0.00000000001
fMax -= abs(fMax) * 0.00000000001
if target_risk == 1:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': b_error}
fStep = (fMax - fMin) / 50.0
lfReturn = [fMin + x * fStep for x in range(51)]
lfStd = []
lnaPortfolios = []
for fTarget in lfReturn:
(naWeights, fStd, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
if b_error == False:
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
else:
# Return error on ANY failed optimization
allocations = _create_dict(df_rets, np.zeros(df_rets.shape[1]))
return {'allocations': allocations, 'std_dev': 0.0,
'expected_return': fMax, 'error': True}
if len(lfStd) == 0:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': True}
f_return = lfReturn[lfStd.index(min(lfStd))]
if target_risk == 0:
naPortWeights=lnaPortfolios[lfStd.index(min(lfStd))]
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': min(lfStd), 'expected_return': f_return, 'error': False}
# If target_risk = 0.5, then return the one with maximum sharpe
if target_risk == 0.5:
lf_return_new = np.array(lfReturn)
lf_std_new = np.array(lfStd)
lf_std_new = lf_std_new[lf_return_new >= f_return]
lf_return_new = lf_return_new[lf_return_new >= f_return]
na_sharpe = lf_return_new / lf_std_new
i_index_max_sharpe, = np.where(na_sharpe == max(na_sharpe))
i_index_max_sharpe = i_index_max_sharpe[0]
fTarget = lf_return_new[i_index_max_sharpe]
(naPortWeights, fPortDev, b_error) = OptPort(df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
# Otherwise try to hit custom target between 0-1 min-max return
fTarget = f_return + ((fMax - f_return) * target_risk)
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
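# A minimal usage sketch, assuming df_rets is a pandas DataFrame of per-symbol
# returns (one column per ticker); the bounds, price targets and target_risk
# below are illustrative assumptions, not values prescribed by this module.
def _example_optimize_portfolio(df_rets):
    """Illustrative call showing the shape of optimizePortfolio's inputs."""
    n_syms = len(df_rets.columns)
    list_min = [0.0] * n_syms                 # assumed lower bound per symbol
    list_max = [0.25] * n_syms                # assumed upper bound per symbol
    list_price_target = list(df_rets.mean().values)  # crude expected returns
    # target_risk=0.5 requests the maximum-Sharpe portfolio (see above)
    result = optimizePortfolio(df_rets, list_min, list_max,
                               list_price_target, target_risk=0.5)
    return (result['allocations'], result['std_dev'],
            result['expected_return'], result['error'])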
def getFrontier( rets, lRes=100, fUpper=0.2, fLower=0.00):
"""
@summary Generates an efficient frontier based on average returns.
@param rets: Array of returns to use
@param lRes: Resolution of the curve, default=100
@param fUpper: Upper bound on portfolio percentage
@param fLower: Lower bound on portfolio percentage
@return tuple containing (lf_ret, lfStd, lnaPortfolios)
lf_ret: List of returns provided by each point
lfStd: list of standard deviations provided by each point
lnaPortfolios: list of numpy arrays containing weights for each portfolio
"""
# Limit/enforce percent participation
naUpper = np.ones(rets.shape[1]) * fUpper
naLower = np.ones(rets.shape[1]) * fLower
(fMin, fMax) = getRetRange( rets, naLower, naUpper )
# Try to avoid intractable endpoints due to rounding errors
fMin *= 1.0000001
fMax *= 0.9999999
# Calculate target returns from min and max
lf_ret = []
for i in range(lRes):
lf_ret.append( (fMax - fMin) * i / (lRes - 1) + fMin )
lfStd = []
lnaPortfolios = []
# Call the function lRes times for the given range, use 1 for period
for f_target in lf_ret:
(naWeights, fStd) = getOptPort( rets, f_target, 1, \
naUpper=naUpper, naLower=naLower )
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
# plot frontier
#plt.plot( lfStd, lf_ret )
plt.plot( np.std( rets, axis=0 ), np.average( rets, axis=0 ), \
'g+', markersize=10 )
#plt.show()
return (lf_ret, lfStd, lnaPortfolios)
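# A minimal usage sketch, assuming rets is a 2-D numpy array of returns with
# one column per symbol (e.g. df_rets.values); lRes, fUpper and fLower below
# are illustrative values, not requirements of getFrontier.
def _example_efficient_frontier(rets):
    """Illustrative call that returns the computed frontier points."""
    (lf_ret, lfStd, lnaPortfolios) = getFrontier(rets, lRes=50,
                                                 fUpper=0.2, fLower=0.0)
    # Each (lfStd[i], lf_ret[i]) pair is one frontier point and
    # lnaPortfolios[i] holds the corresponding portfolio weights.
    return (lf_ret, lfStd, lnaPortfolios)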
# def stockFilter( dmPrice, dmVolume, fNonNan=0.95, fPriceVolume=100*1000 ):
# """
# @summary Returns the list of stocks filtered based on various criteria.
# @param dmPrice: DataMatrix of stock prices
# @param dmVolume: DataMatrix of stock volumes
# @param fNonNan: Optional non-nan percent, default is .95
# @param fPriceVolume: Optional price*volume, default is 100,000
# @return list of stocks which meet the criteria
# """
#
# lsRetStocks = list( dmPrice.columns )
#
# for sStock in dmPrice.columns:
# fValid = 0.0
# print sStock
# # loop through all dates """
# for dtDate in dmPrice.index:
# # Count null (nan/inf/etc) values """
# fPrice = dmPrice[sStock][dtDate]
# if( not isnull(fPrice) ):
# fValid = fValid + 1
# # else test price volume """
# fVol = dmVolume[sStock][dtDate]
# if( not isnull(fVol) and fVol * fPrice < fPriceVolume ):
# lsRetStocks.remove( sStock )
# break
#
# # Remove if too many nan values """
# if( fValid / len(dmPrice.index) < fNonNan and sStock in lsRetStocks ):
# lsRetStocks.remove( sStock )
#
# return lsRetStocks
#
#
# def getRandPort( lNum, dtStart=None, dtEnd=None, lsStocks=None,\
# dmPrice=None, dmVolume=None, bFilter=True, fNonNan=0.95,\
# fPriceVolume=100*1000, lSeed=None ):
# """
# @summary Returns a random portfolio based on certain criteria.
# @param lNum: Number of stocks to be included
# @param dtStart: Start date for portfolio
# @param dtEnd: End date for portfolio
# @param lsStocks: Optional list of ticker symbols, if not provided all symbols will be used
# @param bFilter: If False, stocks are not filtered by price or volume data, simply return random Portfolio.
# @param dmPrice: Optional price data, if not provided, data access will be queried
# @param dmVolume: Optional volume data, if not provided, data access will be queried
# @param fNonNan: Optional non-nan percent for filter, default is .95
# @param fPriceVolume: Optional price*volume for filter, default is 100,000
# @warning: Does not work for all sets of optional inputs, e.g. if you don't include dtStart, dtEnd, you need
# to include dmPrice/dmVolume
# @return list of stocks which meet the criteria
# """
#
# if( lsStocks is None ):
# if( dmPrice is None and dmVolume is None ):
# norObj = da.DataAccess('Norgate')
# lsStocks = norObj.get_all_symbols()
# elif( not dmPrice is None ):
# lsStocks = list(dmPrice.columns)
# else:
# lsStocks = list(dmVolume.columns)
#
# if( dmPrice is None and dmVolume is None and bFilter == True ):
# norObj = da.DataAccess('Norgate')
# ldtTimestamps = du.getNYSEdays( dtStart, dtEnd, dt.timedelta(hours=16) )
#
# # if dmPrice and dmVol are provided then we don't query it every time """
# bPullPrice = False
# bPullVol = False
# if( dmPrice is None ):
# bPullPrice = True
# if( dmVolume is None ):
# bPullVol = True
#
# # Default seed (none) uses system clock """
# rand.seed(lSeed)
# lsRetStocks = []
#
# # Loop until we have enough randomly selected stocks """
# llRemainingIndexes = range(0,len(lsStocks))
# lsValid = None
# while( len(lsRetStocks) != lNum ):
#
# lsCheckStocks = []
# for i in range( lNum - len(lsRetStocks) ):
# lRemaining = len(llRemainingIndexes)
# if( lRemaining == 0 ):
# print 'Error in getRandPort: ran out of stocks'
# return lsRetStocks
#
# # Pick a stock and remove it from the list of remaining stocks """
# lPicked = rand.randint(0, lRemaining-1)
# lsCheckStocks.append( lsStocks[ llRemainingIndexes.pop(lPicked) ] )
#
# # If bFilter is false"""
# # simply return our first list of stocks, don't check prive/vol """
# if( not bFilter ):
# return sorted(lsCheckStocks)
#
#
# # Get data if needed """
# if( bPullPrice ):
# dmPrice = norObj.get_data( ldtTimestamps, lsCheckStocks, 'close' )
#
# # Get data if needed """
# if( bPullVol ):
# dmVolume = norObj.get_data(ldtTimestamps, lsCheckStocks, 'volume' )
#
# # Only query this once if data is provided"""
# # else query every time with new data """
# if( lsValid is None or bPullVol or bPullPrice ):
# lsValid = stockFilter(dmPrice, dmVolume, fNonNan, fPriceVolume)
#
# for sAdd in lsValid:
# if sAdd in lsCheckStocks:
# lsRetStocks.append( sAdd )
#
# return sorted(lsRetStocks)
|
agpl-3.0
| -8,922,782,742,476,446,000
| 35.452656
| 118
| 0.588981
| false
| 3.292792
| false
| false
| false
|
klmitch/nova
|
nova/tests/unit/scheduler/test_utils.py
|
1
|
80648
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class FakeResourceRequest(object):
"""A fake of ``nova.scheduler.utils.ResourceRequest``.
Allows us to assert that various properties of a real ResourceRequest
object are set as we'd like them to be.
"""
def __init__(self):
self._rg_by_id = {}
self._group_policy = None
self._limit = 1000
class TestUtilsBase(test.NoDBTestCase):
def setUp(self):
super(TestUtilsBase, self).setUp()
self.context = nova_context.get_admin_context()
self.mock_host_manager = mock.Mock()
def assertResourceRequestsEqual(self, expected, observed):
self.assertEqual(expected._limit, observed._limit)
self.assertEqual(expected._group_policy, observed._group_policy)
ex_by_id = expected._rg_by_id
ob_by_id = observed._rg_by_id
self.assertEqual(set(ex_by_id), set(ob_by_id))
for ident in ex_by_id:
self.assertEqual(vars(ex_by_id[ident]), vars(ob_by_id[ident]))
@ddt.ddt
class TestUtils(TestUtilsBase):
def _test_resources_from_request_spec(self, expected, flavor, image=None):
if image is None:
image = objects.ImageMeta(properties=objects.ImageMetaProps())
fake_spec = objects.RequestSpec(flavor=flavor, image=image)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
return resources
def test_resources_from_request_spec_flavor_only(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_flavor_req_traits(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'trait:CUSTOM_FLAVOR_TRAIT': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(['CUSTOM_FLAVOR_TRAIT'])
)
resources = self._test_resources_from_request_spec(
expected_resources, flavor)
expected_result = set(['CUSTOM_FLAVOR_TRAIT'])
self.assertEqual(expected_result, resources.all_required_traits)
def test_resources_from_request_spec_flavor_and_image_traits(self):
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_IMAGE_TRAIT1': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# trait:CUSTOM_IMAGE_TRAIT2 is defined in both extra_specs and
# image metadata. We get a union of both.
'CUSTOM_IMAGE_TRAIT1',
'CUSTOM_IMAGE_TRAIT2',
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor,
image)
def test_resources_from_request_spec_flavor_forbidden_trait(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_with_no_disk(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=0,
ephemeral_gb=0,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_custom_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 15,
"CUSTOM_TEST_CLASS": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_override_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 99,
"resources:MEMORY_MB": 99,
"resources:DISK_GB": 99})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 99,
"MEMORY_MB": 99,
"DISK_GB": 99,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 0,
"resources:DISK_GB": 0})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"MEMORY_MB": 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_vgpu(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=0,
swap=0,
extra_specs={
"resources:VGPU": 1,
"resources:VGPU_DISPLAY_HEAD": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 10,
"VGPU": 1,
"VGPU_DISPLAY_HEAD": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_bad_std_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:DOESNT_EXIST": 0})
fake_spec = objects.RequestSpec(flavor=flavor)
with mock.patch("nova.objects.request_spec.LOG.warning") as mock_log:
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
mock_log.assert_called_once()
args = mock_log.call_args[0]
self.assertEqual(args[0], "Received an invalid ResourceClass "
"'%(key)s' in extra_specs.")
self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
def test_get_resources_from_request_spec_granular(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={'resources1:VGPU': '1',
'resources1:VGPU_DISPLAY_HEAD': '2',
# Replace
'resources3:VCPU': '2',
# Stay separate (don't sum)
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom in the unnumbered group (merge with DISK_GB)
'resources:CUSTOM_THING': '123',
# Traits make it through
'trait3:CUSTOM_SILVER': 'required',
'trait3:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': '0',
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': '0',
'resources:CUSTOM_FOO': '0',
# Bogus values don't make it through
'resources1:MEMORY_MB': 'bogus',
'group_policy': 'none'})
expected_resources = FakeResourceRequest()
expected_resources._group_policy = 'none'
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
'CUSTOM_THING': 123,
}
)
expected_resources._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'VGPU': 1,
'VGPU_DISPLAY_HEAD': 2,
}
)
expected_resources._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'VCPU': 2,
},
required_traits={
'CUSTOM_GOLD',
'CUSTOM_SILVER',
}
)
expected_resources._rg_by_id['24'] = objects.RequestGroup(
requester_id='24',
resources={
'SRIOV_NET_VF': 2,
},
)
expected_resources._rg_by_id['42'] = objects.RequestGroup(
requester_id='42',
resources={
'SRIOV_NET_VF': 1,
}
)
rr = self._test_resources_from_request_spec(expected_resources, flavor)
expected_querystring = (
'group_policy=none&'
'limit=1000&'
'required3=CUSTOM_GOLD%2CCUSTOM_SILVER&'
'resources=CUSTOM_THING%3A123%2CDISK_GB%3A10&'
'resources1=VGPU%3A1%2CVGPU_DISPLAY_HEAD%3A2&'
'resources24=SRIOV_NET_VF%3A2&'
'resources3=VCPU%3A2&'
'resources42=SRIOV_NET_VF%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_all_required_traits(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:HW_CPU_X86_SSE': 'required',
'trait:HW_CPU_X86_AVX': 'required',
'trait:HW_CPU_X86_AVX2': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'HW_CPU_X86_SSE',
'HW_CPU_X86_AVX'
},
forbidden_traits={
'HW_CPU_X86_AVX2'
}
)
resource = self._test_resources_from_request_spec(expected_resources,
flavor)
expected_result = {'HW_CPU_X86_SSE', 'HW_CPU_X86_AVX'}
self.assertEqual(expected_result,
resource.all_required_traits)
def test_resources_from_request_spec_aggregates(self):
destination = objects.Destination()
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor,
requested_destination=destination)
destination.require_aggregates(['foo', 'bar'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar']],
req.get_request_group(None).aggregates)
destination.require_aggregates(['baz'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar'], ['baz']],
req.get_request_group(None).aggregates)
def test_resources_from_request_spec_no_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination.aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
def test_resources_from_request_spec_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(
flavor=flavor,
requested_destination=objects.Destination(
forbidden_aggregates=set(['foo', 'bar'])))
req = utils.resources_from_request_spec(self.context, reqspec,
self.mock_host_manager)
self.assertEqual(set(['foo', 'bar']),
req.get_request_group(None).forbidden_aggregates)
def test_resources_from_request_spec_no_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination.forbidden_aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
def test_process_extra_specs_granular_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_extra_specs_granular_not_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_missing_extra_specs_value(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_no_force_hosts_or_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rr = self._test_resources_from_request_spec(expected, flavor)
expected_querystring = (
'limit=1000&'
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_process_use_force_nodes(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='test')])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, None, 'test', cell=None)
def test_process_use_force_hosts(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_force_hosts_multinodes_found(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012'),
objects.ComputeNode(host='test',
uuid='87654321-4321-4321-4321-210987654321'),
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
# Validate that the limit is unset
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
# Validate that the limit is unset
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_requested_destination(self):
fake_cell = objects.CellMapping(uuid=uuids.cell1, name='foo')
destination = objects.Destination(
host='fake-host', node='fake-node', cell=fake_cell)
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='fake-node')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, requested_destination=destination)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(
self.context, 'fake-host', 'fake-node', cell=fake_cell)
def test_resources_from_request_spec_having_requested_resources(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
rg1 = objects.RequestGroup(
resources={'CUSTOM_FOO': 1}, requester_id='The-first-group')
# Leave requester_id out to trigger ValueError
rg2 = objects.RequestGroup(required_traits={'CUSTOM_BAR'})
reqspec = objects.RequestSpec(flavor=flavor,
requested_resources=[rg1, rg2])
self.assertRaises(
ValueError,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Set conflicting requester_id
rg2.requester_id = 'The-first-group'
self.assertRaises(
exception.RequestGroupSuffixConflict,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Good path: nonempty non-conflicting requester_id
rg2.requester_id = 'The-second-group'
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertIs(rg1, req.get_request_group('The-first-group'))
self.assertIs(rg2, req.get_request_group('The-second-group'))
# Make sure those ended up as suffixes correctly
qs = req.to_querystring()
self.assertIn('resourcesThe-first-group=CUSTOM_FOO%3A1', qs)
self.assertIn('requiredThe-second-group=CUSTOM_BAR', qs)
def test_resources_from_request_spec_requested_resources_unfilled(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
reqspec = objects.RequestSpec(flavor=flavor, requested_resources=[])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
@ddt.data(
# Test single hint that we are checking for.
{'group': [uuids.fake]},
# Test hint we care about and some other random hint.
{'same_host': [uuids.fake], 'fake-hint': ['fake-value']},
# Test multiple hints we are checking for.
{'same_host': [uuids.server1], 'different_host': [uuids.server2]})
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
"""Tests that there is no limit applied to the
GET /allocation_candidates query string if a given scheduler hint
is in the request spec.
"""
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, scheduler_hints=hints)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, resources.to_querystring())
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 16,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 6, # No root disk...
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_resources_from_flavor_with_override(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1024,
extra_specs={
# Replace
'resources:VCPU': '2',
# Sum up
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom
'resources:CUSTOM_THING': '123',
# Ignore
'trait:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': 0,
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': 0,
'resources:CUSTOM_FOO': 0,
'group_policy': 'none'})
instance = objects.Instance()
expected = {
'VCPU': 2,
'DISK_GB': 16,
'CUSTOM_THING': 123,
'SRIOV_NET_VF': 3,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
def test_resource_request_init(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources:VCPU': '2',
'resources:MEMORY_MB': '2048',
'trait:HW_CPU_X86_AVX': 'required',
# Key skipped because no colons
'nocolons': '42',
'trait:CUSTOM_MAGIC': 'required',
'trait:CUSTOM_BRONZE': 'forbidden',
# Resource skipped because invalid resource class name
'resources86:CUTSOM_MISSPELLED': '86',
'resources1:SRIOV_NET_VF': '1',
# Resource skipped because non-int-able value
'resources86:CUSTOM_FOO': 'seven',
# Resource skipped because negative value
'resources86:CUSTOM_NEGATIVE': '-7',
'resources1:IPV4_ADDRESS': '1',
# Trait skipped because unsupported value
'trait86:CUSTOM_GOLD': 'preferred',
'trait1:CUSTOM_PHYSNET_NET1': 'required',
'trait1:CUSTOM_PHYSNET_NET2': 'forbidden',
'resources2:SRIOV_NET_VF': '1',
'resources2:IPV4_ADDRESS': '2',
'trait2:CUSTOM_PHYSNET_NET2': 'required',
'trait2:HW_NIC_ACCEL_SSL': 'required',
# Groupings that don't quite match the patterns are ignored
'resources_*5:SRIOV_NET_VF': '7',
'traitFoo$:HW_NIC_ACCEL_SSL': 'required',
# Solo resource, no corresponding traits
'resources3:DISK_GB': '5',
'group_policy': 'isolate',
})
expected = FakeResourceRequest()
expected._group_policy = 'isolate'
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_AVX',
'CUSTOM_MAGIC',
},
forbidden_traits={
'CUSTOM_BRONZE',
},
)
expected._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 1,
},
required_traits={
'CUSTOM_PHYSNET_NET1',
},
forbidden_traits={
'CUSTOM_PHYSNET_NET2',
},
)
expected._rg_by_id['2'] = objects.RequestGroup(
requester_id='2',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 2,
},
required_traits={
'CUSTOM_PHYSNET_NET2',
'HW_NIC_ACCEL_SSL',
}
)
expected._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'DISK_GB': 5,
}
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
expected_querystring = (
'group_policy=isolate&'
'limit=1000&'
'required=CUSTOM_MAGIC%2CHW_CPU_X86_AVX%2C%21CUSTOM_BRONZE&'
'required1=CUSTOM_PHYSNET_NET1%2C%21CUSTOM_PHYSNET_NET2&'
'required2=CUSTOM_PHYSNET_NET2%2CHW_NIC_ACCEL_SSL&'
'resources=MEMORY_MB%3A2048%2CVCPU%3A2&'
'resources1=IPV4_ADDRESS%3A1%2CSRIOV_NET_VF%3A1&'
'resources2=IPV4_ADDRESS%3A2%2CSRIOV_NET_VF%3A1&'
'resources3=DISK_GB%3A5'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def _test_resource_request_init_with_legacy_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'hw:cpu_policy': 'dedicated',
'hw:cpu_thread_policy': 'isolate',
'hw:emulator_threads_policy': 'isolate',
})
return objects.RequestSpec(flavor=flavor, is_bfv=False)
def test_resource_request_init_with_legacy_extra_specs(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have two PCPUs, one due to hw:cpu_policy and the
# other due to hw:emulator_threads_policy
'PCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
# we should forbid hyperthreading due to hw:cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_extra_specs_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw:cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
# we should not forbid hyperthreading despite hw:cpu_thread_policy
# because enable_pinning_translate=False
forbidden_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def test_resource_request_init_with_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_TRUSTED': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd'
})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'CUSTOM_TRUSTED',
}
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def _test_resource_request_init_with_legacy_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'hw_cpu_policy': 'dedicated',
'hw_cpu_thread_policy': 'require',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
return objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
def test_resource_request_init_with_legacy_image_props(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a PCPU due to hw_cpu_policy
'PCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# we should require hyperthreading due to hw_cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_image_props_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw_cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
# we should not require hyperthreading despite hw_cpu_thread_policy
# because enable_pinning_translate=False
required_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def _test_resource_request_init_with_mixed_cpus(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'PCPU': 2,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_mixed_cpus_dedicated(self):
"""Ensure the mixed instance, which is generated through
'hw:cpu_dedicated_mask' extra spec, properly requests the PCPU, VCPU,
MEMORY_MB and DISK_GB resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def test_resource_request_init_with_mixed_cpus_realtime(self):
"""Ensure the mixed instance, which is generated through real-time CPU
interface, properly requests the PCPU, VCPU, MEMORY_MB and DISK_GB
resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def _test_resource_request_init_with_mixed_cpus_iso_emu(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# An extra PCPU resource is requested due to 'ISOLATE' emulator
# thread policy.
'PCPU': 3,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_mixed_cpus_iso_emu_realtime(self):
"""Ensure the mixed instance, which is generated through the
'hw:cpu_dedicated_mask' extra spec, properly requests the PCPU,
VCPU, MEMORY_MB, DISK_GB resources, and ensure an extra PCPU resource is
requested due to the ISOLATE emulator thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
def test_resource_request_init_with_mixed_cpus_iso_emu_dedicated(self):
"""Ensure the mixed instance, which is generated through realtime extra
specs, properly requests the PCPU, VCPU, MEMORY_MB, DISK_GB resources,
and ensure an extra PCPU resource is requested due to the ISOLATE emulator
thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
def test_resource_request_init_is_bfv(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1555)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
# this should only include the ephemeral and swap disk, and the
# latter should be converted from MB to GB and rounded up
'DISK_GB': 7,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=True)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vpmems(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:pmem': '4GB, 4GB,SMALL'})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
'CUSTOM_PMEM_NAMESPACE_4GB': 2,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_1_2(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '1.2', 'hw:tpm_model': 'tpm-tis'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='1.2',
hw_tpm_model='tpm-tis',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_1_2'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_2_0(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '2.0', 'hw:tpm_model': 'tpm-crb'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='2.0',
hw_tpm_model='tpm-crb',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_2_0'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_add_group_inserts_the_group(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg1 = objects.RequestGroup(requester_id='foo',
required_traits={'CUSTOM_FOO'})
req._add_request_group(rg1)
rg2 = objects.RequestGroup(requester_id='bar',
forbidden_traits={'CUSTOM_BAR'})
req._add_request_group(rg2)
self.assertIs(rg1, req.get_request_group('foo'))
self.assertIs(rg2, req.get_request_group('bar'))
def test_empty_groups_forbidden(self):
"""Not allowed to add premade RequestGroup without resources/traits/
aggregates.
"""
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg = objects.RequestGroup(requester_id='foo')
self.assertRaises(ValueError, req._add_request_group, rg)
def test_claim_resources_on_destination_no_source_allocations(self):
"""Tests the negative scenario where the instance does not have
allocations in Placement on the source compute node so no claim is
attempted on the destination compute node.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value={})
@mock.patch.object(reportclient,
'claim_resources',
new_callable=mock.NonCallableMock)
def test(mock_claim, mock_get_allocs):
ex = self.assertRaises(
exception.ConsumerAllocationRetrievalFailed,
utils.claim_resources_on_destination,
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
self.assertIn(
'Expected to find allocations for source node resource '
'provider %s' % source_node.uuid, str(ex))
test()
def test_claim_resources_on_destination_claim_fails(self):
"""Tests the negative scenario where the resource allocation claim
on the destination compute node fails, resulting in an error.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'allocations': {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
},
'consumer_generation': 1,
'project_id': uuids.project_id,
'user_id': uuids.user_id
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value=source_res_allocs)
@mock.patch.object(reportclient,
'claim_resources', return_value=False)
def test(mock_claim, mock_get_allocs):
# NOTE(danms): Don't pass source_node_allocations here to test
# that they are fetched if needed.
self.assertRaises(exception.NoValidHost,
utils.claim_resources_on_destination,
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=1)
test()
def test_claim_resources_on_destination(self):
"""Happy path test where everything is successful."""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer')
@mock.patch.object(reportclient,
'claim_resources', return_value=True)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node,
source_res_allocs, consumer_generation=None)
self.assertFalse(mock_get_allocs.called)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=None)
test()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources(self, mock_is_rebuild, mock_client):
"""Tests that when claim_resources() is called, that we appropriately
call the placement client to claim resources for the instance.
"""
mock_is_rebuild.return_value = False
ctx = nova_context.RequestContext(user_id=uuids.user_id)
spec_obj = objects.RequestSpec(project_id=uuids.project_id)
instance_uuid = uuids.instance
alloc_req = mock.sentinel.alloc_req
mock_client.claim_resources.return_value = True
res = utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.user_id, allocation_request_version=None,
consumer_generation=None)
self.assertTrue(res)
# Now do it again but with RequestSpec.user_id set.
spec_obj.user_id = uuids.spec_user_id
mock_client.reset_mock()
utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.spec_user_id, allocation_request_version=None,
consumer_generation=None)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources_for_policy_check(self, mock_is_rebuild,
mock_client):
mock_is_rebuild.return_value = True
ctx = mock.Mock(user_id=uuids.user_id)
res = utils.claim_resources(ctx, None, mock.sentinel.spec_obj,
mock.sentinel.instance_uuid, [])
self.assertTrue(res)
mock_is_rebuild.assert_called_once_with(mock.sentinel.spec_obj)
self.assertFalse(mock_client.claim_resources.called)
def test_get_weight_multiplier(self):
host_attr = {'vcpus_total': 4, 'vcpus_used': 6,
'cpu_allocation_ratio': 1.0}
host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': 'invalid'},
)]
# Fall back to the given default value if the agg meta is invalid.
self.assertEqual(
1.0,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'},
)]
# Get value from aggregate metadata
self.assertEqual(
1.9,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'}),
objects.Aggregate(
id=2,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.8'}),
]
# Get min value from aggregate metadata
self.assertEqual(
1.8,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
def _set_up_and_fill_provider_mapping(self, requested_resources):
request_spec = objects.RequestSpec()
request_spec.requested_resources = requested_resources
allocs = {
uuids.rp_uuid1: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
},
uuids.rp_uuid2: {
'resources': {
'NET_BW_INGR_KILOBIT_PER_SEC': 1,
}
}
}
mappings = {
uuids.port_id1: [uuids.rp_uuid2],
uuids.port_id2: [uuids.rp_uuid1],
}
allocation_req = {'allocations': allocs, 'mappings': mappings}
selection = objects.Selection(
allocation_request=jsonutils.dumps(allocation_req))
# Unmapped initially
for rg in requested_resources:
self.assertEqual([], rg.provider_uuids)
utils.fill_provider_mapping(request_spec, selection)
def test_fill_provider_mapping(self):
rg1 = objects.RequestGroup(requester_id=uuids.port_id1)
rg2 = objects.RequestGroup(requester_id=uuids.port_id2)
self._set_up_and_fill_provider_mapping([rg1, rg2])
# Validate the mappings
self.assertEqual([uuids.rp_uuid2], rg1.provider_uuids)
self.assertEqual([uuids.rp_uuid1], rg2.provider_uuids)
def test_fill_provider_mapping_no_op(self):
# This just proves that having 'mappings' in the allocation request
# doesn't break anything.
self._set_up_and_fill_provider_mapping([])
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation_returns_early(
self, mock_map):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
# set up the request that there is nothing to do
request_spec.requested_resources = []
report_client = mock.sentinel.report_client
allocation = mock.sentinel.allocation
utils.fill_provider_mapping_based_on_allocation(
context, report_client, request_spec, allocation)
mock_map.assert_not_called()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation(
self, mock_map, mock_report_client):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
# set up the request that there is nothing to do
request_spec.requested_resources = [objects.RequestGroup()]
allocation = {
uuids.rp_uuid: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
}
traits = ['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL']
mock_report_client.get_provider_traits.return_value = report.TraitInfo(
traits=['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'],
generation=0)
utils.fill_provider_mapping_based_on_allocation(
context, mock_report_client, request_spec, allocation)
mock_map.assert_called_once_with(allocation, {uuids.rp_uuid: traits})
class TestEncryptedMemoryTranslation(TestUtilsBase):
flavor_name = 'm1.test'
image_name = 'cirros'
def _get_request_spec(self, extra_specs, image):
flavor = objects.Flavor(name=self.flavor_name,
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs=extra_specs)
# NOTE(aspiers): RequestSpec.flavor is not nullable, but
# RequestSpec.image is.
reqspec = objects.RequestSpec(flavor=flavor)
if image:
reqspec.image = image
return reqspec
def _get_resource_request(self, extra_specs, image):
reqspec = self._get_request_spec(extra_specs, image)
return utils.ResourceRequest(reqspec)
def _get_expected_resource_request(self, mem_encryption_context):
expected_resources = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
if mem_encryption_context:
expected_resources[orc.MEM_ENCRYPTION_CONTEXT] = 1
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources=expected_resources)
return expected
def _test_encrypted_memory_support_not_required(self, extra_specs,
image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(False)
self.assertResourceRequestsEqual(expected, resreq)
def test_encrypted_memory_support_empty_extra_specs(self):
self._test_encrypted_memory_support_not_required(extra_specs={})
def test_encrypted_memory_support_false_extra_spec(self):
for extra_spec in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec})
def test_encrypted_memory_support_empty_image_props(self):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(properties=objects.ImageMetaProps()))
def test_encrypted_memory_support_false_image_prop(self):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_false(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def _test_encrypted_memory_support_conflict(self, extra_spec,
image_prop_in,
image_prop_out):
# NOTE(aspiers): hw_mem_encryption image property is a
# FlexibleBooleanField, so the result should always be coerced
# to a boolean.
self.assertIsInstance(image_prop_out, bool)
image = objects.ImageMeta(
name=self.image_name,
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop_in)
)
reqspec = self._get_request_spec(
extra_specs={'hw:mem_encryption': extra_spec},
image=image)
# Sanity check that our test request spec has an extra_specs
# dict, which is needed in order for there to be a conflict.
self.assertIn('flavor', reqspec)
self.assertIn('extra_specs', reqspec.flavor)
error = (
"Flavor %(flavor_name)s has hw:mem_encryption extra spec "
"explicitly set to %(flavor_val)s, conflicting with "
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
exc = self.assertRaises(
exception.FlavorImageConflict,
utils.ResourceRequest, reqspec
)
error_data = {
'flavor_name': self.flavor_name,
'flavor_val': extra_spec,
'image_name': self.image_name,
'image_val': image_prop_out,
}
self.assertEqual(error % error_data, str(exc))
def test_encrypted_memory_support_conflict1(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop_in in ('1', 'true', 'True'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, True
)
def test_encrypted_memory_support_conflict2(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop_in in ('0', 'false', 'False'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, False
)
@mock.patch.object(utils, 'LOG')
def _test_encrypted_memory_support_required(self, requesters, extra_specs,
mock_log, image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(True)
self.assertResourceRequestsEqual(expected, resreq)
mock_log.debug.assert_has_calls([
mock.call('Added %s=1 to requested resources',
orc.MEM_ENCRYPTION_CONTEXT)
])
def test_encrypted_memory_support_extra_spec(self):
for extra_spec in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi'))
)
def test_encrypted_memory_support_image_prop(self):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw_mem_encryption image property',
{},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_required(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec and '
'hw_mem_encryption image property',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
class TestResourcesFromRequestGroupDefaultPolicy(test.NoDBTestCase):
"""These test cases assert what happens when the group policy is missing
from the flavor but more than one numbered request group is requested from
    various sources. Note that while an image can provide required traits for
    the resource request, those traits are always added to the unnumbered
    group, so an image cannot be a source of additional numbered groups.
"""
def setUp(self):
super(TestResourcesFromRequestGroupDefaultPolicy, self).setUp()
self.context = nova_context.get_admin_context()
self.port_group1 = objects.RequestGroup.from_port_request(
self.context, uuids.port1,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
})
self.port_group2 = objects.RequestGroup.from_port_request(
self.context, uuids.port2,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 2000,
"NET_BW_EGR_KILOBIT_PER_SEC": 2000},
"required": ["CUSTOM_PHYSNET_3",
"CUSTOM_VNIC_TYPE_DIRECT"]
})
self.image = objects.ImageMeta(properties=objects.ImageMetaProps())
def test_one_group_from_flavor_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_one_group_from_port_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_flavor_only_warns(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
'resources2:CUSTOM_FOO': '1'
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy', rr.to_querystring())
def test_one_group_from_flavor_one_from_port_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_ports_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1, self.port_group2])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
|
apache-2.0
| -4,330,534,855,583,730,700
| 40.614035
| 79
| 0.542183
| false
| 4.13728
| true
| false
| false
|
willettk/gzhubble
|
python/fzeta_examples.py
|
1
|
5008
|
from matplotlib import pyplot as plt
from astropy.table import Table
from scipy import optimize
from scipy.stats import distributions as dist
import numpy as np
# Load data
data = Table.read("../data/ferengi_debiasable_data.fits")
# Use only galaxies with surface brightness/redshift ranges that are considered "debiasable"
data = data#[data['Correctable_Category']=='correctable']
# Limit to galaxies that have data at z_sim = 0.3, since that's what we're normalizing to.
unique_galaxies = set(data['sdss_id'])
z0ind = np.zeros(len(data),dtype=bool)
for ug in unique_galaxies:
ind = (data['sdss_id'] == ug)
if data[ind]['sim_redshift'].min() < 0.301:
z0ind[ind] = True
data_z0 = data[z0ind]
def fzeta_exp(p,x):
#y = p[0] * np.exp(-1 * (x-p[1])/p[2])
y = np.exp(-1 * (x-0.3)/p[0])
return y
def fzeta_lin(p,x):
y = p[0] + p[1] * x
return y
def fzeta(p,x):
# results are qualitatively the same for both lin and exp versions
return fzeta_exp(p,x)
def errfunc(p,x,y,s):
err = (y - fzeta(p,x))/s
return err
def errfunc_lin(p,x,y,s):
err = (y - fzeta_lin(p,x))/s
return err
def error_bars(k,n=40,c=0.683):
f_gal_lower = dist.beta.ppf((1-c)/2.,k+1,n-k+1)
f_gal_upper = dist.beta.ppf(1-(1-c)/2.,k+1,n-k+1)
f_gal_err = (f_gal_upper - f_gal_lower) / 2.0
return f_gal_err
def common_labels(fig,xlabel=None,ylabel=None,xfontsize=16,yfontsize=40,
xlabelpad=None, ylabelpad=None):
# Set common labels
cax = fig.add_subplot(111) # The big subplot
cax.set_axis_bgcolor('none')
cax.spines['top'].set_color('none')
cax.spines['bottom'].set_color('none')
cax.spines['left'].set_color('none')
cax.spines['right'].set_color('none')
cax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
cax.set_xlabel(xlabel,fontsize=xfontsize, labelpad=xlabelpad)
cax.set_ylabel(ylabel,fontsize=yfontsize, labelpad=ylabelpad)
return cax
p_guess = np.array([0.5])
nrows = 4
ncols = 5
# Set up plot
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True,figsize=(18,14))
bigax = common_labels(fig,'Redshift',r'$\frac{1-f_{{\rm features},z=0.3}}{1-f_{\rm features}}$',20,28, 12, 12)
zarr = np.linspace(0,1,50)
# For examples, only plot galaxies with an evolution correction of zero.
evol = 0.0
e0 = data_z0[np.absolute(data_z0['sim_evolution'] - evol) < 0.001]
e0_z0 = e0[e0['sim_redshift'] < 0.35]
#e0_z0 = data_z0
unique_galaxies = []
plist = np.linspace(0.1, 1.0, nrows*ncols+1)
nchoose = 2
for p1, p2 in zip(plist[:-1], plist[1:]):
p_match = (e0_z0['p_features'] > p1) & (e0_z0['p_features'] <= p2)
if p_match.sum() > nchoose:
#print(p_match.sum(), p_match.nonzero()[0], nchoose)
p_match = np.random.choice(p_match.nonzero()[0], nchoose, replace=False)
nchoose = 1
elif p_match.sum() <= nchoose:
#print(p_match.sum(), p_match.nonzero()[0], nchoose)
nchoose = 1 + nchoose - p_match.sum()
p_match = p_match.nonzero()[0]
p_match = p_match[np.argsort(e0_z0['p_features'][p_match])]
unique_galaxies.extend(e0_z0['sdss_id'][p_match])
for ax in axarr.ravel():
if len(unique_galaxies) == 0: break
slen = 0
# Make sure there are enough points to fit a function
while slen < (len(p_guess)+1):
ind = (e0['sdss_id'] == unique_galaxies.pop())
slen = sum(ind)
galaxy1 = e0[ind]
galaxy1.sort('sim_redshift')
z_gal = galaxy1['sim_redshift']
f_gal = galaxy1['p_features']
# ADD ERROR BARS
n = 40 # assume 40 classifications per galaxy; it'd be better to use true value, though
f_gal_err = error_bars(f_gal*n,n)
f_gal_norm = (1-f_gal[0]) /(1- f_gal)
f_gal_norm_err = np.sqrt((f_gal_err/f_gal)**2 + (f_gal_err[0]/f_gal[0])**2) * f_gal_norm
# Values must be explicitly cast as double-type precision for optimization to work. Incredibly frustrating.
# Fix: http://stackoverflow.com/questions/12473406/scipy-optimize-leastsq-returns-best-guess-parameters-not-new-best-fit
p, cov, infodict, mesg, ier = optimize.leastsq(errfunc,p_guess,args=(z_gal.astype(np.float64),
f_gal_norm.astype(np.float64),
f_gal_norm_err.astype(np.float64)),
full_output=1)
ax.plot(z_gal,f_gal_norm,lw=2)
ax.errorbar(z_gal,f_gal_norm, f_gal_norm_err)
ax.plot(zarr,fzeta_exp(p,zarr),'--',lw=1)
zeta = '={:.2f}'.format(p[0]) if p[0] <= 10.0 else '>10'
# ax.set_title('$f_{z=0}={:.2f}\; \zeta{:s}$'.format(f_gal[0], zeta), y=1.01, fontsize=16)
ax.set_title('$f_{features,z=0.3}=%s\; \zeta%s$'%(round(f_gal[0],2),zeta),y=1.01,fontsize=16)
ax.set_xlim(0.11,1.05)
ax.set_ylim(0,2)
fig.subplots_adjust(hspace=0.20, wspace=0.05)
fig.savefig('../writeup/figures/zeta_examples_sorted.pdf')
|
mit
| -8,789,516,641,017,032,000
| 31.732026
| 124
| 0.608826
| false
| 2.619247
| false
| false
| false
|
mozilla/kuma
|
kuma/core/validators.py
|
1
|
3885
|
# see also: http://github.com/tav/scripts/raw/master/validate_jsonp.py
# Placed into the Public Domain by tav <tav@espians.com>
"""Validate Javascript Identifiers for use as JSON-P callback parameters."""
import re
from unicodedata import category
# ------------------------------------------------------------------------------
# javascript identifier unicode categories and "exceptional" chars
# ------------------------------------------------------------------------------
valid_jsid_categories_start = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl'
])
valid_jsid_categories = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Mn', 'Mc', 'Nd', 'Pc'
])
valid_jsid_chars = ('$', '_')
# ------------------------------------------------------------------------------
# regex to find array[index] patterns
# ------------------------------------------------------------------------------
array_index_regex = re.compile(r'\[[0-9]+\]$')
has_valid_array_index = array_index_regex.search
replace_array_index = array_index_regex.sub
# ------------------------------------------------------------------------------
# javascript reserved words -- including keywords and null/boolean literals
# ------------------------------------------------------------------------------
is_reserved_js_word = frozenset([
'abstract', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class',
'const', 'continue', 'debugger', 'default', 'delete', 'do', 'double',
'else', 'enum', 'export', 'extends', 'false', 'final', 'finally', 'float',
'for', 'function', 'goto', 'if', 'implements', 'import', 'in', 'instanceof',
'int', 'interface', 'long', 'native', 'new', 'null', 'package', 'private',
'protected', 'public', 'return', 'short', 'static', 'super', 'switch',
'synchronized', 'this', 'throw', 'throws', 'transient', 'true', 'try',
'typeof', 'var', 'void', 'volatile', 'while', 'with',
# potentially reserved in a future version of the ES5 standard
# 'let', 'yield'
]).__contains__
# ------------------------------------------------------------------------------
# the core validation functions
# ------------------------------------------------------------------------------
def valid_javascript_identifier(identifier, escape='\\u', ucd_cat=category):
    """Return whether the given ``identifier`` is a valid Javascript identifier."""
if not identifier:
return False
if not isinstance(identifier, str):
try:
identifier = str(identifier, 'utf-8')
except UnicodeDecodeError:
return False
if escape in identifier:
new = []
add_char = new.append
split_id = identifier.split(escape)
add_char(split_id.pop(0))
for segment in split_id:
if len(segment) < 4:
return False
try:
add_char(chr(int('0x' + segment[:4], 16)))
except Exception:
return False
add_char(segment[4:])
identifier = ''.join(new)
if is_reserved_js_word(identifier):
return False
first_char = identifier[0]
if not ((first_char in valid_jsid_chars) or
(ucd_cat(first_char) in valid_jsid_categories_start)):
return False
for char in identifier[1:]:
if not ((char in valid_jsid_chars) or
(ucd_cat(char) in valid_jsid_categories)):
return False
return True
def valid_jsonp_callback_value(value):
"""Return whether the given ``value`` can be used as a JSON-P callback."""
for identifier in value.split('.'):
while '[' in identifier:
if not has_valid_array_index(identifier):
return False
identifier = replace_array_index('', identifier)
if not valid_javascript_identifier(identifier):
return False
return True
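# ------------------------------------------------------------------------------
# illustrative usage (not part of the original module)
# ------------------------------------------------------------------------------
# A minimal, runnable sketch of how the two validators above behave; the
# callback names are made up for the example.
if __name__ == '__main__':
    # plain identifiers and dotted/indexed callbacks are accepted
    assert valid_javascript_identifier('myCallback')
    assert valid_jsonp_callback_value('jQuery.callbacks[0].run')
    # reserved words, leading digits and empty values are rejected
    assert not valid_javascript_identifier('function')
    assert not valid_javascript_identifier('1abc')
    assert not valid_jsonp_callback_value('')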
|
mpl-2.0
| 3,343,287,382,419,359,000
| 32.491379
| 80
| 0.501673
| false
| 4.307095
| false
| false
| false
|
all-of-us/raw-data-repository
|
rdr_service/model/bq_workbench_workspace.py
|
1
|
9460
|
from rdr_service.model.bq_base import BQTable, BQSchema, BQView, BQField, BQFieldTypeEnum, BQFieldModeEnum, \
BQRecordField
class BQWorkspaceRaceEthnicitySchema(BQSchema):
race_ethnicity = BQField('race_ethnicity', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
race_ethnicity_id = BQField('race_ethnicity_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQWorkspaceAgeSchema(BQSchema):
age = BQField('age', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
age_id = BQField('age_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspaceSchema(BQSchema):
"""
Represents the workbench_workspace_snapshot table.
"""
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
workspace_source_id = BQField('workspace_source_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
name = BQField('name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
creation_time = BQField('creation_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified_time = BQField('modified_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
status = BQField('status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
status_id = BQField('status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
exclude_from_public_directory = BQField('exclude_from_public_directory', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
disease_focused_research = BQField('disease_focused_research', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
disease_focused_research_name = BQField('disease_focused_research_name', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
other_purpose_details = BQField('other_purpose_details', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
methods_development = BQField('methods_development', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
control_set = BQField('control_set', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ancestry = BQField('ancestry', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
social_behavioral = BQField('social_behavioral', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
population_health = BQField('population_health', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
drug_development = BQField('drug_development', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
commercial_purpose = BQField('commercial_purpose', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
educational = BQField('educational', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
other_purpose = BQField('other_purpose', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ethical_legal_social_implications = BQField('ethical_legal_social_implications', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
scientific_approaches = BQField('scientific_approaches', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
intend_to_study = BQField('intend_to_study', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
findings_from_study = BQField('findings_from_study', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
focus_on_underrepresented_populations = BQField('focus_on_underrepresented_populations', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
race_ethnicities = BQRecordField('race_ethnicities', BQWorkspaceRaceEthnicitySchema)
ages = BQRecordField('ages', BQWorkspaceAgeSchema)
sex_at_birth = BQField('sex_at_birth', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sex_at_birth_id = BQField('sex_at_birth_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
gender_identity = BQField('gender_identity', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gender_identity_id = BQField('gender_identity_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
sexual_orientation = BQField('sexual_orientation', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sexual_orientation_id = BQField('sexual_orientation_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
geography = BQField('geography', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
geography_id = BQField('geography_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
disability_status = BQField('disability_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
disability_status_id = BQField('disability_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
access_to_care = BQField('access_to_care', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_to_care_id = BQField('access_to_care_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
education_level = BQField('education_level', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
education_level_id = BQField('education_level_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
income_level = BQField('income_level', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
income_level_id = BQField('income_level_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
others = BQField('others', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
review_requested = BQField('review_requested', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
is_reviewed = BQField('is_reviewed', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
cdr_version = BQField('cdr_version', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_tier = BQField('access_tier', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_tier_id = BQField('access_tier_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspace(BQTable):
""" Research Workbench Workspace BigQuery Table """
__tablename__ = 'rwb_workspace'
__schema__ = BQRWBWorkspaceSchema
class BQRWBWorkspaceView(BQView):
__viewname__ = 'v_rwb_workspace'
__viewdescr__ = 'Research Workbench Workspace View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
# We need to build a SQL statement with all fields except sub-tables and remove duplicates.
__sql__ = """
SELECT
%%FIELD_LIST%%
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t
WHERE t.modified = t.max_timestamp
""".replace('%%FIELD_LIST%%', BQRWBWorkspaceSchema.get_sql_field_names(
exclude_fields=[
'race_ethnicities',
'ages'
])
)
class BQRWBWorkspaceRaceEthnicityView(BQView):
__viewname__ = 'v_rwb_workspace_race_ethnicity'
__viewdescr__ = 'Research Workbench Workspace Race Ethnicity View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
__sql__ = """
SELECT t.id, t.created, t.modified, t.workspace_source_id, nt.*
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t cross join unnest(race_ethnicities) as nt
WHERE t.modified = t.max_timestamp
"""
class BQRWBWorkspaceAgeView(BQView):
__viewname__ = 'v_rwb_workspace_age'
__viewdescr__ = 'Research Workbench Workspace Age View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
__sql__ = """
SELECT t.id, t.created, t.modified, t.workspace_source_id, nt.*
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t cross join unnest(ages) as nt
WHERE t.modified = t.max_timestamp
"""
class BQRWBWorkspaceUsersSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
workspace_id = BQField('workspace_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
user_id = BQField('user_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
role = BQField('role', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
role_id = BQField('role_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
status = BQField('status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
status_id = BQField('status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
researcher_id = BQField('researcher_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
is_creator = BQField('is_creator', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspaceUsers(BQTable):
""" Research Workbench Workspace Users BigQuery Table """
__tablename__ = 'rwb_workspace_users'
__schema__ = BQRWBWorkspaceUsersSchema
class BQRWBWorkspaceUsersView(BQView):
__viewname__ = 'v_rwb_workspace_users'
__viewdescr__ = 'Research Workbench Workspace Users View'
__pk_id__ = 'id'
__table__ = BQRWBWorkspaceUsers
|
bsd-3-clause
| -1,766,450,911,606,441,000
| 54.321637
| 117
| 0.723996
| false
| 3.272224
| false
| false
| false
|
Jaesin/OctoPrint
|
src/octoprint/vendor/sockjs/tornado/transports/jsonp.py
|
1
|
3693
|
# -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.jsonp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
JSONP transport implementation.
"""
import logging
from tornado.web import asynchronous
from octoprint.vendor.sockjs.tornado import proto
from octoprint.vendor.sockjs.tornado.transports import pollingbase
from octoprint.vendor.sockjs.tornado.util import bytes_to_str, unquote_plus
LOG = logging.getLogger("tornado.general")
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id, False):
return
# Might get already detached because connection was closed in on_open
if not self.session:
return
if not self.session.send_queue:
self.session.start_heartbeat()
else:
self.session.flush()
def send_pack(self, message, binary=False):
if binary:
raise Exception('binary not supported for JSONPTransport')
self.active = False
try:
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type', 'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# TODO: Fix me
self.set_header('Etag', 'dummy')
self.write(msg)
self.flush(callback=self.send_complete)
except IOError:
# If connection dropped, make sure we close offending session instead
# of propagating error all way up.
self.session.delayed_close()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None or session.is_closed:
self.set_status(404)
return
data = bytes_to_str(self.request.body)
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
if not data.startswith('d='):
LOG.exception('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = unquote_plus(data[2:])
if not data:
LOG.debug('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
LOG.debug('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.on_messages(messages)
except Exception:
LOG.exception('jsonp_send: on_message() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
|
agpl-3.0
| -149,909,300,359,437,600
| 28.07874
| 84
| 0.581099
| false
| 4.284223
| false
| false
| false
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/hplip/ui4/printernamecombobox.py
|
1
|
5122
|
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# Std Lib
#import sys
# Local
from base.g import *
from ui_utils import *
from base import device
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY = 0
PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY = 1
PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX = 2
class PrinterNameComboBox(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.printer_name = ''
self.device_uri = ''
self.printer_index = {}
self.initial_printer = None
self.updating = False
self.typ = PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY
self.user_settings = UserSettings()
self.user_settings.load()
self.user_settings.debug()
self.initUi()
def initUi(self):
#print "PrinterNameComboBox.initUi()"
HBoxLayout = QHBoxLayout(self)
HBoxLayout.setObjectName("HBoxLayout")
self.NameLabel = QLabel(self)
self.NameLabel.setObjectName("NameLabel")
HBoxLayout.addWidget(self.NameLabel)
SpacerItem = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Minimum)
HBoxLayout.addItem(SpacerItem)
self.ComboBox = QComboBox(self)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ComboBox.sizePolicy().hasHeightForWidth())
self.ComboBox.setSizePolicy(sizePolicy)
self.ComboBox.setObjectName("ComboBox")
HBoxLayout.addWidget(self.ComboBox)
self.NameLabel.setText(self.__tr("Printer:"))
#self.connect(self.ComboBox, SIGNAL("currentIndexChanged(int)"),
# self.ComboBox_currentIndexChanged)
self.connect(self.ComboBox, SIGNAL("currentIndexChanged(const QString &)"),
self.ComboBox_currentIndexChanged)
def setType(self, typ):
if typ in (PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY,
PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY,
PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX):
self.typ = typ
def setInitialPrinter(self, printer_name):
self.initial_printer = printer_name
def updateUi(self):
#print "PrinterNameComboBox.updateUi()"
if self.typ == PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY:
self.NameLabel.setText(self.__tr("Printer Name:"))
be_filter = ['hp']
elif self.typ == PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY:
self.NameLabel.setText(self.__tr("Fax Name:"))
be_filter = ['hpfax']
else: # PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX
self.NameLabel.setText(self.__tr("Printer/Fax Name:"))
be_filter = ['hp', 'hpfax']
self.printers = device.getSupportedCUPSPrinters(be_filter)
self.printer_index.clear() # = {}
if self.printers:
if self.initial_printer is None:
#user_conf.get('last_used', 'printer_name')
self.initial_printer = self.user_settings.last_used_printer
self.updating = True
try:
k = 0
for i, p in enumerate(self.printers):
self.printer_index[p.name] = p.device_uri
self.ComboBox.insertItem(i, p.name)
if self.initial_printer is not None and p.name == self.initial_printer:
self.initial_printer = None
k = i
self.ComboBox.setCurrentIndex(-1)
finally:
self.updating = False
self.ComboBox.setCurrentIndex(k)
else:
self.emit(SIGNAL("PrinterNameComboBox_noPrinters"))
def ComboBox_currentIndexChanged(self, t):
self.printer_name = unicode(t)
if self.updating:
return
self.device_uri = self.printer_index[self.printer_name]
#user_conf.set('last_used', 'printer_name', self.printer_name)
self.user_settings.last_used_printer = self.printer_name
self.user_settings.save()
self.emit(SIGNAL("PrinterNameComboBox_currentChanged"), self.device_uri, self.printer_name)
def __tr(self,s,c = None):
return qApp.translate("PrinterNameComboBox",s,c)
|
gpl-3.0
| 8,314,230,958,582,028,000
| 32.045161
| 99
| 0.641156
| false
| 3.877366
| false
| false
| false
|
IntersectAustralia/hcsvlab_robochef
|
hcsvlab_robochef/paradisec/rdf.py
|
1
|
1904
|
from hcsvlab_robochef.rdf.map import *
PARADISEC = "PARADISEC"
paradisecSpeakerMap = FieldMapper(AUSNC)
paradisecSpeakerMap.add('name', mapto=FOAF.name)
paradisecSpeakerMap.add('role', ignore=True)
paradisecMap = MetadataMapper(PARADISEC, speakerMap=paradisecSpeakerMap, documentMap = get_generic_doc_mapper())
paradisecMap.add('Box', mapto=DC.box)
paradisecMap.add('DCMIType', mapto=DC.type, ignore=True)
paradisecMap.add('ISO3166', mapto=DC.coverage)
paradisecMap.add('URI', ignore=True)
paradisecMap.add('W3CDTF', mapto=DC.created)
paradisecMap.add('accessRights', mapto=DC.accessRights)
paradisecMap.add('author', mapto=OLAC.author, ignore=True)
paradisecMap.add('bibliographicCitation', mapto=DC.bibliographicCitation)
paradisecMap.add('compiler', mapto=OLAC.compiler, ignore=True)
paradisecMap.add('consultant', mapto=OLAC.consultant, ignore=True)
paradisecMap.add('data_inputter', mapto=OLAC.data_inputter, ignore=True)
paradisecMap.add('depositor', mapto=OLAC.depositor, ignore=True)
paradisecMap.add('description', mapto=DC.description)
paradisecMap.add('discourse-type', mapto=OLAC.discourse_type)
paradisecMap.add('format', ignore=True)
paradisecMap.add('identifier', mapto=DC.identifier)
paradisecMap.add('interviewer', mapto=OLAC.interviewer, ignore=True)
paradisecMap.add('language', mapto=OLAC.language)
paradisecMap.add('linguistic-field', mapto=OLAC.linguistic_field)
paradisecMap.add('linguistic-type', mapto=OLAC.linguistic_type)
paradisecMap.add('photographer', mapto=OLAC.photographer, ignore=True)
paradisecMap.add('recorder', mapto=OLAC.recorder, ignore=True)
paradisecMap.add('researcher', mapto=OLAC.researcher, ignore=True)
paradisecMap.add('rights', mapto=DC.rights)
paradisecMap.add('speaker', mapto=OLAC.speaker, ignore=True)
paradisecMap.add('tableOfContents', ignore=True)
paradisecMap.add('title', mapto=DC.title)
paradisecMap.add('type', mapto=DC.type, ignore=True)
|
gpl-3.0
| -8,536,860,154,655,141,000
| 47.820513
| 112
| 0.798319
| false
| 2.689266
| false
| true
| false
|
indro/t2c
|
apps/external_apps/ajax_validation/views.py
|
2
|
1452
|
from django import forms
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from ajax_validation.utils import LazyEncoder
def validate(request, *args, **kwargs):
form_class = kwargs.pop('form_class')
extra_args_func = kwargs.pop('callback', lambda request, *args, **kwargs: {})
kwargs = extra_args_func(request, *args, **kwargs)
kwargs['data'] = request.POST
form = form_class(**kwargs)
if form.is_valid():
data = {
'valid': True,
}
else:
if request.POST.getlist('fields'):
fields = request.POST.getlist('fields') + ['__all__']
errors = dict([(key, val) for key, val in form.errors.iteritems() if key in fields])
else:
errors = form.errors
final_errors = {}
for key, val in errors.iteritems():
if key == '__all__':
final_errors['__all__'] = val
if not isinstance(form.fields[key], forms.FileField):
html_id = form.fields[key].widget.attrs.get('id') or form[key].auto_id
html_id = form.fields[key].widget.id_for_label(html_id)
final_errors[html_id] = val
data = {
'valid': False,
'errors': final_errors,
}
json_serializer = LazyEncoder()
return HttpResponse(json_serializer.encode(data), mimetype='application/json')
validate = require_POST(validate)
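# Illustrative wiring sketch (not part of this module): the form class and URL
# pattern below are hypothetical, but they show how ``validate`` is typically
# exposed so a page can POST its form data for validation. The view answers
# with JSON such as {"valid": true} or
# {"valid": false, "errors": {"id_email": ["Enter a valid e-mail address."]}},
# where the error keys are the widgets' html ids (or "__all__").
#
#     # urls.py (hypothetical)
#     from django.conf.urls.defaults import patterns, url
#     from myproject.forms import ContactForm  # hypothetical form class
#
#     urlpatterns = patterns('',
#         url(r'^ajax_validate/$', 'ajax_validation.views.validate',
#             {'form_class': ContactForm}, name='contact_form_validate'),
#     )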
|
mit
| 6,643,868,920,188,275,000
| 38.243243
| 96
| 0.588154
| false
| 3.978082
| false
| false
| false
|
silentfuzzle/calibre
|
src/calibre/gui2/viewer/behavior_manager/behavior_manager_builder.py
|
1
|
6334
|
__license__ = 'GPL v3'
__copyright__ = '2014, Emily Palmieri <silentfuzzle@gmail.com>'
from calibre.gui2.viewer.behavior.adventurous_behavior import AdventurousBehavior
from calibre.gui2.viewer.behavior.adventurous_base_behavior import BaseAdventurousBehavior
from calibre.gui2.viewer.behavior.calibre_behavior import CalibreBehavior
from calibre.gui2.viewer.toc_sections import TOCSections
from calibre.gui2.viewer.behavior_manager.behavior_manager import BehaviorManager
from calibre.gui2.viewer.behavior_manager.behavior_manager_switch import SwitchBehaviorManager
from calibre.gui2.viewer.toc_container.toc_hierarchy_container import TreeTOCContainer
from calibre.gui2.viewer.toc_container.toc_network_container import NetworkTOCContainer
# This class builds the TOC interface(s) and page numbering behavior(s) to use in the ebook viewer interface
class BehaviorManagerBuilder (object):
# Constructor
# main (EBookViewer) - the ebook viewer interface
# b1_single_document (bool) - True if the main page behavior should display all the book's text in a single document
# b1_use_hierarchy (bool) - True if the main TOC interface should display the TOC in a hierarchy
# switch (bool) - True if the user can switch between two ebook viewer behaviors
# b2_single_document (bool) - True if the second page behavior should display all the book's text in a single document
# b2_use_hierarchy (bool) - True if the second TOC interface should display the TOC in a hierarchy
def __init__(self, main, b1_single_document=True,
b1_use_hierarchy=True, switch=False, b2_single_document=False,
b2_use_hierarchy=False):
# If both interface behaviors are the same, don't create a switch between the two
if (b1_single_document == b2_single_document and
b1_use_hierarchy == b2_use_hierarchy):
switch = False
self.b1_single_document = b1_single_document
self.b1_use_hierarchy = b1_use_hierarchy
self.switch = switch
self.b2_single_document = b2_single_document
self.b2_use_hierarchy = b2_use_hierarchy
# Create a default TOC interface to use until the user selects an ebook
self.default_manager = BehaviorManager(CalibreBehavior(),
TreeTOCContainer(main))
self.network_container = NetworkTOCContainer(main)
# Return a page behavior given if the current ebook should be display in a single document
# single_document (bool) - True if the page behavior should display all the book's text in a single document
# setup_vscrollbar_method (method) - the method from EBookViewer to use when updating the scrollbar and page numbers
def get_page_behavior(self, single_document, setup_vscrollbar_method):
if (single_document):
# Display the book in a single document
page_behavior = CalibreBehavior()
else:
# Break the book into groups and display each group as a separate document
page_behavior = AdventurousBehavior(setup_vscrollbar_method)
return page_behavior
# Return a TOC interface given if it should be displayed as a network or a hierarchy
# use_hierarchy (bool) - True if the TOC interface should display the TOC in a hierarchy
# main (EBookViewer) - the ebook viewer interface
def get_toc_interface(self, use_hierarchy, main):
if (use_hierarchy):
# Display the ebook's TOC as a hierarchy of sections
toc_container = TreeTOCContainer(main)
toc_container.connect_toc_actions(main.toc_clicked)
else:
# Display the ebook's TOC as a network of sections
toc_container = self.network_container
return toc_container
# Returns a behavior manager from the given parameters
# main (EBookViewer) - the ebook viewer interface
def build_behavior_manager(self, main):
# Create the main interface behavior
b1_page_behavior = self.get_page_behavior(self.b1_single_document,
main.setup_vscrollbar)
b1_toc_interface = self.get_toc_interface(
self.b1_use_hierarchy, main)
if (self.switch):
# Create the second interface behavior if specified
b2_page_behavior = self.get_page_behavior(self.b2_single_document,
main.setup_vscrollbar)
b2_toc_interface = self.get_toc_interface(
self.b2_use_hierarchy, main)
# Create a behavior manager to switch between the main and second behavior
behavior_manager = SwitchBehaviorManager(
main, b1_page_behavior,
b1_toc_interface, b2_page_behavior,
b2_toc_interface)
else:
# Disable the behavior toggle
main.action_toggle_adventurous_mode.setVisible(False)
behavior_manager = BehaviorManager(b1_page_behavior,
b1_toc_interface)
self.behavior_manager = behavior_manager
return behavior_manager
# main (EBookViewer) - the ebook viewer interface
# title (string) - the title of the ebook
# pathtoebook (string) - the path to the ebook on the user's file system
def setup_behavior_manager(self, main, title, pathtoebook):
toc = main.iterator.toc
toc_sections = None
# If there isn't a TOC, display the ebook in a single document with a
# hierarchical TOC interface at all times
if (not toc):
main.action_toggle_adventurous_mode.setEnabled(False)
behavior_manager = self.default_manager
else:
main.action_toggle_adventurous_mode.setEnabled(True)
main.set_toc_view(self.behavior_manager.toc_interface)
behavior_manager = self.behavior_manager
toc_sections = TOCSections(toc, main.iterator.spine)
total_num_pages = sum(main.iterator.pages)
behavior_manager.setup_ebook(total_num_pages, toc_sections, main.toc_model, title,
pathtoebook)
# Return the behavior manager to use if it has changed
return behavior_manager
|
gpl-3.0
| 4,841,253,666,804,379,000
| 50.495935
| 122
| 0.667193
| false
| 4.150721
| false
| false
| false
|
xianjunzhengbackup/code
|
data science/machine_learning_for_the_web/chapter_8/movie_reviews_analizer_app/webmining_server/pages/models.py
|
1
|
1215
|
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
class SearchTerm(models.Model):
term = models.CharField(_('search'), max_length=255)
num_reviews = models.IntegerField(null=True,default=0)
#display term on admin panel
def __unicode__(self):
return self.term
class Page(models.Model):
searchterm = models.ForeignKey(SearchTerm, related_name='pages',null=True,blank=True)
url = models.URLField(_('url'), default='', blank=True)
title = models.CharField(_('name'), max_length=255)
depth = models.IntegerField(null=True,default=-1)
html = models.TextField(_('html'),blank=True, default='')
review = models.BooleanField(default=False)
old_rank = models.FloatField(null=True,default=0)
new_rank = models.FloatField(null=True,default=1)
content = models.TextField(_('content'),blank=True, default='')
sentiment = models.IntegerField(null=True,default=100)
class Link(models.Model):
searchterm = models.ForeignKey(SearchTerm, related_name='links',null=True,blank=True)
from_id = models.IntegerField(null=True)
to_id = models.IntegerField(null=True)
|
mit
| 3,143,095,247,785,500,000
| 42.428571
| 90
| 0.699588
| false
| 3.820755
| false
| false
| false
|
ganga-devs/ganga
|
ganga/GangaAtlas/Lib/Athena/ganga-stagein.py
|
1
|
4800
|
#! /usr/bin/env python
import os, sys
from getopt import getopt,GetoptError
from threading import Thread
from commands import getstatusoutput
from lfc import *
def usage():
print 'Name:'
print ' ganga-stagein.py'
print
print 'Arguments:'
print ' logical names'
print
print 'Options:'
    print '  -h, --help this printout'
print ' -i, --input file list of logical names'
print ' -d, --directory path to stage the input files (default $PWD)'
print ' -t, --timeout seconds for the staging in (default 900)'
print ' -r, --retry number for the staging command (default 3)'
print ' -v, --verbose verbosity'
def get_guid(lfn):
'''Get guid for a lfn
'''
statg = lfc_filestatg()
rc = lfc_statg(lfn,'',statg)
if not rc: return statg.guid
def get_replicas(lfn):
    '''List replicas, sorting the ones on close SEs first
'''
replicas = []
listp = lfc_list()
res = lfc_listreplica(lfn,'',CNS_LIST_BEGIN,listp)
while res:
if res.host in closeSEs:
replicas.insert(0,res.sfn)
else:
replicas.append(res.sfn)
res = lfc_listreplica(lfn,'',CNS_LIST_CONTINUE,listp)
lfc_listreplica(lfn,'',CNS_LIST_END,listp)
return replicas
class PoolFileCatalog:
'''Helper class to create PoolFileCatalog.xml
'''
def __init__(self,name='PoolFileCatalog.xml'):
self.pfc = open(name,'w')
print >>self.pfc,'<?xml version="1.0" ?>'
print >>self.pfc,'<POOLFILECATALOG>'
def addFile(self,guid,lfn,pfn):
print >>self.pfc,' <File ID="%s">' % guid
print >>self.pfc,' <logical>'
print >>self.pfc,' <lfn name="%s"/>' % lfn
print >>self.pfc,' </logical>'
print >>self.pfc,' <physical>'
print >>self.pfc,' <pfn filetype="ROOT_All" name="%s"/>' % pfn
print >>self.pfc,' </physical>'
print >>self.pfc,' </File>'
def close(self):
print >>self.pfc,'</POOLFILECATALOG>'
class StageIn(Thread):
def __init__(self,lfn,replicas,file):
Thread.__init__(self)
self.lfn = lfn
self.replicas = replicas
self.file = file
def run(self):
for rep in self.replicas:
for r in xrange(0,retry):
if verbose: print 'INFO LFN: %s Replica: %s Retry: %d' % (lfn,rep,r)
cmd = 'lcg-cp --vo atlas -t %d %s file:%s' % (timeout,rep,self.file)
rc, out = getstatusoutput(cmd)
if not rc: return
print 'Return code %d from %s' % (rc,cmd)
print out
if __name__ == '__main__':
directory = os.getcwd()
retry = 2
timeout = 900
input = None
verbose = False
try:
opts, args = getopt(sys.argv[1:],'ht:d:r:i:v',['help','directory=','input=','timeout=','retry=','verbose'])
except GetoptError:
usage()
sys.exit(1)
for opt, val in opts:
if opt in ['-h','--help']:
usage()
sys.exit()
if opt in ['-d','--directory']:
            directory = val
if opt in ['-i','--input']:
input = val
if opt in ['-t','--timeout']:
timeout = int(val)
if opt in ['-r','--retry']:
retry = int(val)
if opt in ['-v','--verbose']:
verbose = True
if input:
lfns = [ line.strip() for line in file(input) ]
else:
lfns = args
if not len(lfns):
print 'No files requested.'
sys.exit()
# determine the closeSEs
rc, output = getstatusoutput('edg-brokerinfo getCloseSEs')
if rc:
print 'ERROR: Could not determine close SEs'
closeSEs = []
else:
closeSEs = output.split()
print 'INFO: Close SEs are ' + ', '.join(closeSEs)
pfc = PoolFileCatalog()
workers=[]
try: lfc_startsess('','')
except NameError: pass
for lfn in lfns:
if verbose: print 'LFN: %s' % lfn
guid = get_guid(lfn)
if not guid:
print 'ERROR: LFN %s not found.' % lfn
continue
if verbose: print 'GUID: %s' % guid
name = os.path.basename(lfn)
pfn = os.path.join(directory,name)
pfc.addFile(guid,name,pfn)
replicas = get_replicas(lfn)
if not replicas:
print 'ERROR: No replica found for LFN %s' % lfn
continue
if verbose:
print 'Replicas :\n %s' % '\n '.join(replicas)
s = StageIn(lfn,replicas,pfn)
s.start()
workers.append(s)
pfc.close()
try: lfc_stopsess()
except NameError: pass
for s in workers:
s.join()
|
gpl-2.0
| -5,774,584,406,186,113,000
| 23.870466
| 115
| 0.527083
| false
| 3.521643
| false
| false
| false
|
none-da/zeshare
|
debug_toolbar/panels/headers.py
|
1
|
1292
|
from django.template.loader import render_to_string
from debug_toolbar.panels import DebugPanel
class HeaderDebugPanel(DebugPanel):
"""
A panel to display HTTP headers.
"""
name = 'Header'
has_content = True
# List of headers we want to display
header_filter = (
'CONTENT_TYPE',
'HTTP_ACCEPT',
'HTTP_ACCEPT_CHARSET',
'HTTP_ACCEPT_ENCODING',
'HTTP_ACCEPT_LANGUAGE',
'HTTP_CACHE_CONTROL',
'HTTP_CONNECTION',
'HTTP_HOST',
'HTTP_KEEP_ALIVE',
'HTTP_REFERER',
'HTTP_USER_AGENT',
'QUERY_STRING',
'REMOTE_ADDR',
'REMOTE_HOST',
'REQUEST_METHOD',
'SCRIPT_NAME',
'SERVER_NAME',
'SERVER_PORT',
'SERVER_PROTOCOL',
'SERVER_SOFTWARE',
)
def nav_title(self):
return 'HTTP Headers'
def title(self):
return 'HTTP Headers'
def url(self):
return ''
def process_request(self, request):
self.headers = dict(
[(k, request.META[k]) for k in self.header_filter if k in request.META]
)
def content(self):
context = {
'headers': self.headers
}
return render_to_string('debug_toolbar/panels/headers.html', context)
|
bsd-3-clause
| -9,078,421,765,086,998,000
| 23.865385
| 83
| 0.557276
| false
| 3.975385
| false
| false
| false
|
elkingtonmcb/bcbio-nextgen
|
bcbio/variation/coverage_experimental.py
|
1
|
7319
|
import os
import pandas as pd
import subprocess
from collections import Counter
import numpy as np
import math
import pysam
import pybedtools
from bcbio.utils import (file_exists, tmpfile, chdir, splitext_plus,
max_command_length, robust_partition_all)
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio import broad
from bcbio.pipeline import config_utils
class cov_class:
def __init__(self, size, name, sample):
self.size = int(size)
self.name = name
self.position = ""
self.sample = sample
self.cov = {'4': 0, '10': 0, '20': 0, '50': 0}
self.total = Counter()
self.raw = 0
def update(self, size):
self.size += size
def save(self, cov, pt):
self.raw += cov
self.total[cov] = pt
for cut in [4, 10, 20, 50]:
if cov > cut:
self.cov[str(cut)] += pt
def save_coverage(self, cov, nt):
if cov > 100:
cov = 100
elif cov > 10:
cov = int(math.ceil(cov / 10.0)) * 10
# self.size += size
self.total[cov] += nt
def write_coverage(self, out_file):
# names = ["region", "size", "sample", "10", "25", "50"]
df = pd.DataFrame({'depth': self.total.keys(), 'nt': self.total.values()})
df["size"] = self.size
df["sample"] = self.sample
df.to_csv(out_file, mode='a', header=False, index=False, sep="\t")
def _noise(self):
m = np.average(map(int, self.total.keys()), weights=self.total.values())
x = []
[x.extend([k] * int(float(v) * self.size)) for k, v in self.total.items()]
sd = np.std(x)
return m, sd
def write_regions(self, out_file):
m, sd = self._noise()
with open(out_file, 'a') as out_handle:
print >>out_handle, "\t".join(map(str, [self.position, self.name, self.raw,
"+", self.size, self.sample, m, sd] + self.cov.values()))
def _get_exome_coverage_stats(fn, sample, out_file, total_cov):
tmp_region = ""
stats = ""
with open(fn) as in_handle:
for line in in_handle:
if line.startswith("all"):
continue
cols = line.strip().split()
cur_region = "_".join(cols[0:3]) if not isinstance(cols[3], str) else "_".join(cols[0:4])
if cur_region != tmp_region:
if tmp_region != "":
stats.write_regions(out_file)
stats = cov_class(cols[-2], cur_region, sample)
stats.position = "\t".join(cols[0:3])
stats.save(int(cols[-4]), float(cols[-1]))
total_cov.save_coverage(int(cols[-4]), int(cols[-3]))
tmp_region = cur_region
total_cov.update(int(cols[-2]))
stats.write_regions(out_file)
return total_cov
def _silence_run(cmd):
do._do_run(cmd, False)
def coverage(data):
AVERAGE_REGION_STRING_LENGTH = 100
bed_file = dd.get_coverage_experimental(data)
if not bed_file:
return data
work_dir = os.path.join(dd.get_work_dir(data), "report", "coverage")
batch_size = max_command_length() / AVERAGE_REGION_STRING_LENGTH
with chdir(work_dir):
in_bam = data['work_bam']
sample = dd.get_sample_name(data)
logger.debug("doing coverage for %s" % sample)
region_bed = pybedtools.BedTool(bed_file)
parse_file = os.path.join(sample + "_coverage.bed")
parse_total_file = os.path.join(sample + "_cov_total.tsv")
if not file_exists(parse_file):
total_cov = cov_class(0, None, sample)
with file_transaction(parse_file) as out_tx:
with open(out_tx, 'w') as out_handle:
HEADER = ["#chrom", "start", "end", "region", "reads",
"strand", "size", "sample", "mean", "sd", "cutoff10",
"cutoff20", "cutoff4", "cutoff50"]
out_handle.write("\t".join(HEADER) + "\n")
with tmpfile() as tx_tmp_file:
lcount = 0
for chunk in robust_partition_all(batch_size, region_bed):
coord_batch = []
line_batch = ""
for line in chunk:
lcount += 1
chrom = line.chrom
start = max(line.start, 0)
end = line.end
coords = "%s:%s-%s" % (chrom, start, end)
coord_batch.append(coords)
line_batch += str(line)
if not coord_batch:
continue
region_file = pybedtools.BedTool(line_batch,
from_string=True).saveas().fn
coord_string = " ".join(coord_batch)
cmd = ("samtools view -b {in_bam} {coord_string} | "
"bedtools coverage -a {region_file} -b - "
"-hist > {tx_tmp_file}")
_silence_run(cmd.format(**locals()))
total_cov = _get_exome_coverage_stats(os.path.abspath(tx_tmp_file), sample, out_tx, total_cov)
logger.debug("Processed %d regions." % lcount)
total_cov.write_coverage(parse_total_file)
data['coverage'] = os.path.abspath(parse_file)
return data
def variants(data):
if not "vrn_file" in data:
return data
in_vcf = data['vrn_file']
work_dir = os.path.join(dd.get_work_dir(data), "report", "variants")
with chdir(work_dir):
in_bam = data['work_bam']
ref_file = dd.get_ref_file(data)
assert ref_file, "Need the reference genome fasta file."
jvm_opts = broad.get_gatk_framework_opts(data['config'])
gatk_jar = config_utils.get_program("gatk", data['config'], "dir")
bed_file = dd.get_variant_regions(data)
sample = dd.get_sample_name(data)
in_bam = data["work_bam"]
cg_file = os.path.join(sample + "_with-gc.vcf.gz")
parse_file = os.path.join(sample + "_gc-depth-parse.tsv")
if not file_exists(cg_file):
with file_transaction(cg_file) as tx_out:
cmd = ("java -jar {gatk_jar}/GenomeAnalysisTK.jar -T VariantAnnotator -R {ref_file} "
"-L {bed_file} -I {in_bam} "
"-A GCContent --variant {in_vcf} --out {tx_out}")
do.run(cmd.format(**locals()), " GC bias for %s" % in_vcf)
if not file_exists(parse_file):
with file_transaction(parse_file) as out_tx:
with open(out_tx, 'w') as out_handle:
print >>out_handle, "CG\tdepth\tsample"
cmd = ("bcftools query -f '[%GC][\\t%DP][\\t%SAMPLE]\\n' -R {bed_file} {cg_file} >> {out_tx}")
do.run(cmd.format(**locals()), " query for %s" % in_vcf)
logger.debug('parsing coverage: %s' % sample)
# return df
return data
|
mit
| 2,294,521,246,895,444,000
| 40.350282
| 118
| 0.515781
| false
| 3.603644
| false
| false
| false
|
tgquintela/TimeSeriesTools
|
TimeSeriesTools/TS_statistics/probabilitytools.py
|
1
|
2552
|
"""
This module contains functions related to probability and complements the
usual numpy or scipy tools.
"""
import numpy as np
def compute_conditional_probs(probs, marginal_vars):
    """Compute a conditional probability from the joint probability. The
    variables in `marginal_vars` are conditioned on all the remaining
    (dependant) variables, whose marginal distribution is divided out.
    Parameters
    ----------
    probs: array_like
        multidimensional array over all possible combinations of states of
        all the variables, giving the probability of each combination.
    marginal_vars: list or array_like of int
        the indexes of the variables whose conditional probability is
        computed, given the remaining variables.
Returns
-------
p_y_x: array_like
the conditional probability.
"""
## Preparing needed variables
n_vars = len(probs.shape)
dependants = [i for i in range(n_vars) if i not in marginal_vars]
dependants = np.sort(dependants)[::-1]
marginal_vars = np.sort(marginal_vars)[::-1]
n_np = dependants.shape[0]
    ## Compute the marginal over the dependant variables
p_x = compute_marginal_probs(probs, marginal_vars)
## Compute conditioned prob
# Compute swap
swp = np.array([[dependants[i], -i-1] for i in range(n_np)])
# Swap axis
for i in range(swp.shape[0]):
probs = np.swapaxes(probs, swp[i, 0], swp[i, 1])
# Division
p_y_x = np.divide(probs, p_x)
# Reswap axis
for i in range(swp.shape[0]):
p_y_x = np.swapaxes(p_y_x, swp[i, 1], swp[i, 0])
for i in range(swp.shape[0]):
probs = np.swapaxes(probs, swp[i, 1], swp[i, 0])
return p_y_x
def compute_marginal_probs(probs, marginal_vars):
    """Compute the marginal probability distribution by summing out the given
    variables.
Parameters
----------
probs: array_like
the joint probability distribution.
marginal_vars: list or array of int
the indexes of the variables to marginalize.
Returns
-------
p_x: array_like
the marginal probability distribution.
"""
## Formatting inputs
# Formatting marginal variables
marginal_vars = np.sort(marginal_vars)[::-1]
## Marginalizing
p_x = probs[:]
for d in marginal_vars:
nstates = p_x.shape[d]
p_x = np.tensordot(np.ones(nstates), p_x, axes=np.array([0, d]))
return p_x
def compute_division_probs(probs1, probs2, correspondences):
    """Divide two probability distributions following the given
    correspondences between their variables (not implemented yet).
    """
    raise NotImplementedError
def compute_product_sum():
    """Compute the product-sum measure (not implemented yet).
    TODO: Not to sum nan or inf values.
    """
    raise NotImplementedError
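# A minimal, runnable sketch of how the helpers above fit together
# (illustrative only; the numbers are made up). For a joint distribution
# p(A, B) stored as a 2x2 array with rows indexing A, marginalizing A out
# gives p(B), and conditioning A on B gives p(A|B) = p(A, B) / p(B).
if __name__ == '__main__':
    p_ab = np.array([[0.1, 0.2],
                     [0.3, 0.4]])
    p_b = compute_marginal_probs(p_ab, [0])  # sum variable 0 (A) out
    assert np.allclose(p_b, [0.4, 0.6])
    p_a_given_b = compute_conditional_probs(p_ab, [0])
    assert np.allclose(p_a_given_b, p_ab / p_b)
    # each column (fixed B) of the conditional sums to one
    assert np.allclose(p_a_given_b.sum(axis=0), 1.)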
|
mit
| 2,569,273,240,266,878,500
| 25.309278
| 79
| 0.635188
| false
| 3.559275
| false
| false
| false
|
sherzberg/python-beanstalk-api
|
beanstalk/api/base.py
|
1
|
1554
|
import requests
import json
class IncorrectSetupException(Exception):
pass
class BeanstalkAuth(object):
_instance = None
def __new__(cls, domain, username, password):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, domain, username, password):
self.domain = domain
self.username = username
self.password = password
self.api_url = 'https://{0}.beanstalkapp.com/api/'.format(self.domain)
@staticmethod
def get_instance():
if BeanstalkAuth._instance:
return BeanstalkAuth._instance
else:
raise IncorrectSetupException("You need to run beanstalk.setup first!")
class Base():
def _do_request(self, url, method, data):
auth = BeanstalkAuth.get_instance()
request_url = auth.api_url + url
r = getattr(requests, method)(request_url,
data=json.dumps(data),
auth=(auth.username, auth.password),
headers={'content-type': 'application/json'})
r.raise_for_status()
return r.json()
def _do_get(self, url):
return self._do_request(url, 'get', None)
def _do_post(self, url, data):
return self._do_request(url, 'post', data)
def _do_put(self, url, data):
return self._do_request(url, 'put', data)
def _do_delete(self, url, data):
return self._do_request(url, 'delete', data)
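# A minimal usage sketch (illustrative only; the account name, credentials and
# the resource class below are made up). Because BeanstalkAuth is a singleton,
# instantiating it once is enough for every Base subclass to pick up the
# credentials via BeanstalkAuth.get_instance() and to build request URLs from
# relative API paths.
if __name__ == '__main__':
    BeanstalkAuth('example-account', 'someuser', 's3cret')
    class Repositories(Base):
        def find_all(self):
            # would GET https://example-account.beanstalkapp.com/api/repositories.json
            return self._do_get('repositories.json')
    # Repositories().find_all() would perform the request above.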
|
gpl-3.0
| 2,693,403,016,679,041,000
| 27.254545
| 83
| 0.570785
| false
| 3.944162
| false
| false
| false
|
mehdisadeghi/saga-python
|
src/saga/adaptors/pbspro/pbsprojob.py
|
1
|
47635
|
__author__ = "Andre Merzky, Ole Weidner, Mark Santcroos"
__copyright__ = "Copyright 2012-2015, The SAGA Project"
__license__ = "MIT"
""" PBSPro job adaptor implementation
"""
import threading
import saga.url as surl
import saga.utils.pty_shell as sups
import saga.adaptors.base
import saga.adaptors.cpi.job
from saga.job.constants import *
import re
import os
import time
import threading
from cgi import parse_qs
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
SYNC_WAIT_UPDATE_INTERVAL = 1 # seconds
MONITOR_UPDATE_INTERVAL = 60 # seconds
# --------------------------------------------------------------------
#
class _job_state_monitor(threading.Thread):
""" thread that periodically monitors job states
"""
def __init__(self, job_service):
self.logger = job_service._logger
self.js = job_service
self._stop = threading.Event()
super(_job_state_monitor, self).__init__()
self.setDaemon(True)
def stop(self):
self._stop.set()
def run(self):
# we stop the monitoring thread when we see the same error 3 times in
# a row...
error_type_count = dict()
while not self._stop.is_set ():
try:
# FIXME: do bulk updates here! we don't want to pull information
# job by job. that would be too inefficient!
jobs = self.js.jobs
for job_id in jobs.keys() :
job_info = jobs[job_id]
# we only need to monitor jobs that are not in a
# terminal state, so we can skip the ones that are
# either done, failed or canceled
if job_info['state'] not in [saga.job.DONE, saga.job.FAILED, saga.job.CANCELED] :
new_job_info = self.js._job_get_info(job_id, reconnect=False)
self.logger.info ("Job monitoring thread updating Job %s (state: %s)" \
% (job_id, new_job_info['state']))
# fire job state callback if 'state' has changed
if new_job_info['state'] != job_info['state']:
job_obj = job_info['obj']
job_obj._attributes_i_set('state', new_job_info['state'], job_obj._UP, True)
# update job info
jobs[job_id] = new_job_info
except Exception as e:
import traceback
traceback.print_exc ()
self.logger.warning("Exception caught in job monitoring thread: %s" % e)
# check if we see the same error again and again
error_type = str(e)
if error_type not in error_type_count :
error_type_count = dict()
error_type_count[error_type] = 1
else :
error_type_count[error_type] += 1
if error_type_count[error_type] >= 3 :
self.logger.error("too many monitoring errors -- stopping job monitoring thread")
return
finally :
time.sleep (MONITOR_UPDATE_INTERVAL)
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
""" logs an 'error' message and subsequently throws an exception
"""
logger.error(message)
raise exception(message)
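# For example (illustrative call): log_error_and_raise("qsub failed",
# saga.NoSuccess, self._logger) logs "qsub failed" at 'error' level and then
# raises saga.NoSuccess("qsub failed").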
# --------------------------------------------------------------------
#
def _pbs_to_saga_jobstate(pbsjs):
""" translates a pbs one-letter state to saga
"""
if pbsjs == 'C': # Torque "Job is completed after having run."
return saga.job.DONE
elif pbsjs == 'F': # PBS Pro "Job is finished."
return saga.job.DONE
elif pbsjs == 'H': # PBS Pro and TORQUE "Job is held."
return saga.job.PENDING
elif pbsjs == 'Q': # PBS Pro and TORQUE "Job is queued(, eligible to run or routed.)
return saga.job.PENDING
elif pbsjs == 'S': # PBS Pro and TORQUE "Job is suspended."
return saga.job.PENDING
elif pbsjs == 'W': # PBS Pro and TORQUE "Job is waiting for its execution time to be reached."
return saga.job.PENDING
elif pbsjs == 'R': # PBS Pro and TORQUE "Job is running."
return saga.job.RUNNING
elif pbsjs == 'E': # PBS Pro and TORQUE "Job is exiting after having run"
return saga.job.RUNNING
elif pbsjs == 'T': # PBS Pro and TORQUE "Job is being moved to new location."
# TODO: PENDING?
return saga.job.RUNNING
elif pbsjs == 'X': # PBS Pro "Subjob has completed execution or has been deleted."
return saga.job.CANCELED
else:
return saga.job.UNKNOWN
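# For example (illustrative): _pbs_to_saga_jobstate('Q') returns
# saga.job.PENDING, _pbs_to_saga_jobstate('R') returns saga.job.RUNNING, and
# any unrecognised letter falls through to saga.job.UNKNOWN.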
# --------------------------------------------------------------------
#
def _pbscript_generator(url, logger, jd, ppn, gres, pbs_version, is_cray=False, queue=None, ):
""" generates a PBS Pro script from a SAGA job description
"""
pbs_params = str()
exec_n_args = str()
exec_n_args += 'export SAGA_PPN=%d\n' % ppn
if jd.executable:
exec_n_args += "%s " % (jd.executable)
if jd.arguments:
for arg in jd.arguments:
exec_n_args += "%s " % (arg)
if jd.name:
pbs_params += "#PBS -N %s \n" % jd.name
    if (is_cray == "") or not('Version: 4.2.7' in pbs_version):
# qsub on Cray systems complains about the -V option:
# Warning:
# Your job uses the -V option, which requests that all of your
# current shell environment settings (9913 bytes) be exported to
# it. This is not recommended, as it causes problems for the
# batch environment in some cases.
pbs_params += "#PBS -V \n"
if jd.environment:
pbs_params += "#PBS -v %s\n" % \
','.join (["%s=%s" % (k,v)
for k,v in jd.environment.iteritems()])
# apparently this doesn't work with older PBS installations
# if jd.working_directory:
# pbs_params += "#PBS -d %s \n" % jd.working_directory
# a workaround is to do an explicit 'cd'
if jd.working_directory:
workdir_directives = 'export PBS_O_WORKDIR=%s \n' % jd.working_directory
workdir_directives += 'mkdir -p %s\n' % jd.working_directory
workdir_directives += 'cd %s\n' % jd.working_directory
else:
workdir_directives = ''
if jd.output:
# if working directory is set, we want stdout to end up in
        # the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.output):
pbs_params += "#PBS -o %s \n" % jd.output
else:
# user provided a relative path for STDOUT. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -o %s/%s \n" % (jd.working_directory, jd.output)
else:
pbs_params += "#PBS -o %s \n" % jd.output
if jd.error:
# if working directory is set, we want stderr to end up in
# the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.error):
pbs_params += "#PBS -e %s \n" % jd.error
else:
                # user provided a relative path for STDERR. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -e %s/%s \n" % (jd.working_directory, jd.error)
else:
pbs_params += "#PBS -e %s \n" % jd.error
if jd.wall_time_limit:
hours = jd.wall_time_limit / 60
minutes = jd.wall_time_limit % 60
pbs_params += "#PBS -l walltime=%s:%s:00 \n" \
% (str(hours), str(minutes))
if jd.queue and queue:
pbs_params += "#PBS -q %s \n" % queue
elif jd.queue and not queue:
pbs_params += "#PBS -q %s \n" % jd.queue
elif queue and not jd.queue:
pbs_params += "#PBS -q %s \n" % queue
if jd.project:
if 'PBSPro_1' in pbs_version:
# On PBS Pro we set both -P(roject) and -A(accounting),
# as we don't know what the admins decided, and just
# pray that this doesn't create problems.
pbs_params += "#PBS -P %s \n" % str(jd.project)
pbs_params += "#PBS -A %s \n" % str(jd.project)
else:
# Torque
pbs_params += "#PBS -A %s \n" % str(jd.project)
if jd.job_contact:
pbs_params += "#PBS -m abe \n"
# if total_cpu_count is not defined, we assume 1
if not jd.total_cpu_count:
jd.total_cpu_count = 1
# Request enough nodes to cater for the number of cores requested
nnodes = jd.total_cpu_count / ppn
if jd.total_cpu_count % ppn > 0:
nnodes += 1
# We use the ncpus value for systems that need to specify ncpus as multiple of PPN
ncpus = nnodes * ppn
# Node properties are appended to the nodes argument in the resource_list.
node_properties = []
# Parse candidate_hosts
#
# Currently only implemented for "bigflash" on Gordon@SDSC
# https://github.com/radical-cybertools/saga-python/issues/406
#
if jd.candidate_hosts:
if 'BIG_FLASH' in jd.candidate_hosts:
node_properties.append('bigflash')
else:
raise saga.NotImplemented("This type of 'candidate_hosts' not implemented: '%s'" % jd.candidate_hosts)
    if is_cray != "":
# Special cases for PBS/TORQUE on Cray. Different PBSes,
# different flags. A complete nightmare...
if 'PBSPro_10' in pbs_version:
logger.info("Using Cray XT (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using Cray XT (e.g. Archer) specific '#PBS -l select=xx' flags (PBSPro_12).")
pbs_params += "#PBS -l select=%d\n" % nnodes
elif '4.2.6' in pbs_version:
logger.info("Using Titan (Cray XP) specific '#PBS -l nodes=xx'")
pbs_params += "#PBS -l nodes=%d\n" % nnodes
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Edison) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'Version: 5.' in pbs_version:
logger.info("Using TORQUE 5.x notation '#PBS -l procs=XX' ")
pbs_params += "#PBS -l procs=%d\n" % jd.total_cpu_count
else:
logger.info("Using Cray XT (e.g. Kraken, Jaguar) specific '#PBS -l size=xx' flags (TORQUE).")
pbs_params += "#PBS -l size=%s\n" % jd.total_cpu_count
elif 'version: 2.3.13' in pbs_version:
# e.g. Blacklight
# TODO: The more we add, the more it screams for a refactoring
pbs_params += "#PBS -l ncpus=%d\n" % ncpus
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using PBSPro 12 notation '#PBS -l select=XX' ")
pbs_params += "#PBS -l select=%d\n" % (nnodes)
else:
# Default case, i.e, standard HPC cluster (non-Cray)
# If we want just a slice of one node
if jd.total_cpu_count < ppn:
ppn = jd.total_cpu_count
pbs_params += "#PBS -l nodes=%d:ppn=%d%s\n" % (
nnodes, ppn, ''.join([':%s' % prop for prop in node_properties]))
# Process Generic Resource specification request
if gres:
pbs_params += "#PBS -l gres=%s\n" % gres
# escape all double quotes and dollarsigns, otherwise 'echo |'
# further down won't work
# only escape '$' in args and exe. not in the params
exec_n_args = workdir_directives + exec_n_args
exec_n_args = exec_n_args.replace('$', '\\$')
pbscript = "\n#!/bin/bash \n%s%s" % (pbs_params, exec_n_args)
pbscript = pbscript.replace('"', '\\"')
return pbscript
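# Rough shape of the generated script (illustrative sketch only, assuming a
# minimal job description with executable='/bin/date', a single core, a 10
# minute wall time limit and a non-Cray TORQUE/PBS backend):
#
#   #!/bin/bash
#   #PBS -V
#   #PBS -l walltime=0:10:00
#   #PBS -l nodes=1:ppn=1
#   export SAGA_PPN=1
#   /bin/date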
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.pbsprojob"
_ADAPTOR_SCHEMAS = ["pbspro", "pbspro+ssh", "pbspro+gsissh"]
_ADAPTOR_OPTIONS = []
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"jdes_attributes": [saga.job.NAME,
saga.job.EXECUTABLE,
saga.job.ARGUMENTS,
saga.job.CANDIDATE_HOSTS,
saga.job.ENVIRONMENT,
saga.job.INPUT,
saga.job.OUTPUT,
saga.job.ERROR,
saga.job.QUEUE,
saga.job.PROJECT,
saga.job.WALL_TIME_LIMIT,
saga.job.WORKING_DIRECTORY,
saga.job.SPMD_VARIATION, # TODO: 'hot'-fix for BigJob
saga.job.PROCESSES_PER_HOST,
saga.job.TOTAL_CPU_COUNT],
"job_attributes": [saga.job.EXIT_CODE,
saga.job.EXECUTION_HOSTS,
saga.job.CREATED,
saga.job.STARTED,
saga.job.FINISHED],
"metrics": [saga.job.STATE],
"callbacks": [saga.job.STATE],
"contexts": {"ssh": "SSH public/private keypair",
"x509": "GSISSH X509 proxy context",
"userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name": _ADAPTOR_NAME,
"cfg_options": _ADAPTOR_OPTIONS,
"capabilities": _ADAPTOR_CAPABILITIES,
"description": """
The PBSPro adaptor allows to run and manage jobs on `PBS <http://www.pbsworks.com/>`_
controlled HPC clusters.
""",
"example": "examples/jobs/pbsjob.py",
"schemas": {"pbspro": "connect to a local cluster",
"pbspro+ssh": "connect to a remote cluster via SSH",
"pbspro+gsissh": "connect to a remote cluster via GSISSH"}
}
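# Illustrative client-side usage (sketch only; the host and queue names are
# placeholders -- see examples/jobs/pbsjob.py for the maintained example):
#
#   import saga
#   js = saga.job.Service("pbspro+ssh://login.example.org?queue=normal")
#   jd = saga.job.Description()
#   jd.executable = "/bin/sleep"
#   jd.arguments = ["60"]
#   job = js.create_job(jd)
#   job.run()
#   job.wait()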
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version" : "v0.1",
"schemas" : _ADAPTOR_SCHEMAS,
"capabilities": _ADAPTOR_CAPABILITIES,
"cpis": [
{
"type": "saga.job.Service",
"class": "PBSProJobService"
},
{
"type": "saga.job.Job",
"class": "PBSProJob"
}
]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
""" this is the actual adaptor class, which gets loaded by SAGA (i.e. by
the SAGA engine), and which registers the CPI implementation classes
which provide the adaptor's functionality.
"""
# ----------------------------------------------------------------
#
def __init__(self):
saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
self.id_re = re.compile('^\[(.*)\]-\[(.*?)\]$')
self.opts = self.get_config (_ADAPTOR_NAME)
# ----------------------------------------------------------------
#
def sanity_check(self):
# FIXME: also check for gsissh
pass
# ----------------------------------------------------------------
#
def parse_id(self, id):
# split the id '[rm]-[pid]' in its parts, and return them.
match = self.id_re.match(id)
if not match or len(match.groups()) != 2:
raise saga.BadParameter("Cannot parse job id '%s'" % id)
return (match.group(1), match.group(2))
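    # For example (illustrative): parse_id("[pbspro+ssh://host]-[12345]")
    # returns ("pbspro+ssh://host", "12345"), i.e. the resource manager URL
    # and the native PBS job id.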
###############################################################################
#
class PBSProJobService (saga.adaptors.cpi.job.Service):
""" implements saga.adaptors.cpi.job.Service
"""
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
        self.mt = None
_cpi_base = super(PBSProJobService, self)
_cpi_base.__init__(api, adaptor)
self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
self.close()
# ----------------------------------------------------------------
#
def close(self):
if self.mt :
self.mt.stop()
self.mt.join(10) # don't block forever on join()
self._logger.info("Job monitoring thread stopped.")
self.finalize(True)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
if kill_shell :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, rm_url, session):
""" service instance constructor
"""
self.rm = rm_url
self.session = session
self.ppn = None
self.is_cray = ""
self.queue = None
self.shell = None
self.jobs = dict()
self.gres = None
# the monitoring thread - one per service instance
self.mt = _job_state_monitor(job_service=self)
self.mt.start()
rm_scheme = rm_url.scheme
pty_url = surl.Url(rm_url)
# this adaptor supports options that can be passed via the
# 'query' component of the job service URL.
if rm_url.query:
for key, val in parse_qs(rm_url.query).iteritems():
if key == 'queue':
self.queue = val[0]
elif key == 'craytype':
self.is_cray = val[0]
elif key == 'ppn':
self.ppn = int(val[0])
elif key == 'gres':
self.gres = val[0]
# we need to extract the scheme for PTYShell. That's basically the
# job.Service Url without the pbs+ part. We use the PTYShell to execute
# pbs commands either locally or via gsissh or ssh.
if rm_scheme == "pbspro":
pty_url.scheme = "fork"
elif rm_scheme == "pbspro+ssh":
pty_url.scheme = "ssh"
elif rm_scheme == "pbspro+gsissh":
pty_url.scheme = "gsissh"
# these are the commands that we need in order to interact with PBS.
# the adaptor will try to find them during initialize(self) and bail
        # out in case they are not available.
self._commands = {'pbsnodes': None,
'qstat': None,
'qsub': None,
'qdel': None}
self.shell = sups.PTYShell(pty_url, self.session)
# self.shell.set_initialize_hook(self.initialize)
# self.shell.set_finalize_hook(self.finalize)
self.initialize()
return self.get_api()
# ----------------------------------------------------------------
#
def initialize(self):
# check if all required pbs tools are available
for cmd in self._commands.keys():
ret, out, _ = self.shell.run_sync("which %s " % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
path = out.strip() # strip removes newline
if cmd == 'qdel': # qdel doesn't support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
elif cmd == 'qsub': # qsub doesn't always support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
else:
ret, out, _ = self.shell.run_sync("%s --version" % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess,
self._logger)
else:
# version is reported as: "version: x.y.z"
version = out#.strip().split()[1]
# add path and version to the command dictionary
self._commands[cmd] = {"path": path,
"version": version}
self._logger.info("Found PBS tools: %s" % self._commands)
#
# TODO: Get rid of this, as I dont think there is any justification that Cray's are special
#
# let's try to figure out if we're working on a Cray machine.
# naively, we assume that if we can find the 'aprun' command in the
# path that we're logged in to a Cray machine.
if self.is_cray == "":
ret, out, _ = self.shell.run_sync('which aprun')
if ret != 0:
self.is_cray = ""
else:
self._logger.info("Host '%s' seems to be a Cray machine." \
% self.rm.host)
self.is_cray = "unknowncray"
else:
self._logger.info("Assuming host is a Cray since 'craytype' is set to: %s" % self.is_cray)
#
# Get number of processes per node
#
if self.ppn:
self._logger.debug("Using user specified 'ppn': %d" % self.ppn)
return
# TODO: this is quite a hack. however, it *seems* to work quite
# well in practice.
if 'PBSPro_12' in self._commands['qstat']['version']:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "resources_available.ncpus"' % \
self._commands['pbsnodes']['path'])
else:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "(np|pcpu)[[:blank:]]*=" ' % \
self._commands['pbsnodes']['path'])
if ret != 0:
message = "Error running pbsnodes: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# this is black magic. we just assume that the highest occurrence
# of a specific np is the number of processors (cores) per compute
# node. this equals max "PPN" for job scripts
ppn_list = dict()
for line in out.split('\n'):
np = line.split(' = ')
if len(np) == 2:
np_str = np[1].strip()
if np_str == '<various>':
continue
else:
np = int(np_str)
if np in ppn_list:
ppn_list[np] += 1
else:
ppn_list[np] = 1
self.ppn = max(ppn_list, key=ppn_list.get)
self._logger.debug("Found the following 'ppn' configurations: %s. "
"Using %s as default ppn." % (ppn_list, self.ppn))
# ----------------------------------------------------------------
#
def _job_run(self, job_obj):
""" runs a job via qsub
"""
# get the job description
jd = job_obj.get_description()
# normalize working directory path
if jd.working_directory :
jd.working_directory = os.path.normpath (jd.working_directory)
# TODO: Why would one want this?
if self.queue and jd.queue:
self._logger.warning("Job service was instantiated explicitly with \
'queue=%s', but job description tries to use a different queue: '%s'. Using '%s'." %
(self.queue, jd.queue, self.queue))
try:
# create a PBS job script from SAGA job description
script = _pbscript_generator(url=self.rm, logger=self._logger,
jd=jd, ppn=self.ppn, gres=self.gres,
pbs_version=self._commands['qstat']['version'],
is_cray=self.is_cray, queue=self.queue,
)
self._logger.info("Generated PBS script: %s" % script)
except Exception, ex:
log_error_and_raise(str(ex), saga.BadParameter, self._logger)
# try to create the working directory (if defined)
# WARNING: this assumes a shared filesystem between login node and
# compute nodes.
if jd.working_directory:
self._logger.info("Creating working directory %s" % jd.working_directory)
ret, out, _ = self.shell.run_sync("mkdir -p %s" % (jd.working_directory))
if ret != 0:
# something went wrong
message = "Couldn't create working directory - %s" % (out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
# Now we want to execute the script. This process consists of two steps:
# (1) we create a temporary file with 'mktemp' and write the contents of
# the generated PBS script into it
# (2) we call 'qsub <tmpfile>' to submit the script to the queueing system
cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-PBSProJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['qsub']['path'])
ret, out, _ = self.shell.run_sync(cmdline)
if ret != 0:
# something went wrong
message = "Error running job via 'qsub': %s. Commandline was: %s" \
% (out, cmdline)
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# parse the job id. qsub usually returns just the job id, but
# sometimes there are a couple of lines of warnings before.
# if that's the case, we log those as 'warnings'
lines = out.split('\n')
            lines = filter(lambda line: line != '', lines)  # remove empty lines
if len(lines) > 1:
                self._logger.warning('qsub: %s' % ''.join(lines[:-1]))
            # we assume job id is in the last line
#print cmdline
#print out
job_id = "[%s]-[%s]" % (self.rm, lines[-1].strip().split('.')[0])
self._logger.info("Submitted PBS job with id: %s" % job_id)
state = saga.job.PENDING
# populate job info dict
self.jobs[job_id] = {'obj' : job_obj,
'job_id' : job_id,
'state' : state,
'exec_hosts' : None,
'returncode' : None,
'create_time' : None,
'start_time' : None,
'end_time' : None,
'gone' : False
}
self._logger.info ("assign job id %s / %s / %s to watch list (%s)" \
% (None, job_id, job_obj, self.jobs.keys()))
# set status to 'pending' and manually trigger callback
job_obj._attributes_i_set('state', state, job_obj._UP, True)
# return the job id
return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id):
""" see if we can get some info about a job that we don't
know anything about
"""
# rm, pid = self._adaptor.parse_id(job_id)
# # run the PBS 'qstat' command to get some infos about our job
# if 'PBSPro_1' in self._commands['qstat']['version']:
# qstat_flag = '-f'
# else:
# qstat_flag ='-f1'
#
# ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "\
# "grep -E -i '(job_state)|(exec_host)|(exit_status)|(ctime)|"\
# "(start_time)|(comp_time)|(stime)|(qtime)|(mtime)'" \
# % (self._commands['qstat']['path'], qstat_flag, pid))
# if ret != 0:
# message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
# log_error_and_raise(message, saga.NoSuccess, self._logger)
# else:
# # the job seems to exist on the backend. let's gather some data
# job_info = {
# 'job_id': job_id,
# 'state': saga.job.UNKNOWN,
# 'exec_hosts': None,
# 'returncode': None,
# 'create_time': None,
# 'start_time': None,
# 'end_time': None,
# 'gone': False
# }
#
# job_info = self._parse_qstat(out, job_info)
#
# return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_id, reconnect):
""" Get job information attributes via qstat.
"""
# If we don't have the job in our dictionary, we don't want it,
# unless we are trying to reconnect.
if not reconnect and job_id not in self.jobs:
message = "Unknown job id: %s. Can't update state." % job_id
log_error_and_raise(message, saga.NoSuccess, self._logger)
if not reconnect:
# job_info contains the info collect when _job_get_info
# was called the last time
job_info = self.jobs[job_id]
# if the 'gone' flag is set, there's no need to query the job
# state again. it's gone forever
if job_info['gone'] is True:
return job_info
else:
# Create a template data structure
job_info = {
'job_id': job_id,
'state': saga.job.UNKNOWN,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
rm, pid = self._adaptor.parse_id(job_id)
# run the PBS 'qstat' command to get some infos about our job
# TODO: create a PBSPRO/TORQUE flag once
if 'PBSPro_1' in self._commands['qstat']['version']:
qstat_flag = '-fx'
else:
qstat_flag ='-f1'
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "
"grep -E -i '(job_state)|(exec_host)|(exit_status)|"
"(ctime)|(start_time)|(stime)|(mtime)'"
% (self._commands['qstat']['path'], qstat_flag, pid))
if ret != 0:
if reconnect:
message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
if ("Unknown Job Id" in out):
# Let's see if the last known job state was running or pending. in
# that case, the job is gone now, which can either mean DONE,
# or FAILED. the only thing we can do is set it to 'DONE'
job_info['gone'] = True
# TODO: we can also set the end time?
self._logger.warning("Previously running job has disappeared. "
"This probably means that the backend doesn't store "
"information about finished jobs. Setting state to 'DONE'.")
if job_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
job_info['state'] = saga.job.DONE
else:
# TODO: This is an uneducated guess?
job_info['state'] = saga.job.FAILED
else:
# something went wrong
message = "Error retrieving job info via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# The job seems to exist on the backend. let's process some data.
# TODO: make the parsing "contextual", in the sense that it takes
# the state into account.
# parse the egrep result. this should look something like this:
# job_state = C
# exec_host = i72/0
# exit_status = 0
results = out.split('\n')
for line in results:
if len(line.split('=')) == 2:
key, val = line.split('=')
key = key.strip()
val = val.strip()
# The ubiquitous job state
if key in ['job_state']: # PBS Pro and TORQUE
job_info['state'] = _pbs_to_saga_jobstate(val)
# Hosts where the job ran
elif key in ['exec_host']: # PBS Pro and TORQUE
job_info['exec_hosts'] = val.split('+') # format i73/7+i73/6+...
# Exit code of the job
elif key in ['exit_status', # TORQUE
'Exit_status' # PBS Pro
]:
job_info['returncode'] = int(val)
# Time job got created in the queue
elif key in ['ctime']: # PBS Pro and TORQUE
job_info['create_time'] = val
# Time job started to run
elif key in ['start_time', # TORQUE
'stime' # PBS Pro
]:
job_info['start_time'] = val
# Time job ended.
#
# PBS Pro doesn't have an "end time" field.
# It has an "resources_used.walltime" though,
# which could be added up to the start time.
# We will not do that arithmetic now though.
#
# Alternatively, we can use mtime, as the latest
# modification time will generally also be the end time.
#
# TORQUE has an "comp_time" (completion? time) field,
# that is generally the same as mtime at the finish.
#
# For the time being we will use mtime as end time for
# both TORQUE and PBS Pro.
#
if key in ['mtime']: # PBS Pro and TORQUE
job_info['end_time'] = val
# return the updated job info
return job_info
def _parse_qstat(self, haystack, job_info):
# return the new job info dict
return job_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_id):
""" get the job's state
"""
return self.jobs[job_id]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_id):
""" get the job's exit code
"""
ret = self.jobs[job_id]['returncode']
# FIXME: 'None' should cause an exception
        if ret is None : return None
else : return int(ret)
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_id):
""" get the job's exit code
"""
return self.jobs[job_id]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_id):
""" get the job's creation time
"""
return self.jobs[job_id]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_id):
""" get the job's start time
"""
return self.jobs[job_id]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_id):
""" get the job's end time
"""
return self.jobs[job_id]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_id):
""" cancel the job via 'qdel'
"""
rm, pid = self._adaptor.parse_id(job_id)
ret, out, _ = self.shell.run_sync("%s %s\n" \
% (self._commands['qdel']['path'], pid))
if ret != 0:
message = "Error canceling job via 'qdel': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
# assume the job was succesfully canceled
self.jobs[job_id]['state'] = saga.job.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_id, timeout):
""" wait for the job to finish or fail
"""
time_start = time.time()
time_now = time_start
rm, pid = self._adaptor.parse_id(job_id)
while True:
state = self.jobs[job_id]['state'] # this gets updated in the bg.
if state == saga.job.DONE or \
state == saga.job.FAILED or \
state == saga.job.CANCELED:
return True
# avoid busy poll
time.sleep(SYNC_WAIT_UPDATE_INTERVAL)
# check if we hit timeout
if timeout >= 0:
time_now = time.time()
if time_now - time_start > timeout:
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
"job_description": jd,
"job_schema": self.rm.schema,
"reconnect": False
}
# create and return a new job object
return saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, job_id):
""" Implements saga.adaptors.cpi.job.Service.get_job()
Re-create job instance from a job-id.
"""
# If we already have the job info, we just pass the current info.
if job_id in self.jobs :
return self.jobs[job_id]['obj']
# Try to get some initial information about this job (again)
job_info = self._job_get_info(job_id, reconnect=True)
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
# TODO: fill job description
"job_description": saga.job.Description(),
"job_schema": self.rm.schema,
"reconnect": True,
"reconnect_jobid": job_id
}
job_obj = saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# throw it into our job dictionary.
job_info['obj'] = job_obj
self.jobs[job_id] = job_info
return job_obj
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
""" implements saga.adaptors.cpi.job.Service.list()
"""
ids = []
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`" %
self._commands['qstat']['path'])
if ret != 0 and len(out) > 0:
message = "failed to list jobs via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
elif ret != 0 and len(out) == 0:
# qstat | grep `` exits with 1 if the list is empty
pass
else:
for line in out.split("\n"):
# output looks like this:
# 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
# 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
if len(line.split()) > 1:
job_id = "[%s]-[%s]" % (self.rm, line.split()[0].split('.')[0])
ids.append(str(job_id))
return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class PBSProJob (saga.adaptors.cpi.job.Job):
""" implements saga.adaptors.cpi.job.Job
"""
def __init__(self, api, adaptor):
# initialize parent class
_cpi_base = super(PBSProJob, self)
_cpi_base.__init__(api, adaptor)
def _get_impl(self):
return self
@SYNC_CALL
def init_instance(self, job_info):
""" implements saga.adaptors.cpi.job.Job.init_instance()
"""
# init_instance is called for every new saga.job.Job object
# that is created
self.jd = job_info["job_description"]
self.js = job_info["job_service"]
if job_info['reconnect'] is True:
self._id = job_info['reconnect_jobid']
self._started = True
else:
self._id = None
self._started = False
return self.get_api()
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_state(self):
""" implements saga.adaptors.cpi.job.Job.get_state()
"""
if self._started is False:
return saga.job.NEW
return self.js._job_get_state(job_id=self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def wait(self, timeout):
""" implements saga.adaptors.cpi.job.Job.wait()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_wait(job_id=self._id, timeout=timeout)
# ----------------------------------------------------------------
#
@SYNC_CALL
def cancel(self, timeout):
""" implements saga.adaptors.cpi.job.Job.cancel()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_cancel(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def run(self):
""" implements saga.adaptors.cpi.job.Job.run()
"""
self._id = self.js._job_run(self._api())
self._started = True
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_service_url(self):
""" implements saga.adaptors.cpi.job.Job.get_service_url()
"""
return self.js.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_id(self):
""" implements saga.adaptors.cpi.job.Job.get_id()
"""
return self._id
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_exit_code(self):
""" implements saga.adaptors.cpi.job.Job.get_exit_code()
"""
if self._started is False:
return None
else:
return self.js._job_get_exit_code(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_created(self):
""" implements saga.adaptors.cpi.job.Job.get_created()
"""
if self._started is False:
return None
else:
return self.js._job_get_create_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_started(self):
""" implements saga.adaptors.cpi.job.Job.get_started()
"""
if self._started is False:
return None
else:
return self.js._job_get_start_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_finished(self):
""" implements saga.adaptors.cpi.job.Job.get_finished()
"""
if self._started is False:
return None
else:
return self.js._job_get_end_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_execution_hosts(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
if self._started is False:
return None
else:
return self.js._job_get_execution_hosts(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_description(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
return self.jd
|
mit
| 4,840,097,012,143,292,000
| 36.156786
| 192
| 0.473454
| false
| 3.959684
| false
| false
| false
|
sentriz/steely
|
steely/plugins/limp.py
|
1
|
3205
|
import sys
import limp
import limp.errors
import limp.environment
COMMAND = 'limp'
__author__ = 'byxor'
GLOBAL_DEFINITIONS = {}
def main(bot, author_id, source_code, thread_id, thread_type, **kwargs):
def send(message):
bot.sendMessage(str(message), thread_id=thread_id,
thread_type=thread_type)
def send_error(info, error):
full_error_message = f'\n{type(error).__name__}: {error}'
send(f'{info} {full_error_message}')
def last_message():
return bot.fetchThreadMessages(thread_id=thread_id, limit=2)[1].text
def define_global(name, variable):
send("This is a hack; enjoy.")
GLOBAL_DEFINITIONS[name] = variable
try:
environment = limp.environment.create_standard()
_define({
'send': send,
'last-message': last_message,
'define-global': define_global,
**GLOBAL_DEFINITIONS}, environment)
result = limp.evaluate(source_code, environment)
send(result)
except limp.errors.LimpError as error:
send_error('You got a limp error', error)
except Exception as error:
send_error('Something unexpected happened', error)
send("It's possible that it's your fault.")
def _define(custom_symbols, environment):
for name, value in custom_symbols.items():
try:
environment.define(name, value)
except limp.errors.RedefinedSymbol:
pass
def _generate_help():
def _help():
_FULL_COMMAND = f".{COMMAND}"
_REPOSITORY = f"https://www.github.com/byxor/limp"
_BORDER = '=' * 10
def _CREATE_CODE_EXAMPLE(code):
result = limp.evaluate(code)
message = f"User: {_FULL_COMMAND} {code}"
response = f"ChatBot: {result}"
return message + "\n" + response + "\n\n"
_CREATE_CODE_EXAMPLES = lambda input_examples: "".join(
list(map(_CREATE_CODE_EXAMPLE, input_examples))).strip()
description = "Evaluate the limp programming language!"
usage = f"Usage: {_FULL_COMMAND} <source_code>"
examples = _CREATE_CODE_EXAMPLES([
"(+ 1 2)",
"(// 100 (- 5 2))",
"((x -> (* x 2)) 10)",
"((x -> (* x 2)) 50)",
"(map (name -> (concatenate \"hi, \" name)) [\"john\" \"jane\" \"bob\"])",
"(do\n (define add (a b -> (+ a b)))\n (add 30 70))",
])
source_code = f"Source code: {_REPOSITORY}"
contributing = f"Want to contribute? Awesome! Make sure you read CONTRIBUTING.md in the repository first."
return "\n\n".join([
description,
usage,
_BORDER,
examples,
_BORDER,
source_code,
contributing,
])
try:
message = _help()
except Exception as e:
global __doc__
message = "The help could not be autogenerated. It's possible that the code examples are outdated and aren't valid syntax anymore. Please inform Brandon."
message += "\n\n"
message += f"Reason: {e}"
sys.modules[__name__].__doc__ = message
_generate_help()
|
gpl-3.0
| 194,483,731,133,469,540
| 28.953271
| 162
| 0.558502
| false
| 3.801898
| false
| false
| false
|
realestate-com-au/harpoon
|
harpoon/ship/network.py
|
1
|
1619
|
from docker.errors import APIError as DockerAPIError
import logging
import uuid
log = logging.getLogger("harpoon.ship.network")
class NetworkManager(object):
def __init__(self, docker_api):
self.networks = {}
self.docker_api = docker_api
def register(self, conf, container_name):
if not conf.links:
return
network = self.docker_api.create_network(str(uuid.uuid1()))["Id"]
inside = self.networks[network] = set()
log.info("Created network %s\tlinks=%s", network, [l.pair for l in conf.links])
for link in conf.links:
dep_container_name, link_name = link.pair
inside.add(dep_container_name)
conf.harpoon.docker_api.connect_container_to_network(dep_container_name, network
, aliases = [link_name]
)
conf.harpoon.docker_api.connect_container_to_network(container_name, network)
inside.add(container_name)
def removed(self, container_name):
for network, containers in list(self.networks.items()):
if network not in self.networks:
continue
if container_name in containers:
containers.remove(container_name)
if not containers:
try:
log.info("Removing network %s", network)
self.docker_api.remove_network(network)
except DockerAPIError as error:
log.warning("Failed to remove network %s\terror=%s", network, error)
finally:
del self.networks[network]
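# Illustrative flow (sketch only; the container name is a placeholder and
# `conf` is expected to carry `links` and `harpoon.docker_api` as used above):
#
#   manager = NetworkManager(docker_api)
#   manager.register(conf, "my_container")  # creates a network and wires links
#   ...
#   manager.removed("my_container")         # removes the network once empty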
|
mit
| -949,235,595,282,341,400
| 35.795455
| 92
| 0.593576
| false
| 4.23822
| false
| false
| false
|
polyaxon/polyaxon
|
core/polyaxon/pql/builder.py
|
1
|
15832
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from collections import namedtuple
from typing import Any, Callable, Optional
from polyaxon.exceptions import PolyaxonDateTimeFormatterException, PQLException
from polyaxon.utils.bool_utils import to_bool
from polyaxon.utils.date_utils import DateTimeFormatter
from polyaxon.utils.list_utils import to_list
class QueryCondSpec(namedtuple("QueryCondSpec", "cond params")):
def items(self):
return self._asdict().items()
class QueryBuilder:
"""The `QueryBuild` adds filters to a `QuerySet` from a `params` mapping.
Filters are a mapping of <name: Condition>, Condition being an object that update the queryset.
"""
def __init__(self, filters):
self.filters = filters
def build(self, queryset: Any, params: Any) -> Any:
for name, condition in self.filters.items():
if name in params:
queryset = condition.apply(queryset, name, params[name])
return queryset
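# Minimal sketch of wiring up a filters mapping (illustrative only; the field
# names below are hypothetical):
#
#   filters = {
#       "status": ValueCondition("eq"),
#       "created_at": DateTimeCondition("range"),
#   }
#   queryset = QueryBuilder(filters).build(queryset, params)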
class BaseCondition:
"""The base condition representing a single filter to apply to a `QuerySet`"""
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
):
raise NotImplementedError
class BaseOperatorCondition(BaseCondition):
def __init__(self, op: str, negation: bool = False) -> None:
if op not in self.VALUES and op not in self.REPRESENTATIONS:
raise PQLException(
"Received an invalid operator `{}`, "
"possible values `{}` or `{}`.".format(
op, self.VALUES, self.REPRESENTATIONS
)
)
self.operator = self._get_operator(op, negation)
def __eq__(self, other: "BaseOperatorCondition") -> bool:
return self.operator == other.operator
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return queryset.filter(
self.operator(
name=name, params=params, query_backend=query_backend, timezone=timezone
)
)
def apply_operator(
self, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.operator(
name=name, params=params, query_backend=query_backend, timezone=timezone
)
class CallbackCondition(BaseCondition):
"""The `CallbackCondition` represents a filter based on a callback to apply."""
def __init__(self, callback: Callable) -> None:
self.callback = callback
self.negation = False
def __call__(self, op, negation: bool = False) -> "CallbackCondition":
self.negation = negation
return self
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.callback(
queryset,
params,
self.negation,
query_backend=query_backend,
timezone=timezone,
)
def apply_operator(
self, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.callback(
query_backend,
params=params,
negation=self.negation,
query_backend=query_backend,
timezone=timezone,
)
class NilCondition(BaseOperatorCondition):
VALUES = {"nil"}
REPRESENTATIONS = {"nil"}
REPRESENTATION_MAPPING = (("nil", "nil"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
if negation:
return cls._not_nil_operator
return cls._nil_operator
@staticmethod
def _nil_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__isnull".format(name)
return query_backend(**{name: True})
@classmethod
def _not_nil_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
    ) -> Any:
name = "{}__isnull".format(name)
return query_backend(**{name: False})
class EqualityCondition(NilCondition):
VALUES = NilCondition.VALUES | {"eq"}
REPRESENTATIONS = NilCondition.REPRESENTATIONS | {"="}
REPRESENTATION_MAPPING = NilCondition.REPRESENTATION_MAPPING + (("=", "eq"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = NilCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._neq_operator
return cls._eq_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
return query_backend(**{name: params})
@classmethod
def _neq_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
    ) -> Any:
return ~cls._eq_operator(name, params, query_backend, timezone)
class BoolCondition(EqualityCondition):
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
return query_backend(**{name: to_bool(params)})
class ComparisonCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in", "lt", "lte", "gt", "gte"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|", "<", "<=", ">", ">="}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (
("|", "in"),
("<", "lt"),
("<=", "lte"),
(">", "gt"),
(">=", "gte"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = EqualityCondition._get_operator(op, negation)
if _op:
return _op
if op == "lt" or op == "<":
if negation:
return cls._gte_operator
return cls._lt_operator
if op == "lte" or op == "<=":
if negation:
return cls._gt_operator
return cls._lte_operator
if op == "gt" or op == ">":
if negation:
return cls._lte_operator
return cls._gt_operator
if op == "gte" or op == ">=":
if negation:
return cls._lt_operator
return cls._gte_operator
if op == "in" or op == "|":
if negation:
return cls._nin_operator
return cls._in_operator
@staticmethod
def _lt_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__lt".format(name)
return query_backend(**{name: params})
@staticmethod
def _gt_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__gt".format(name)
return query_backend(**{name: params})
@staticmethod
def _lte_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__lte".format(name)
return query_backend(**{name: params})
@staticmethod
def _gte_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__gte".format(name)
return query_backend(**{name: params})
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__in".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
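def _comparison_condition_example():
    """Minimal sketch added for illustration (not part of the public API).
    `query_backend` is normally an ORM query factory such as Django's ``Q``;
    a plain dict-returning stand-in is used here just to show how an operator
    expands into a lookup.
    """
    def backend(**kwargs):
        return kwargs
    cond = ComparisonCondition("gte")
    # Expands to {"created_at__gte": "2020-01-01"}
    return cond.apply_operator(
        name="created_at", params="2020-01-01", query_backend=backend, timezone="UTC"
    )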
class DateTimeCondition(ComparisonCondition):
VALUES = ComparisonCondition.VALUES | {"range"}
REPRESENTATIONS = ComparisonCondition.REPRESENTATIONS | {".."}
REPRESENTATION_MAPPING = ComparisonCondition.REPRESENTATION_MAPPING + (
("..", "range"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
        # handle eq in the current class (dates need their own handling)
if op == "eq" or op == "=":
if negation:
return cls._neq_operator
return cls._eq_operator
_op = ComparisonCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._nrange_operator
return cls._range_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
try:
# Check If params is date
DateTimeFormatter.extract_timestamp(
params,
dt_format=DateTimeFormatter.DATE_FORMAT,
timezone=timezone,
)
return query_backend(**{f"{name}__date": params})
except (TypeError, ValueError):
pass
return query_backend(**{name: params})
@staticmethod
def _range_operator(
name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
assert len(params) == 2
try:
start_date = DateTimeFormatter.extract(params[0], timezone)
end_date = DateTimeFormatter.extract(params[1], timezone)
except PolyaxonDateTimeFormatterException as e:
raise PQLException(e)
name = "{}__range".format(name)
return query_backend(**{name: (start_date, end_date)})
@classmethod
def _nrange_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._range_operator(
name, params, query_backend=query_backend, timezone=timezone
)
class ValueCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (("|", "in"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Any:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = EqualityCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._nin_operator
return cls._in_operator
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__in".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
class SearchCondition(ValueCondition):
VALUES = ValueCondition.VALUES | {"icontains", "istartswith", "iendswith"}
REPRESENTATIONS = ValueCondition.REPRESENTATIONS | {"%%", "%_", "_%"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (
("%%", "icontains"),
("_%", "istartswith"),
("%_", "iendswith"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Any:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = ValueCondition._get_operator(op, negation)
if _op:
return _op
if op == "%%" or op == "icontains":
if negation:
return cls._ncontains_operator
return cls._contains_operator
if op == "_%" or op == "istartswith":
if negation:
return cls._nstartswith_operator
return cls._startswith_operator
if op == "%_" or op == "iendswith":
if negation:
return cls._nendswith_operator
return cls._endswith_operator
@staticmethod
def _contains_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__icontains".format(name)
return query_backend(**{name: params})
@classmethod
def _ncontains_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._contains_operator(name, params, query_backend, timezone)
@staticmethod
def _startswith_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__istartswith".format(name)
return query_backend(**{name: params})
@classmethod
def _nstartswith_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._startswith_operator(
name, params, query_backend=query_backend, timezone=timezone
)
@staticmethod
def _endswith_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__iendswith".format(name)
return query_backend(**{name: params})
@classmethod
def _nendswith_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._endswith_operator(name, params, query_backend, timezone)
class ArrayCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (("|", "in"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = cls._get_eq_operator(op, negation)
if _op:
return _op
if negation:
return cls._nin_operator
return cls._in_operator
@classmethod
def _get_eq_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if (
op not in EqualityCondition.VALUES
and op not in EqualityCondition.REPRESENTATIONS
):
return None
if negation:
return cls._neq_operator
return cls._eq_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__contains".format(name)
return query_backend(**{name: to_list(params)})
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__overlap".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
|
apache-2.0
| -8,171,395,019,497,414,000
| 32.052192
| 99
| 0.600682
| false
| 4.053251
| false
| false
| false
|
rschnapka/bank-payment
|
account_banking/wizard/banking_transaction_wizard.py
|
1
|
19506
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# (C) 2011 Smile (<http://smile.fr>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The banking transaction wizard is linked to a button in the statement line
tree view. It allows the user to undo the duplicate flag, select between
multiple matches or select a manual match.
"""
from openerp.osv import orm, fields
from openerp.tools.translate import _
class banking_transaction_wizard(orm.TransientModel):
_name = 'banking.transaction.wizard'
_description = 'Match transaction'
def create(self, cr, uid, vals, context=None):
"""
Make sure that the statement line has an import transaction
"""
res = super(banking_transaction_wizard, self).create(
cr, uid, vals, context=context)
if res and vals.get('statement_line_id'):
line_pool = self.pool.get('account.bank.statement.line')
line_pool.create_instant_transaction(
cr, uid, vals['statement_line_id'], context=context)
return res
def create_act_window(self, cr, uid, ids, nodestroy=True, context=None):
"""
Return a popup window for this model
"""
if isinstance(ids, (int, long)):
ids = [ids]
return {
'name': self._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': self._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': ids[0],
'nodestroy': nodestroy,
}
def trigger_match(self, cr, uid, ids, context=None):
"""
Call the automatic matching routine for one or
more bank transactions
"""
if isinstance(ids, (int, long)):
ids = [ids]
import_transaction_obj = self.pool.get('banking.import.transaction')
trans_id = self.read(
cr, uid, ids[0], ['import_transaction_id'],
context=context)['import_transaction_id'][0] # many2one tuple
import_transaction_obj.match(cr, uid, [trans_id], context=context)
return self.create_act_window(cr, uid, ids, context=None)
def write(self, cr, uid, ids, vals, context=None):
"""
Implement a trigger to retrieve the corresponding move line
when the invoice_id changes
"""
statement_line_obj = self.pool.get('account.bank.statement.line')
transaction_obj = self.pool.get('banking.import.transaction')
if not vals or not ids:
return True
wiz = self.browse(cr, uid, ids[0], context=context)
# The following fields get never written
# they are just triggers for manual matching
# which populates regular fields on the transaction
manual_invoice_ids = vals.pop('manual_invoice_ids', [])
manual_move_line_ids = vals.pop('manual_move_line_ids', [])
res = super(banking_transaction_wizard, self).write(
cr, uid, ids, vals, context=context)
wiz.refresh()
# Process the logic of the written values
# An invoice is selected from multiple candidates
if vals and 'invoice_id' in vals:
if (wiz.import_transaction_id.match_type == 'invoice' and
wiz.import_transaction_id.invoice_id):
found = False
# the current value might apply
if (wiz.move_line_id and wiz.move_line_id.invoice and
wiz.move_line_id.invoice == wiz.invoice_id):
found = True
else:
# Otherwise, retrieve the move line for this invoice
                    # Given the arity of the relation, there are always
                    # multiple possibilities, but the move lines here are
                    # prefiltered for having account_id.type payable/receivable
                    # and the regular invoice workflow should only come up
                    # with one of those.
for move_line in wiz.import_transaction_id.move_line_ids:
if (move_line.invoice ==
wiz.import_transaction_id.invoice_id):
transaction_obj.write(
cr, uid, wiz.import_transaction_id.id,
{'move_line_id': move_line.id, },
context=context
)
statement_line_obj.write(
cr, uid,
wiz.import_transaction_id.statement_line_id.id,
{
'partner_id': (
move_line.partner_id.id or False),
'account_id': move_line.account_id.id,
}, context=context)
found = True
break
# Cannot match the invoice
if not found:
                    raise orm.except_orm(
_("No entry found for the selected invoice"),
_("No entry found for the selected invoice. " +
"Try manual reconciliation."))
if manual_move_line_ids or manual_invoice_ids:
move_line_obj = self.pool.get('account.move.line')
invoice_obj = self.pool.get('account.invoice')
statement_line_obj = self.pool.get('account.bank.statement.line')
# Rewrite *2many directive notation
if manual_invoice_ids:
manual_invoice_ids = (
[i[1] for i in manual_invoice_ids if i[0] == 4] +
[j for i in manual_invoice_ids if i[0] == 6 for j in i[2]])
if manual_move_line_ids:
manual_move_line_ids = (
[i[1] for i in manual_move_line_ids if i[0] == 4] +
[j for i in manual_move_line_ids
if i[0] == 6 for j in i[2]])
for wiz in self.browse(cr, uid, ids, context=context):
# write can be called multiple times for the same values
# that doesn't hurt above, but it does here
if wiz.match_type and (
len(manual_move_line_ids) > 1 or
len(manual_invoice_ids) > 1):
continue
todo = []
for invoice in invoice_obj.browse(
cr, uid, manual_invoice_ids, context=context):
found_move_line = False
if invoice.move_id:
for line in invoice.move_id.line_id:
if line.account_id.type in ('receivable',
'payable'):
todo.append((invoice.id, line.id))
found_move_line = True
break
if not found_move_line:
raise orm.except_orm(
_("Cannot select for reconcilion"),
_("No entry found for the selected invoice. "))
for move_line_id in manual_move_line_ids:
todo_entry = [False, move_line_id]
move_line = move_line_obj.read(
cr,
uid,
move_line_id,
['invoice'],
context=context
)
if move_line['invoice']:
todo_entry[0] = move_line['invoice'][0]
todo.append(todo_entry)
while todo:
todo_entry = todo.pop()
move_line = move_line_obj.browse(
cr, uid, todo_entry[1], context)
transaction_id = wiz.import_transaction_id.id
statement_line_id = wiz.statement_line_id.id
if len(todo) > 0:
statement_line_id = wiz.statement_line_id.split_off(
move_line.debit or -move_line.credit)[0]
transaction_id = statement_line_obj.browse(
cr,
uid,
statement_line_id,
context=context
).import_transaction_id.id
vals = {
'move_line_id': todo_entry[1],
'move_line_ids': [(6, 0, [todo_entry[1]])],
'invoice_id': todo_entry[0],
'invoice_ids': [
(6, 0, [todo_entry[0]] if todo_entry[0] else [])
],
'match_type': 'manual',
}
transaction_obj.clear_and_write(
cr, uid, transaction_id, vals, context=context)
st_line_vals = {
'account_id': move_line_obj.read(
cr, uid, todo_entry[1],
['account_id'], context=context)['account_id'][0],
}
if todo_entry[0]:
st_line_vals['partner_id'] = invoice_obj.browse(
cr, uid, todo_entry[0], context=context
).partner_id.commercial_partner_id.id
statement_line_obj.write(
cr, uid, statement_line_id,
st_line_vals, context=context)
return res
def trigger_write(self, cr, uid, ids, context=None):
"""
Just a button that triggers a write.
"""
return self.create_act_window(cr, uid, ids, context=None)
def disable_match(self, cr, uid, ids, context=None):
"""
Clear manual and automatic match information
"""
settings_pool = self.pool.get('account.banking.account.settings')
statement_pool = self.pool.get('account.bank.statement.line')
if isinstance(ids, (int, long)):
ids = [ids]
for wiz in self.browse(cr, uid, ids, context=context):
# Get the bank account setting record, to reset the account
account_id = False
journal_id = wiz.statement_line_id.statement_id.journal_id.id
setting_ids = settings_pool.find(
cr, uid, journal_id, context=context
)
# Restore partner id from the bank account or else reset
partner_id = False
if (wiz.statement_line_id.partner_bank_id and
wiz.statement_line_id.partner_bank_id.partner_id):
partner_id = (
wiz.statement_line_id.partner_bank_id.partner_id.id
)
wiz.write({'partner_id': partner_id})
bank_partner = False
if partner_id:
bank_partner = wiz.statement_line_id.partner_bank_id.partner_id
if wiz.amount < 0:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_decr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_credit_account_id.id
else:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_incr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_debit_account_id.id
if account_id:
wiz.statement_line_id.write({'account_id': account_id})
if wiz.statement_line_id:
# delete splits causing an unsplit if this is a split
# transaction
statement_pool.unlink(
cr,
uid,
statement_pool.search(
cr, uid,
[('parent_id', '=', wiz.statement_line_id.id)],
context=context
),
context=context
)
if wiz.import_transaction_id:
wiz.import_transaction_id.clear_and_write()
return self.create_act_window(cr, uid, ids, context=None)
def reverse_duplicate(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
transaction_obj = self.pool.get('banking.import.transaction')
for wiz in self.read(
cr, uid, ids, ['duplicate', 'import_transaction_id'],
context=context):
transaction_obj.write(
cr, uid, wiz['import_transaction_id'][0],
{'duplicate': not wiz['duplicate']}, context=context)
return self.create_act_window(cr, uid, ids, context=None)
def button_done(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
_columns = {
'name': fields.char('Name', size=64),
'statement_line_id': fields.many2one(
'account.bank.statement.line', 'Statement line'),
'amount': fields.related(
'statement_line_id', 'amount', type='float',
string="Amount", readonly=True),
'date': fields.related(
'statement_line_id', 'date', type='date',
string="Date", readonly=True),
'ref': fields.related(
'statement_line_id', 'ref', type='char', size=32,
string="Reference", readonly=True),
'message': fields.related(
'statement_line_id', 'import_transaction_id', 'message',
type='char', size=1024,
string="Message", readonly=True),
'partner_id': fields.related(
'statement_line_id', 'partner_id',
type='many2one', relation='res.partner',
string="Partner", readonly=True),
'statement_line_parent_id': fields.related(
'statement_line_id', 'parent_id', type='many2one',
relation='account.bank.statement.line', readonly=True),
'import_transaction_id': fields.related(
'statement_line_id', 'import_transaction_id',
string="Import transaction",
type='many2one', relation='banking.import.transaction'),
'residual': fields.related(
'import_transaction_id', 'residual', type='float',
string='Residual', readonly=True),
'writeoff_account_id': fields.related(
'import_transaction_id', 'writeoff_account_id',
type='many2one', relation='account.account',
string='Write-off account'),
'invoice_ids': fields.related(
'import_transaction_id', 'invoice_ids', string="Matching invoices",
type='many2many', relation='account.invoice'),
'invoice_id': fields.related(
'import_transaction_id',
'invoice_id',
string="Invoice to reconcile",
type='many2one',
relation='account.invoice',
),
'move_line_ids': fields.related(
'import_transaction_id', 'move_line_ids', string="Entry lines",
type='many2many', relation='account.move.line'),
'move_line_id': fields.related(
'import_transaction_id', 'move_line_id', string="Entry line",
type='many2one', relation='account.move.line'),
'duplicate': fields.related(
'import_transaction_id',
'duplicate',
string='Flagged as duplicate',
type='boolean',
),
'match_multi': fields.related(
'import_transaction_id', 'match_multi',
type="boolean", string='Multiple matches'),
'match_type': fields.related(
'import_transaction_id',
'match_type',
type='selection',
selection=[
('move', 'Move'),
('invoice', 'Invoice'),
('payment', 'Payment line'),
('payment_order', 'Payment order'),
('storno', 'Storno'),
('manual', 'Manual'),
('payment_manual', 'Payment line (manual)'),
('payment_order_manual', 'Payment order (manual)'),
],
string='Match type',
readonly=True,
),
'manual_invoice_ids': fields.many2many(
'account.invoice',
'banking_transaction_wizard_account_invoice_rel',
'wizard_id', 'invoice_id', string='Match one or more invoices',
domain=[('reconciled', '=', False)]),
'manual_move_line_ids': fields.many2many(
'account.move.line',
'banking_transaction_wizard_account_move_line_rel',
'wizard_id', 'move_line_id', string='Or match one or more entries',
domain=[('account_id.reconcile', '=', True),
('reconcile_id', '=', False)]),
'payment_option': fields.related(
'import_transaction_id',
'payment_option',
string='Payment Difference',
type='selection',
required=True,
selection=[
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance')
],
),
'writeoff_analytic_id': fields.related(
'import_transaction_id', 'writeoff_analytic_id',
type='many2one', relation='account.analytic.account',
string='Write-off analytic account'),
'analytic_account_id': fields.related(
'statement_line_id', 'analytic_account_id',
type='many2one', relation='account.analytic.account',
string="Analytic Account"),
'move_currency_amount': fields.related(
'import_transaction_id',
'move_currency_amount',
type='float',
string='Match Currency Amount',
readonly=True,
),
}
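# Illustrative sketch only (not part of the original module): a minimal
# example of the act_window dictionary a statement line button action could
# return to open this wizard. The wizard id and display name are
# placeholders, mirroring create_act_window() above.
def _example_open_wizard_action(wizard_id):
    return {
        'name': 'Match transaction',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'banking.transaction.wizard',
        'domain': [],
        'type': 'ir.actions.act_window',
        'target': 'new',
        'res_id': wizard_id,
        'nodestroy': True,
    }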
|
agpl-3.0
| -7,091,693,435,405,808,000
| 42.346667
| 79
| 0.501384
| false
| 4.50485
| false
| false
| false
|
gnome-prototypes-team/gnome-music
|
gnomemusic/query.py
|
1
|
42091
|
# Copyright (c) 2013 Arnel A. Borja <kyoushuu@yahoo.com>
# Copyright (c) 2013 Vadim Rutkovsky <roignac@gmail.com>
# Copyright (c) 2013 Seif Lotfy <seif@lotfy.com>
# Copyright (c) 2013 Guillaume Quintard <guillaume.quintard@gmail.com>
#
# GNOME Music is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GNOME Music is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with GNOME Music; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The GNOME Music authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and GNOME Music. This permission is above and beyond the permissions
# granted by the GPL license by which GNOME Music is covered. If you
# modify this code, you may extend this exception to your version of the
# code, but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version.
from gettext import gettext as _
from gi.repository import GLib, Tracker
import os
import logging
logger = logging.getLogger(__name__)
import time
sparql_midnight_dateTime_format = "%Y-%m-%dT00:00:00Z"
SECONDS_PER_DAY = 86400
class Query():
music_folder = None
MUSIC_URI = None
download_folder = None
DOWNLOAD_URI = None
try:
music_folder = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_MUSIC)
MUSIC_URI = Tracker.sparql_escape_string(GLib.filename_to_uri(music_folder))
download_folder = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DOWNLOAD)
DOWNLOAD_URI = Tracker.sparql_escape_string(GLib.filename_to_uri(download_folder))
for folder in [music_folder, download_folder]:
if os.path.islink(folder):
logger.warn("%s is a symlink, this folder will be omitted" % folder)
else:
i = len(next(os.walk(folder))[2])
logger.debug("Found %d files in %s" % (i, folder))
except TypeError:
logger.warn("XDG user dirs are not set")
@staticmethod
def order_by_statement(attr):
"""Returns a SPARQL ORDER BY statement sorting by the given attribute, ignoring
articles as defined in _("the"). 'Attr' should be given without parentheses,
e.g., "attr='?author'"."""
return_statement = "fn:lower-case(%(attribute)s)" % {'attribute': attr}
# TRANSLATORS: _("the") should be a space-separated list of all-lowercase articles
# (such as 'the') that should be ignored when alphabetizing artists/albums. This
# list should include 'the' regardless of language. If some articles occur more
# frequently than others, most common should appear first, least common last.
for article in reversed(_("the a an").split(" ")):
return_statement = '''IF(fn:starts-with(fn:lower-case(%(attribute)s), "%(article)s"),
fn:substring(fn:lower-case(%(attribute)s), %(substr_start)s),
%(nested_if)s)''' % {
'attribute': attr,
'article': article + " ",
'substr_start': str(len(article) + 2),
'nested_if': return_statement}
return return_statement
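    # Illustrative example (not part of the original file): for
    # order_by_statement("?author") with the default article list "the a an",
    # the generated SPARQL expression is a chain of nested IFs, roughly:
    #   IF(fn:starts-with(fn:lower-case(?author), "the "),
    #      fn:substring(fn:lower-case(?author), 5),
    #      IF(fn:starts-with(fn:lower-case(?author), "a "), ...,
    #         fn:lower-case(?author)))
    # so e.g. "The Beatles" sorts under "beatles".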
@staticmethod
def all_albums():
return Query.albums('?album a nmm:MusicAlbum .')
@staticmethod
def all_artists():
return Query.artists('?album a nmm:MusicAlbum .')
@staticmethod
def all_songs():
return Query.songs('?song a nmm:MusicPiece ; a nfo:FileDataObject .')
@staticmethod
def all_playlists():
return Query.playlists('?playlist a nmm:Playlist .')
@staticmethod
def all_songs_count():
query = '''
SELECT
COUNT(?song) AS childcount
WHERE {
?song a nmm:MusicPiece ;
a nfo:FileDataObject
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
'''.replace('\n', ' ').strip() % {
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def albums(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
tracker:coalesce(
(
SELECT
GROUP_CONCAT(
nmm:artistName(?artist),
','
)
WHERE {
?album nmm:albumArtist ?artist
}
),
(
SELECT
GROUP_CONCAT(
(
SELECT
nmm:artistName(nmm:performer(?_12)) AS perf
WHERE {
?_12 nmm:musicAlbum ?album
}
GROUP BY ?perf
),
','
) AS album_performer
WHERE {
}
)
) AS author
xsd:integer(
tracker:coalesce(
nmm:albumTrackCount(?album),
(
SELECT
COUNT(?_1)
WHERE {
?_1 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_1)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_1)
)
)
FILTER (
NOT EXISTS {
?_1 a nmm:Video
} &&
NOT EXISTS {
?_1 a nmm:Playlist
}
)
}
)
)
) AS childcount
(
SELECT
fn:year-from-dateTime(?c)
WHERE {
?_2 nmm:musicAlbum ?album ;
nie:contentCreated ?c ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_2)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_2)
)
)
FILTER (
NOT EXISTS {
?_2 a nmm:Video
} &&
NOT EXISTS {
?_2 a nmm:Playlist
}
)
}
LIMIT 1
) AS creation-date
{
%(where_clause)s
FILTER (
EXISTS {
?_3 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_3)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_3)
)
)
FILTER (
NOT EXISTS {
?_3 a nmm:Video
} &&
NOT EXISTS {
?_3 a nmm:Playlist
}
)
}
)
}
ORDER BY %(album_order)s
%(artist_order)s
?albumyear
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI,
'album_order': Query.order_by_statement("?title"),
'artist_order': Query.order_by_statement("?author")
}
return query
@staticmethod
def artists(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
tracker:coalesce(
(
SELECT
GROUP_CONCAT(
nmm:artistName(?artist),
','
)
WHERE {
?album nmm:albumArtist ?artist
}
),
(
SELECT
GROUP_CONCAT(
(
SELECT
nmm:artistName(nmm:performer(?_12)) AS perf
WHERE {
?_12 nmm:musicAlbum ?album
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_12)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_12)
)
)
FILTER (
NOT EXISTS {
?_12 a nmm:Video
} &&
NOT EXISTS {
?_12 a nmm:Playlist
}
)
}
GROUP BY ?perf
),
','
) AS album_performer
WHERE {
}
)
) AS author
xsd:integer(
tracker:coalesce(
nmm:albumTrackCount(?album),
(
SELECT
COUNT(?_1)
WHERE {
?_1 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_1)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_1)
)
)
FILTER (
NOT EXISTS {
?_1 a nmm:Video
} &&
NOT EXISTS {
?_1 a nmm:Playlist
}
)
}
)
)
) AS childcount
(
SELECT
fn:year-from-dateTime(?c)
WHERE {
?_2 nmm:musicAlbum ?album ;
nie:contentCreated ?c ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_2)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_2)
)
)
FILTER (
NOT EXISTS {
?_2 a nmm:Video
} &&
NOT EXISTS {
?_2 a nmm:Playlist
}
)
}
LIMIT 1
) AS creation-date
{
%(where_clause)s
FILTER (
EXISTS {
?_3 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_3)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_3)
)
)
FILTER (
NOT EXISTS {
?_3 a nmm:Video
} &&
NOT EXISTS {
?_3 a nmm:Playlist
}
)
}
)
}
ORDER BY %(artist_order)s
?albumyear
%(album_order)s
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI,
'artist_order': Query.order_by_statement("?author"),
'album_order': Query.order_by_statement("nie:title(?album)")
}
return query
@staticmethod
def songs(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?song)
tracker:id(?song) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
{
%(where_clause)s
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY tracker:added(?song)
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def playlists(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?playlist)
tracker:id(?playlist) AS id
nie:title(?playlist) AS title
nfo:entryCounter(?playlist) AS childcount
{
%(where_clause)s
OPTIONAL {
?playlist a nfo:FileDataObject .
FILTER (
EXISTS {
?playlist tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?playlist)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?playlist)
)
)
}
)
}
}
ORDER BY fn:lower-case(?title)
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def album_songs(album_id):
query = '''
SELECT DISTINCT
rdf:type(?song)
tracker:id(?song) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
WHERE {
?song a nmm:MusicPiece ;
a nfo:FileDataObject ;
nmm:musicAlbum ?album .
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
tracker:id(?album) = %(album_id)s
)
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY
nmm:setNumber(nmm:musicAlbumDisc(?song))
nmm:trackNumber(?song)
tracker:added(?song)
'''.replace('\n', ' ').strip() % {
'album_id': album_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def playlist_songs(playlist_id, filter_clause=None):
query = '''
SELECT
rdf:type(?song)
tracker:id(?entry) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
WHERE {
?playlist a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
?entry a nfo:MediaFileListEntry ;
nfo:entryUrl ?url .
?song a nmm:MusicPiece ;
a nfo:FileDataObject ;
nie:url ?url .
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
%(filter_clause)s
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY
nfo:listPosition(?entry)
'''.replace('\n', ' ').strip() % {
'playlist_id': playlist_id,
'filter_clause':
filter_clause or 'tracker:id(?playlist) = ' + playlist_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def get_album_for_album_id(album_id):
query = """
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
tracker:id(?album) = %(album_id)s
)
}
""".replace("\n", " ").strip() % {
'album_id': album_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def get_album_for_song_id(song_id):
query = """
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
WHERE {
?song a nmm:MusicPiece ;
nmm:musicAlbum ?album .
FILTER (
tracker:id(?song) = %(song_id)s
)
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
""".replace("\n", " ").strip() % {
'song_id': song_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def update_playcount(song_url):
query = """
INSERT OR REPLACE { ?song nie:usageCounter ?playcount . }
WHERE {
SELECT
IF(bound(?usage), (?usage + 1), 1) AS playcount
?song
WHERE {
?song a nmm:MusicPiece .
OPTIONAL { ?song nie:usageCounter ?usage . }
FILTER ( nie:url(?song) = "%(song_url)s" )
}
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query
@staticmethod
def update_last_played(song_url, time):
query = """
INSERT OR REPLACE { ?song nfo:fileLastAccessed '%(time)s' . }
WHERE {
SELECT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
}
""".replace("\n", " ").strip() % {
'song_url': song_url,
'time': time
}
return query
@staticmethod
def create_playlist(title):
query = """
INSERT {
_:playlist
a nmm:Playlist ;
a nfo:MediaList ;
nie:title "%(title)s" ;
nfo:entryCounter 0 .
}
""".replace("\n", " ").strip() % {
'title': title
}
return query
@staticmethod
def create_tag(tag_text):
query = """
INSERT OR REPLACE {
_:tag
a nao:Tag ;
rdfs:comment '%(tag_text)s'.
}
""".replace("\n", " ").strip() % {
'tag_text': tag_text
}
return query
@staticmethod
def create_playlist_with_tag(title, tag_text):
# TODO: make this an extension of 'create playlist' rather than its own func.?
# TODO: CREATE TAG IF IT DOESN'T EXIST!
query = """
INSERT {
_:playlist
a nmm:Playlist ;
a nfo:MediaList ;
nie:title "%(title)s" ;
nfo:entryCounter 0 ;
nao:hasTag ?tag.
}
WHERE {
SELECT ?tag
WHERE {
?tag a nao:Tag ;
rdfs:comment '%(tag_text)s'.
}
}
""".replace("\n", " ").strip() % {
'title': title,
'tag_text': tag_text
}
return query
@staticmethod
def delete_playlist(playlist_id):
query = """
DELETE {
?playlist
a rdfs:Resource .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList .
OPTIONAL {
?playlist
nfo:hasMediaFileListEntry ?entry .
}
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id
}
return query
@staticmethod
def add_song_to_playlist(playlist_id, song_uri):
query = """
INSERT OR REPLACE {
_:entry
a nfo:MediaFileListEntry ;
nfo:entryUrl "%(song_uri)s" ;
nfo:listPosition ?position .
?playlist
nfo:entryCounter ?position ;
nfo:hasMediaFileListEntry _:entry .
}
WHERE {
SELECT
?playlist
(?counter + 1) AS position
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:entryCounter ?counter .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id,
'song_uri': song_uri
}
return query
@staticmethod
def remove_song_from_playlist(playlist_id, song_id):
query = """
INSERT OR REPLACE {
?entry
nfo:listPosition ?position .
}
WHERE {
SELECT
?entry
(?old_position - 1) AS position
WHERE {
?entry
a nfo:MediaFileListEntry ;
nfo:listPosition ?old_position .
?playlist
nfo:hasMediaFileListEntry ?entry .
FILTER (?old_position > ?removed_position)
{
SELECT
?playlist
?removed_position
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?removed_entry .
?removed_entry
nfo:listPosition ?removed_position .
FILTER (
tracker:id(?playlist) = %(playlist_id)s &&
tracker:id(?removed_entry) = %(song_id)s
)
}
}
}
}
INSERT OR REPLACE {
?playlist
nfo:entryCounter ?new_counter .
}
WHERE {
SELECT
?playlist
(?counter - 1) AS new_counter
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:entryCounter ?counter .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
}
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s &&
tracker:id(?entry) = %(song_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id,
'song_id': song_id
}
return query
@staticmethod
def get_playlist_with_id(playlist_id):
query = """
?playlist a nmm:Playlist .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
""".replace('\n', ' ').strip() % {'playlist_id': playlist_id}
return Query.playlists(query)
@staticmethod
def get_playlist_with_tag(playlist_tag):
query = """
?playlist
a nmm:Playlist ;
nao:hasTag ?tag .
?tag rdfs:comment ?tag_text .
FILTER ( ?tag_text = '%(playlist_tag)s' )
""".replace('\n', ' ').strip() % {'playlist_tag': playlist_tag}
return Query.playlists(query)
@staticmethod
def get_playlist_with_urn(playlist_urn):
query = """
SELECT DISTINCT
tracker:id(<%(playlist_urn)s>) AS id
WHERE {
<%(playlist_urn)s> a nmm:Playlist
}
""".replace('\n', ' ').strip() % {'playlist_urn': playlist_urn}
return query
@staticmethod
def get_playlist_song_with_id(playlist_id, entry_id):
return Query.playlist_songs(
playlist_id, 'tracker:id(?entry) = ' + str(entry_id)
)
@staticmethod
def get_playlist_song_with_urn(entry_urn):
query = """
SELECT DISTINCT
tracker:id(<%(entry_urn)s>) AS id
WHERE {
<%(entry_urn)s> a nfo:MediaFileListEntry
}
""".replace('\n', ' ').strip() % {'entry_urn': entry_urn}
return query
@staticmethod
def clear_playlist_with_id(playlist_id):
query = """
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace('\n', ' ').strip() % {'playlist_id': playlist_id}
return query
@staticmethod
def get_most_played_songs():
# TODO: set playlist size somewhere? Currently default is 50.
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:usageCounter ?count ;
nie:isStoredAs ?as .
?as nie:url ?url .
} ORDER BY DESC(?count) LIMIT 50
""".replace('\n', ' ').strip()
return query
@staticmethod
def get_never_played_songs():
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as .
?as nie:url ?url .
FILTER ( NOT EXISTS { ?song nie:usageCounter ?count .} )
} ORDER BY nfo:fileLastAccessed(?song)
""".replace('\n', ' ').strip()
return query
    @staticmethod
    def get_recently_played_songs():
#TODO: or this could take comparison date as an argument so we don't need to make a date string in query.py...
#TODO: set time interval somewhere? A settings file? (Default is maybe 2 weeks...?)
days_difference = 7 # currently hardcoding time interval of 7 days
seconds_difference = days_difference * SECONDS_PER_DAY
compare_date = time.strftime(
sparql_midnight_dateTime_format, time.gmtime(time.time() - seconds_difference))
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
nfo:fileLastAccessed ?last_played .
?as nie:url ?url .
FILTER ( ?last_played > '%(compare_date)s'^^xsd:dateTime )
FILTER ( EXISTS { ?song nie:usageCounter ?count .} )
} ORDER BY DESC(?last_played)
""".replace('\n', ' ').strip() % {'compare_date': compare_date}
return query
    @staticmethod
    def get_recently_added_songs():
#TODO: or this could take comparison date as an argument so we don't need to make a date string in query.py...
#TODO: set time interval somewhere? A settings file? (Default is maybe 2 weeks...?)
days_difference = 7 # currently hardcoding time interval of 7 days
seconds_difference = days_difference * SECONDS_PER_DAY
compare_date = time.strftime(sparql_midnight_dateTime_format, time.gmtime(time.time()-seconds_difference))
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
tracker:added ?added .
?as nie:url ?url .
FILTER ( ?added > '%(compare_date)s'^^xsd:dateTime )
} ORDER BY DESC(?added)
""".replace('\n', ' ').strip() % {'compare_date': compare_date}
return query
    @staticmethod
    def get_favorite_songs():
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
nao:hasTag nao:predefined-tag-favorite .
?as nie:url ?url .
} ORDER BY DESC(tracker:added(?song))
""".replace('\n', ' ').strip()
return query
# Functions for search
# TODO: make those queries actually return something
@staticmethod
def get_albums_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum ;
nmm:albumArtist ?artist .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(?artist)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
fn:contains(tracker:case-fold(nie:title(?album)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_artists_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum ;
nmm:albumArtist ?artist .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(?artist)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
fn:contains(tracker:case-fold(nie:title(?album)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_songs_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def clear_playlist(playlist_id):
# TODO is there a way to do this with only one FILTER statement?
query = """
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
INSERT OR REPLACE {
?playlist nfo:entryCounter '0'
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id
}
return query
    @staticmethod
    def add_favorite(song_url):
query = """
INSERT {
?song nao:hasTag nao:predefined-tag-favorite
}
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query
    @staticmethod
    def remove_favorite(song_url):
query = """
DELETE {
?song nao:hasTag nao:predefined-tag-favorite
}
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query
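if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): the static
    # methods above only build SPARQL strings, so they can be printed and
    # inspected without a running Tracker store. The search term is a
    # placeholder.
    print(Query.all_albums())
    print(Query.get_songs_with_any_match('beatles'))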
|
gpl-2.0
| 4,295,170,475,370,668,500
| 30.133136
| 122
| 0.436625
| false
| 4.380828
| false
| false
| false
|
Yangqing/caffe2
|
caffe2/python/checkpoint.py
|
1
|
29797
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import Node, Task, TaskGroup, TaskOutput, WorkspaceType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@context.define_context()
class Job(object):
"""
    A Job defines four TaskGroups: the `init_group`, the `epoch_group`, the
    `download_group` and the `exit_group`, which will be run by a JobRunner.
The `init_group` will be run only once at startup. Its role is to
initialize globally persistent blobs such as model weights, accumulators
and data file lists.
The `epoch_group` will be run in a loop after init_group. The loop will
exit when any of the stop signals added with `add_stop_signal` is True
at the end of an epoch.
The download_group will be run only once, after all the executions of
    epoch_group finish. Its role is to collect the scattered (distributed)
    parameters back after training.
The `exit_group` will be run only once at the very end of the job, the
role of this group is to save the results of training in the end of the job.
Jobs are context-driven, so that Tasks can be added to the active Job
without having to explicitly pass the job object around.
Example of usage:
def build_reader(partitions):
with Job.current().init_group:
reader = HiveReader(init_reader, ..., partitions)
Task(step=init_reader)
with Job.current().epoch_group:
limited_reader = ReaderWithLimit(reader, num_iter=10000)
data_queue = pipe(limited_reader, num_threads=8)
Job.current().add_stop_signal(limited_reader.data_finished())
return data_queue
def build_hogwild_trainer(reader, model):
with Job.current().init_group:
Task(step=model.param_init_net)
with Job.current().epoch_group:
pipe(reader, processor=model, num_threads=8)
with Job.current().exit_group:
Task(step=model.save_model_net)
with Job() as job:
reader = build_reader(partitions)
model = build_model(params)
build_hogwild_trainer(reader, model)
"""
def __init__(self,
init_group=None, epoch_group=None,
download_group=None, exit_group=None,
stop_signals=None, nodes_to_checkpoint=None):
self.init_group = init_group or TaskGroup(
workspace_type=WorkspaceType.GLOBAL)
self.epoch_group = epoch_group or TaskGroup()
self.download_group = download_group or TaskGroup()
self.exit_group = exit_group or TaskGroup()
self.stop_signals = stop_signals or []
self._nodes_to_checkpoint = nodes_to_checkpoint
def nodes_to_checkpoint(self):
if self._nodes_to_checkpoint:
return self._nodes_to_checkpoint
else:
return self.init_group.used_nodes()
def compile(self, session_class):
return Job(
init_group=session_class.compile(self.init_group),
epoch_group=session_class.compile(self.epoch_group),
download_group=session_class.compile(self.download_group),
exit_group=session_class.compile(self.exit_group),
stop_signals=self.stop_signals,
nodes_to_checkpoint=self.nodes_to_checkpoint())
def __enter__(self):
self.epoch_group.__enter__()
return self
def __exit__(self, *args):
self.epoch_group.__exit__()
def add_stop_signal(self, output):
if isinstance(output, core.BlobReference):
t = Task(outputs=[output], group=self.epoch_group)
output = t.outputs()[0]
assert isinstance(output, TaskOutput)
self.stop_signals.append(output)
def get_ckpt_filename(node_name, epoch):
"""Returns the checkpoint filename.
Args:
node_name: A string. The name of the node.
epoch: An integer. The checkpoint epoch.
Returns:
ckpt_filename: A string. The filename of the checkpoint.
"""
return node_name + '.' + str(epoch)
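# Illustrative example (not part of the original file), with placeholder
# values: get_ckpt_filename('trainer_0', 5) returns 'trainer_0.5'.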
def db_name(epoch, node_name, db_prefix, path_prefix=None):
"""Returns the full db name where checkpoint files are saved.
Args:
epoch: An integer. The checkpoint epoch.
node_name: A string. The name of the node.
db_prefix: A string. The prefix used to construct full db name.
path_prefix: A string. Optional param used to construct db name or path
where checkpoint files are are stored.
Returns:
db_name: A string. The absolute path of full_db_name where checkpoint
files are saved
"""
if path_prefix:
db_name = path_prefix + get_ckpt_filename(node_name, epoch)
else:
ckpt_filename = get_ckpt_filename(node_name, epoch)
db_name = os.path.join(db_prefix, ckpt_filename)
return db_name
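# Illustrative examples (not part of the original file), with placeholder
# paths:
#   db_name(5, 'trainer_0', '/tmp/checkpoints')
#       -> '/tmp/checkpoints/trainer_0.5'
#   db_name(5, 'trainer_0', '/tmp/checkpoints', path_prefix='gfs://ckpt/')
#       -> 'gfs://ckpt/trainer_0.5'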
class CheckpointManager(object):
"""
Controls saving and loading of workspaces on every epoch boundary of a job.
If a CheckpointManager instance is passed to JobRunner, then JobRunner will
call `init`, `read` and `save` at different moments in between epoch runs.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
node_name: Name of the node where this checkpoint_manager is used.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
self._db_prefix = db_prefix
self._node_name = node_name
self._db_type = db_type
self._metadata_handler = metadata_handler
# make sure these blobs are the first in the checkpoint file.
self._net = core.Net('!!checkpoint_mngr')
self._blob_names = self._net.AddExternalInput('blob_names')
self._names_output = None
self._path_prefix = None
self._path_type = None
"""
Initialize the checkpoint manager. Determines all blobs that need to be saved
    or loads them from a checkpoint.
Args:
nodes: An array of nodes where this checkpoint manager is running. Should
only contain a single node.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self,
nodes=None,
retrieve_from_epoch=None,
path_prefix=None,
path_type=None
):
"""
Build a Task that will be run once after the job's `init_group` is run.
This task will determine which blobs need to be checkpointed.
If retrieve_from_epoch is not None, then the checkpoint metadata is
retrieved from a previously saved checkpoint.
"""
assert nodes is None or len(nodes) == 1, (
'CheckpointManager only supports single node.')
with Task(outputs=[self._blob_names]) as task:
if retrieve_from_epoch is None:
ops.GetAllBlobNames(
[],
self._blob_names,
include_shared=False)
else:
full_db_name = db_name(retrieve_from_epoch,
self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Initializing checkpoints from = %s"
% full_db_name)
ops.Load(
[], self._blob_names,
db=full_db_name,
db_type=db_type,
absolute_path=True)
self._names_output = task.outputs()[0]
return task
def blob_list(self):
assert self._names_output
return self._names_output.fetch().tolist()
def load(self, epoch, path_prefix=None, path_type=None):
"""
Build a Task that will be run by JobRunner when the job is to be
resumed from a given epoch. This task will run a Load op that will
load and deserialize all relevant blobs from a persistent storage.
"""
full_db_name = db_name(epoch, self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Loading checkpoints from = %s" % full_db_name)
with Task() as task:
ops.Load(
[],
self.blob_list(),
db=full_db_name,
db_type=db_type,
absolute_path=True)
return task
def load_blobs_from_checkpoint(self, blob_names, epoch):
"""
Builds a Task that loads only the necessary blobs from a checkpoint of
the given epoch. The necessary blobs are given in the blob_names
argument.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: The checkpoint epoch to load from.
Returns:
A Task which loads the specified blobs from the checkpoint of the
given epoch.
"""
logger.info('Load from %s' % db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
ops.Load(
[],
blob_names,
db=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type,
absolute_path=True,
allow_incomplete=True)
return task
def check_db_exists(self, epoch):
logger.info('Check existence of %s' %
db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
existence = ops.Const(False)
ops.DBExists(
[],
[existence],
db_name=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type,
absolute_path=True)
task.add_output(existence)
return task
def save(self, epoch):
"""
Build a Task that is run once after `init_group` and after each
epoch is run. This will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
logger.info('Saving to %s' % db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
ops.Save(
self.blob_list(), [],
db=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type, absolute_path=True)
return task
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
        Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=[str(self._node_name)],
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
it implies we need to check if checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class MultiNodeCheckpointManager(object):
"""
    Coordinates checkpointing across multiple nodes.
Each of `init`, `load` and `save` will build TaskGroups which will
trigger checkpointing on each of the nodes involved in a distributed job.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
def __init__(self, db_prefix, db_type, metadata_handler=None):
self._node_managers = None
self._db_prefix = db_prefix
self._db_type = db_type
self._metadata_handler = metadata_handler
self._path_prefix = None
self._path_type = None
def _task_group(self, func, *args, **kw):
assert self._node_managers is not None, 'init must be called first.'
with TaskGroup(WorkspaceType.GLOBAL) as task_group:
for node, manager in self._node_managers:
with Node(node):
func(manager, *args, **kw)
return task_group
"""
Args:
nodes: An array of nodes where this checkpoint manager is running.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self, nodes, retrieve_from_epoch=None, path_prefix=None, path_type=None
):
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
return TaskGroup(WorkspaceType.GLOBAL)
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
return self._task_group(
CheckpointManager.init,
nodes=[node],
retrieve_from_epoch=retrieve_from_epoch,
path_prefix=path_prefix,
path_type=path_type)
def load(self, epoch, path_prefix=None, path_type=None):
return self._task_group(
CheckpointManager.load,
epoch,
path_prefix=path_prefix,
path_type=path_type)
def load_blobs_locally(self, nodes, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints to the current node.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the Load ops.
"""
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
else:
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
assert self._node_managers is not None, 'must initialize node managers'
for _, manager in self._node_managers:
existence_task = manager.check_db_exists(epoch)
session.run(existence_task)
existence = existence_task.outputs()[0].fetch()
if not existence:
logger.info('DB %s does not exist!' %
db_name(epoch, manager._node_name, manager._db_prefix))
return False
load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
session.run(load_task)
logger.info('Successfully loaded from checkpoints.')
return True
def get_ckpt_db_name(self, node_name, epoch):
"""Returns the DB name of the given node and the given epoch.
The DB name is effectively the checkpoint path of the given node and
the given epoch.
Args:
node_name: A string. The node name of interest.
epoch: An integer. The epoch of the checkpoint.
Returns:
checkpoint_db_name: A string. The checkpoint path of the given
node and the given epoch.
"""
for node, manager in self._node_managers:
if str(node) == node_name:
return db_name(epoch, manager._node_name, manager._db_prefix)
def save(self, epoch):
"""
Build a Task that will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
return self._task_group(CheckpointManager.save, epoch)
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
        Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
self._node_names = [str(node) for node in nodes]
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=self._node_names,
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
it implies we need to check if checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class UploadTaskGroupBuilder(object):
"""A simple class to upload checkpoints."""
def build(self, epoch, checkpoint_manager):
"""Builds the task group to upload checkpoints.
Args:
epoch: An integer. The checkpoint epoch to be uploaded.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
Raises:
NotImplementedError: This base class only has the interface,
the implementation will be in the subclasses.
"""
raise NotImplementedError()
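# Illustrative sketch only (not part of the original file): a trivial
# subclass showing the build() contract; it returns an empty TaskGroup so
# JobRunner's upload step becomes a no-op.
class _ExampleNoOpUploadTaskGroupBuilder(UploadTaskGroupBuilder):
    def build(self, epoch, checkpoint_manager):
        # Nothing to upload; the runner will simply execute an empty group.
        return TaskGroup(WorkspaceType.GLOBAL)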
class JobRunner(object):
"""
Implement the runtime logic for jobs with checkpointing at the level of
epoch. Can be used to run either single-host or distributed jobs. Job
runner is a callable to be called once from the master, passing a session
as an argument. This call will block until the Job execution is complete.
If a checkpoint_manager is passed, checkpoints will be taken after
initialization and after each epoch execution. If, in addition,
`resume_from_epoch` is an epoch number, the corresponding checkpoint will
be loaded and job execution will continue from the given epoch. In
this case, the job's init_group will not be run.
Refer to checkpoint_test.py for an example.
"""
def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None,
upload_task_group_builder=None):
"""Initializes the JobRunner.
Args:
job: A Job object. The job to be executed.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
resume_from_epoch: An integer. The epoch to resume from.
upload_task_group_builder: A subclass of the
UploadTaskGroupBuilder. Creates a task group to upload
checkpoints.
"""
self.resume_from_epoch = resume_from_epoch
self.checkpoint_manager = checkpoint_manager
self.job = job
self.upload_task_group_builder = upload_task_group_builder
def __call__(self, session):
"""Runs the training flow.
Args:
            session: A Session object. Valid choices are: LocalSession,
                LocalHostScheduler, and DistributedSession. It is used to
                execute one TaskGroup at a time.
"""
# identify the epoch we must resume from
if self.checkpoint_manager:
self.checkpoint_manager.set_params(nodes=self.job.nodes_to_checkpoint())
self.resume_from_epoch = self.checkpoint_manager.\
get_resume_from_epoch_id(self.resume_from_epoch)
if self.resume_from_epoch is not None:
logger.info('Resuming from epoch {}'.format(self.resume_from_epoch))
# Initialize all the nodes.
from_scratch = self.resume_from_epoch is None
if from_scratch:
session.run(self.job.init_group)
if self.checkpoint_manager:
logger.info('Preparing checkpoints ...')
session.run(self.checkpoint_manager.init(
self.job.nodes_to_checkpoint(),
retrieve_from_epoch=self.resume_from_epoch))
# Save the first checkpoint before training starts, or resume from
# a previously saved checkpoint.
if from_scratch:
self.save_checkpoints(0, session)
else:
logger.info('Loading checkpoints for epoch {} ...'.format(
self.resume_from_epoch))
session.run(
self.checkpoint_manager.load(self.resume_from_epoch))
logger.info('Checkpoint loaded')
logger.info("Finished initializing")
# Start training.
epoch = 1 if from_scratch else self.resume_from_epoch + 1
while True:
logger.info('Starting epoch %d' % epoch)
session.run(self.job.epoch_group)
logger.info('Finished epoch %d' % epoch)
stop_signals = [o.fetch() for o in self.job.stop_signals]
if self.checkpoint_manager:
self.save_checkpoints(epoch, session)
if any(stop_signals):
logger.info('Stopping')
break
epoch += 1
logger.info('Finished training')
# Upload the checkpoints.
        if self.upload_task_group_builder:
upload_task_group = self.upload_task_group_builder.build(
epoch, self.checkpoint_manager)
session.run(upload_task_group)
logger.info('Finished uploading the checkpoints')
# Download the parameters to save
session.run(self.job.download_group)
logger.info('Finished downloading the parameters')
# Finally run the exit step to save nets
session.run(self.job.exit_group)
logger.info('Finished running the exit group')
return epoch
def load_blobs_from_checkpoints(self, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints.
Checkpoints store the snapshots of the workspace in each node.
Sometimes we only need to load a subset of the blobs from the
checkpoints. One common scenario is to load only the model blobs from
        the checkpoints for evaluation purposes. Given the names of the
necessary blobs, this function goes over all the checkpoints of all the
nodes, but only loads the blobs specified in the blob_names to the
current workspace.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the load ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
return self.checkpoint_manager.load_blobs_locally(
self.job.nodes_to_checkpoint(), blob_names, epoch, session)
def save_checkpoints(self, epoch, session):
"""Triggers operation to save checkpoints
This method will trigger the Save ops to serialize and persist the
blobs present in the global workspaace.
Args:
epoch: An integer. The checkpoint epoch-id that we are saving.
session: A Session object to execute the save ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
try:
is_accessible = self.checkpoint_manager.cp_accessible(epoch=None)
if is_accessible:
logger.info('Saving checkpoints for epoch {}'.format(epoch))
session.run(self.checkpoint_manager.save(epoch))
self.checkpoint_manager.write_checkpoint_metadata(epoch)
logger.info('Checkpoints saved')
else:
logger.warning("Checkpoint files cannot be accessed!")
except Exception as ex:
logger.warning("Unable to write checkpoint for epoch {}. Error={}".
format(epoch, ex))
def epoch_limiter(num_epochs):
"""
Creates a task that will output True when a given
number of epochs has finished.
"""
with Job.current().init_group:
init_net = core.Net('epoch_counter_init')
counter = init_net.CreateCounter([], init_count=num_epochs - 1)
Task(step=init_net)
with Job.current().epoch_group:
epoch_net = core.Net('epoch_countdown')
finished = epoch_net.CountDown(counter)
output = Task(step=epoch_net, outputs=finished).outputs()[0]
Job.current().add_stop_signal(output)
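# Hedged sketch of how epoch_limiter plugs into a Job (the Job context manager
# and session setup are assumed from the surrounding framework, not shown here;
# build_forward_backward_nets is a hypothetical helper):
#
#   with Job() as job:
#       build_forward_backward_nets()   # fills job.epoch_group with training nets
#       epoch_limiter(num_epochs=10)    # adds a stop signal after 10 epochs
#   JobRunner(job, checkpoint_manager=None)(session)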
|
apache-2.0
| 7,775,444,930,955,179,000
| 38.571049
| 86
| 0.606202
| false
| 4.39938
| false
| false
| false
|
SeavantUUz/silence
|
tools.py
|
1
|
1952
|
#coding: utf-8
__all__ = ['covert','line_resize','parse','draw_line','draw_screen','move','draw_input','check_pos']
import locale
import logging
locale.setlocale(locale.LC_ALL,'')
code = locale.getpreferredencoding()
def covert(string,code):
new_string = string.decode('utf-8')
lenth = len(new_string)
return new_string.encode(code), lenth
def line_resize(lines, width, code):
count = len(lines)
index = 0
while index < count:
line = lines[index].decode('utf-8')
line_lenth = len(line)
if line_lenth > width:
s_width = 0
while s_width < line_lenth:
yield line[s_width:s_width+width].encode(code)
s_width += width
index += 1
else:
yield line.encode(code)
index += 1
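# Worked example for line_resize (assuming an ASCII-compatible locale encoding):
#   list(line_resize(['abcdef'], 4, code)) -> ['abcd', 'ef']
# i.e. lines longer than the screen width are split into width-sized chunks.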
def combine(func):
def wrapper(*args, **kwargs):
value = "".join(reversed(list(func(*args, **kwargs))))
return value
return wrapper
@combine
def parse(value):
while value:
ch = value % 1000
value /= 1000
yield chr(ch)
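# Worked example of the base-1000 packing that parse() reverses (Python 2
# integer division is assumed, as elsewhere in this module):
#   parse(104105) -> 'hi'   # 105 -> 'i', then 104 -> 'h'; combine() reverses the order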
def draw_line(stdscr, y, x):
stdscr.hline(y,0,ord('-'),x)
def move(stdscr, y, x):
stdscr.move(y,x)
def draw_screen(stdscr, content, hight, width):
lines = list(line_resize(content, width, code))
move(stdscr,0,0)
y = 0
for line in lines[-hight:]:
stdscr.addstr(covert(line,code)[0])
y += 1
move(stdscr, y, 0)
def draw_input(stdscr, line, y, x):
logging.info(line)
move(stdscr, y,0)
stdscr.clrtoeol()
stdscr.refresh()
stdscr.addstr(line)
logging.info(line)
move(stdscr,y,x)
def check_pos(stdscr, type_, value):
y, x = stdscr.getmaxyx()
if type_ == 'x':
if value < 0:
value = 0
if value > x-1:
value = x-1
if type_ == 'y':
if value < 0:
value = 0
if value > y-1:
value = y-1
return value
|
mit
| 707,597,869,600,511,700
| 23.098765
| 100
| 0.553279
| false
| 3.2
| false
| false
| false
|
rvianello/rdkit
|
rdkit/Chem/UnitTestQED.py
|
4
|
4498
|
from __future__ import print_function
from collections import namedtuple
import doctest
import os.path
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import QED
doLong = False
TestData = namedtuple('TestData', 'lineNo,smiles,mol,expected')
dataNCI200 = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'QED', 'NCI_200_qed.csv')
dataRegression = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'QED', 'Regression_qed.csv')
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(QED, optionflags=doctest.ELLIPSIS))
return tests
class TestCase(unittest.TestCase):
def testQED(self):
self.assertEqual(QED.qed.version, '1.1.0',
msg='QED version has changed. Update the regression tests if required.')
def testNCI200(self):
for d in readTestData(dataNCI200):
self.assertAlmostEqual(QED.qed(d.mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
# Check that adding hydrogens will not change the result
# This is currently not the case. Hydrogens change the number of rotatable bonds and the
# number of alerts.
mol = Chem.AddHs(d.mol)
self.assertAlmostEqual(QED.qed(mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
def testRegression(self):
if not doLong:
raise unittest.SkipTest('long test')
for d in readTestData(dataRegression):
self.assertAlmostEqual(QED.qed(d.mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
def test_properties(self):
m = Chem.MolFromSmiles('N=C(CCSCc1csc(N=C(N)N)n1)NS(N)(=O)=O')
p = QED.properties(m)
self.assertAlmostEqual(p.MW, 337.456)
self.assertAlmostEqual(p.ALOGP, -0.55833)
self.assertAlmostEqual(p.HBA, 6)
self.assertAlmostEqual(p.HBD, 5)
self.assertAlmostEqual(p.PSA, 173.33)
self.assertAlmostEqual(p.ROTB, 7)
self.assertAlmostEqual(p.AROM, 1)
self.assertAlmostEqual(p.ALERTS, 3)
p = QED.properties(Chem.AddHs(m))
self.assertAlmostEqual(p.MW, 337.456)
self.assertAlmostEqual(p.ALOGP, -0.55833)
self.assertAlmostEqual(p.HBA, 6)
self.assertAlmostEqual(p.HBD, 5)
self.assertAlmostEqual(p.PSA, 173.33)
self.assertAlmostEqual(p.ROTB, 7)
self.assertAlmostEqual(p.AROM, 1)
self.assertAlmostEqual(p.ALERTS, 3)
def test_examples(self):
# Paroxetine 0.935
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('c1cc2OCOc2cc1OCC1CNCCC1c1ccc(F)cc1')), 0.934,
places=3)
# Leflunomide 0.929
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('C1=NOC(C)=C1C(=O)Nc1ccc(cc1)C(F)(F)F')),
0.911, places=3)
# Clomipramine 0.779
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('CN(C)CCCN1c2ccccc2CCc2ccc(Cl)cc21')),
0.818, places=3)
# Tegaserod 0.213
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('CCCCCNC(=N)NN=CC1=CNc2ccc(CO)cc21')),
0.235, places=3)
def readTestData(filename):
""" Read test data from file """
with open(filename, 'r') as f:
for lineNo, line in enumerate(f, 1):
if line[0] == '#':
continue
smiles, expected = line.strip().split(',')
mol = Chem.MolFromSmiles(smiles)
if not mol:
raise AssertionError('molecule construction failed on line %d' % lineNo)
yield TestData(lineNo, smiles, mol, float(expected))
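# Data lines in the CSV files read above are simply "<SMILES>,<expected QED>"; an
# illustrative (made-up) line, not taken from the shipped data files, would be:
#   CC(=O)Oc1ccccc1C(=O)O,0.55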
def updateTestData():
""" Update the test data. This should only be done if the method changes! """
for filename in (dataNCI200, dataRegression,):
data = list(readTestData(filename))
with open(filename, 'w') as f:
print('# Test data for QED descriptor', file=f)
for d in data:
expected = QED.qed(d.mol)
print('{0.smiles},{1}'.format(d, expected), file=f)
if __name__ == '__main__': # pragma: nocover
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-l', default=False, action='store_true', dest='doLong')
parser.add_argument('-u', default=False, action='store_true', dest='updateTestData')
args = parser.parse_args()
# Handle possible arguments
doLong = args.doLong
if args.doLong:
sys.argv.remove('-l')
if args.updateTestData:
updateTestData()
sys.argv.remove('-u')
unittest.main()
|
bsd-3-clause
| -2,996,229,248,481,166,000
| 34.417323
| 100
| 0.659627
| false
| 3.219757
| true
| false
| false
|