code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
class QuestionManager(models.Manager):
    """Custom manager exposing the common orderings for Question lists."""

    def new(self):
        """Questions ordered newest-first by creation time."""
        qs = super(QuestionManager, self).get_query_set()
        return qs.all().order_by('-added_at')

    def popular(self):
        """Questions ordered by rating, highest first."""
        qs = super(QuestionManager, self).get_query_set()
        return qs.all().order_by('-rating')
class Question(models.Model):
    """A Q&A question posted by a user."""
    title = models.CharField(max_length=255)
    text = models.TextField()
    # Set once when the row is first created.
    added_at = models.DateTimeField(auto_now_add=True)
    rating = models.IntegerField(null=True, blank=True)
    # NOTE(review): related_name='author' reads backwards (user.author.all()
    # yields the user's questions); renaming would require a migration and
    # updating reverse-accessor callers -- confirm before changing.
    author = models.ForeignKey(User, null=True, related_name='author', on_delete=models.SET_NULL)
    likes = models.ManyToManyField(User, related_name='likes')
    # Custom manager providing .new() / .popular() orderings.
    objects = QuestionManager()

    def get_url(self):
        """Return the URL of this question's detail page (url name 'question')."""
        return reverse('question', kwargs = { 'id': self.id})
class Answer(models.Model):
    """An answer attached to a Question."""
    text = models.TextField()
    # Set once when the row is first created.
    added_at = models.DateTimeField(auto_now_add=True)
    question = models.ForeignKey(Question, null=True, on_delete=models.SET_NULL)
    author = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)

    def get_url(self):
        """Return the URL of the parent question's detail page."""
        return reverse('question', kwargs = { 'id': self.question.id })
| dennis95stumm/stepic_web_project | ask/qa/models.py | Python | gpl-3.0 | 1,192 |
import datetime
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.db import transaction
from django.db.models import Q
from djcode.reservations.models import Day_status, Medical_office, Visit_disable_rule
from djcode.reservations.models import Visit_reservation, Visit_template
class Command(NoArgsCommand):
    """Pregenerate Visit_reservation rows from each office's Visit_template.

    For every Medical_office, creates reservation slots starting the day
    after the latest existing reservation (or today, if none exist) up to
    ``office.days_to_generate`` days from today.  Python 2 script (uses
    print statements).
    """
    help = "Pregenerate Visit_reservation records by Visit_template"

    def handle_noargs(self, **options):
        try:
            for office in Medical_office.objects.all():
                print '\nI: Office: %s' % office
                end_day = datetime.date.today() + datetime.timedelta(office.days_to_generate)
                print 'I: Days to generate: %d' % office.days_to_generate
                # One savepoint per office so a failure can undo that
                # office's partial generation.
                sid = transaction.savepoint()
                try:
                    # Resume from the day after the latest existing slot.
                    day = Visit_reservation.objects.filter(office = office).latest("starting_time").starting_time.date()
                except Visit_reservation.DoesNotExist:
                    day = datetime.date.today()
                day += datetime.timedelta(1)
                while day <= end_day:
                    day_status, day_status_created = Day_status.objects.get_or_create(
                        day=day,
                        office=office,
                        defaults={"has_reservations": False})
                    # Templates valid for this weekday, office and date range
                    # (open-ended templates have valid_until = None).
                    templates = Visit_template.objects.filter(day = day.isoweekday())
                    templates = templates.filter(office = office, valid_since__lte = day)
                    templates = templates.filter(Q(valid_until__exact=None) | Q(valid_until__gt=day))
                    for tmp in templates:
                        starting_time = datetime.datetime.combine(day, tmp.starting_time)
                        # A matching disable rule forces the slot to "disabled".
                        if Visit_disable_rule.objects.filter(begin__lte = starting_time,
                                end__gte = starting_time, office = office):
                            status = 1 # disabled
                        else:
                            status = 2 # enabled
                        print 'I: Creating reservation: %s' % (starting_time)
                        Visit_reservation.objects.create(
                            starting_time=starting_time,
                            office=office,
                            status=status,
                            authenticated_only=tmp.authenticated_only
                        )
                    day += datetime.timedelta(1)
                transaction.savepoint_commit(sid)
        # NOTE(review): this except wraps the whole office loop but rolls back
        # only the most recent office's savepoint; earlier offices stay
        # committed.  Also raises NameError if ValueError fires before the
        # first savepoint is taken -- confirm this is the intended scope.
        except ValueError:
            transaction.savepoint_rollback(sid)
| mmincikova/medobs | djcode/reservations/management/commands/medobsgen.py | Python | gpl-3.0 | 2,042 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import sys
from .core import UnifiedIORegistry
# Public names; the functions themselves are created lazily in __getattr__
# below (hence the F822 suppression -- they are not defined at import time).
__all__ = ["register_reader", "register_writer", "register_identifier",  # noqa: F822
           "unregister_reader", "unregister_writer", "unregister_identifier",
           "get_reader", "get_writer", "get_formats",
           "read", "write",
           "identify_format", "delay_doc_updates"]

# make a default global-state registry (not publicly scoped, but often accessed)
# this is for backward compatibility when ``io.registry`` was a file.
default_registry = UnifiedIORegistry()
# also need to expose the enclosed registries
_identifiers = default_registry._identifiers
_readers = default_registry._readers
_writers = default_registry._writers
def _make_io_func(method_name):
    """Build a module-level function delegating to ``UnifiedIORegistry``.

    .. todo::

        Make kwarg "registry" not hidden.

    Returns
    -------
    wrapper : callable
        Same signature as the method on UnifiedIORegistry, plus a hidden
        keyword "registry" defaulting to ``default_registry``.
    """
    @functools.wraps(getattr(default_registry, method_name))
    def wrapper(*args, registry=None, **kwargs):
        # written this way in case ever controlled by ScienceState
        target = default_registry if registry is None else registry
        bound_method = getattr(target, method_name)
        return bound_method(*args, **kwargs)

    return wrapper
# =============================================================================
# JIT function creation and lookup (PEP 562)
def __dir__():
    """Module ``dir()`` including the lazily-created registry functions."""
    return sorted(list(globals()) + __all__)
def __getattr__(method: str):
    """Create registry functions on demand (PEP 562 module __getattr__)."""
    if method not in __all__:
        raise AttributeError(f"module {__name__!r} has no attribute {method!r}")
    return _make_io_func(method)
| pllim/astropy | astropy/io/registry/compat.py | Python | bsd-3-clause | 1,896 |
"""Script for adding empty UD annotation tiers to ELAN files from SSLC.
Requires pympi: https://github.com/dopefishh/pympi/
"""
import sys
import os
from pympi import Elan
def add_tiers(filename):
    """Return an ``Elan.Eaf`` for *filename* with empty UD tiers added.

    For each of the two signers (S1, S2) this adds Index/UD/Link tiers
    parented to the existing "Glosa_DH"/"Glosa_NonDH" gloss tiers, and
    fills the Index tiers with a running number over the merged,
    time-sorted DH+NonDH glosses.  Python 2 module.
    """
    # Names of the controlled vocabulary / linguistic type specifications.
    # Arbitrary.
    cv = 'ud_dep'
    lang = 'und'
    lingtype = 'ud_lingtype'
    lingtype_cv = lingtype # 'ud_lingtype_cv'
    # This list will be inserted as a controlled vocabulary.
    # NOTE(review): only used by the commented-out CV code below.
    ud_deps = ['amod', 'advmod', 'advcl', 'acl', 'case', 'auxpass', 'aux',
               'appos', 'ccomp', 'cc', 'remnant', 'punct', 'root', 'reparandum',
               'nsubjpass', 'nsubj', 'parataxis', 'nummod', 'xcomp', 'vocative',
               'dobj', 'dislocated', 'csubj', 'cop', 'conj', 'compound',
               'discourse', 'det', 'dep', 'csubjpass', 'goeswith', 'iobj',
               'expl', 'foreign', 'mwe', 'name', 'list', 'mark', 'neg', 'nmod',
               'acl:relcl']

    eaf = Elan.Eaf(filename)

    if 'swl' not in eaf.languages:
        eaf.add_language('swl', 'swl', 'Swedish Sign Language')

    # Add a controlled vocabulary for the UD labels.
    #eaf.add_controlled_vocabulary(cv)
    #eaf.add_cv_description(cv, lang, 'UD dependency labels')
    #for i,dep in enumerate(ud_deps):
    #    eaf.add_cv_entry(cv, 'cveid%d' % i, [(dep, lang, dep)])

    # Add a lingtype which ensures that the annotations are aligned with the
    # respective gloss tier.
    eaf.add_linguistic_type(
        lingtype,
        constraints='Symbolic_Association',
        timealignable=False)

    # Add another lingtype for the UD labels controlled vocabulary.
    #eaf.add_linguistic_type(lingtype_cv, param_dict={
    #    'LINGUISTIC_TYPE_ID': lingtype_cv,
    #    'TIME_ALIGNABLE': 'false',
    #    'GRAPHIC_REFERENCES': 'false',
    #    'CONTROLLED_VOCABULARY_REF': cv})

    for signer in (1, 2):
        def get_glosses(hand):
            # Tag each gloss tuple with its hand so the two tiers can be merged.
            return [(hand,) + t for t in eaf.get_annotation_data_for_tier(
                'Glosa_%s S%d' % (hand, signer))]

        # Get a list of glosses for both hands of this signer.
        glosses = get_glosses('DH') + get_glosses('NonDH')
        # Sort by start time (second element of each tuple).
        glosses.sort(key=lambda t: t[1])

        # Add the necessary tiers for this signer.
        for ud_part in ('Index', 'UD', 'Link'):
            for hand in ('DH', 'NonDH'):
                tier = '%s_%s S%d' % (ud_part, hand, signer)
                ref_tier = 'Glosa_%s S%d' % (hand, signer)
                eaf.add_tier(
                    tier,
                    ling=lingtype_cv if ud_part == 'UD' else lingtype,
                    parent=ref_tier)

        # Enumerate each one of the merged DH+NonDH glosses and write the
        # index to the Index tier.
        for i, (hand, t1, t2, _) in enumerate(glosses):
            tier = 'Index_%s S%d' % (hand, signer)
            ref_tier = 'Glosa_%s S%d' % (hand, signer)
            # Anchor the ref annotation at the gloss's temporal midpoint.
            eaf.add_ref_annotation(tier, ref_tier, (t1+t2)//2, value=str(i+1))

    return eaf
if __name__ == '__main__':
    # Usage: add_tiers.py OUTPUT_DIR INPUT1.eaf [INPUT2.eaf ...]
    # Python 2 script (print statements).
    if len(sys.argv) < 3:
        print 'Usage: add_tiers.py output-directory source-directory/*.eaf'
        sys.exit(1)
    out = sys.argv[1]
    for filename in sys.argv[2:]:
        print 'Converting %s...' % filename
        eaf = add_tiers(filename)
        # Write each result as <basename>_UD.eaf in the output directory.
        base = os.path.splitext(os.path.basename(filename))[0]
        target = os.path.join(out, base+'_UD.eaf')
        Elan.to_eaf(target, eaf)
| robertostling/ud-swl | scripts/add_tiers.py | Python | gpl-3.0 | 3,445 |
from yawPyCrypto import Key
## rsakey = Key.Key(
| kichkasch/ioids | tests/yawpycrypto_test.py | Python | gpl-3.0 | 53 |
import logging
import requests
from redash.destinations import *
from redash.models import Alert
from redash.utils import json_dumps, deprecated
# Alert state -> HipChat message colour; states missing here fall back to
# "green" via colors.get(new_state, "green") in HipChat.notify.
colors = {
    Alert.OK_STATE: "green",
    Alert.TRIGGERED_STATE: "red",
    Alert.UNKNOWN_STATE: "yellow",
}
@deprecated()
class HipChat(BaseDestination):
    """Alert destination that posts state-change messages to a HipChat room."""

    @classmethod
    def configuration_schema(cls):
        """JSON schema describing this destination's settings form."""
        schema = {
            "type": "object",
            "properties": {
                "url": {
                    "type": "string",
                    "title": "HipChat Notification URL (get it from the Integrations page)",
                }
            },
            "secret": ["url"],
            "required": ["url"],
        }
        return schema

    @classmethod
    def icon(cls):
        return "fa-comment-o"

    def notify(self, alert, query, user, new_state, app, host, options):
        """Send an HTML message describing the alert's new state."""
        try:
            alert_link = "{host}/alerts/{alert_id}".format(host=host, alert_id=alert.id)
            query_link = "{host}/queries/{query_id}".format(host=host, query_id=query.id)
            body = '<a href="{alert_url}">{alert_name}</a> changed state to {new_state} (based on <a href="{query_url}">this query</a>).'.format(
                alert_name=alert.name,
                new_state=new_state.upper(),
                alert_url=alert_link,
                query_url=query_link,
            )
            payload = {"message": body, "color": colors.get(new_state, "green")}
            response = requests.post(
                options["url"],
                data=json_dumps(payload),
                headers={"Content-Type": "application/json"},
                timeout=5.0,
            )
            # HipChat replies 204 No Content on success.
            if response.status_code != 204:
                logging.error(
                    "Bad status code received from HipChat: %d", response.status_code
                )
        except Exception:
            logging.exception("HipChat Send ERROR.")


register(HipChat)
| getredash/redash | redash/destinations/hipchat.py | Python | bsd-2-clause | 1,906 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, fmt_money, getdate, formatdate
from frappe import _
from frappe.model.document import Document
class CustomerFrozen(frappe.ValidationError):
    """Raised when posting against a party flagged as frozen."""


class InvalidCurrency(frappe.ValidationError):
    """Currency-mismatch validation error (declared for callers to catch)."""


class InvalidAccountCurrency(frappe.ValidationError):
    """Raised when an entry's currency differs from the account/party currency."""
class GLEntry(Document):
    """A single General Ledger posting.

    Validates account, posting date, cost center, party and currency
    details before saving, and keeps outstanding amounts on linked
    vouchers in sync.
    """

    def validate(self):
        # Suppress the automatic "submitted" timeline comment for GL entries.
        self.flags.ignore_submit_comment = True
        self.check_mandatory()
        self.pl_must_have_cost_center()
        self.validate_posting_date()
        self.check_pl_account()
        self.validate_cost_center()
        self.validate_party()
        self.validate_currency()

    def on_update_with_args(self, adv_adj, update_outstanding = 'Yes'):
        """Post-save checks; adv_adj=True relaxes them for advance adjustments."""
        self.validate_account_details(adv_adj)
        validate_frozen_account(self.account, adv_adj)
        check_freezing_date(self.posting_date, adv_adj)
        validate_balance_type(self.account, adv_adj)

        # Update outstanding amt on against voucher
        if self.against_voucher_type in ['Journal Entry', 'Sales Invoice', 'Purchase Invoice'] \
            and self.against_voucher and update_outstanding == 'Yes':
                update_outstanding_amt(self.account, self.party_type, self.party, self.against_voucher_type,
                    self.against_voucher)

    def check_mandatory(self):
        """Require core fields, a party for receivable/payable accounts,
        and a non-zero debit or credit amount."""
        mandatory = ['account','remarks','voucher_type','voucher_no','fiscal_year','company']
        for k in mandatory:
            if not self.get(k):
                frappe.throw(_("{0} is required").format(self.meta.get_label(k)))

        account_type = frappe.db.get_value("Account", self.account, "account_type")
        if account_type in ["Receivable", "Payable"] and not (self.party_type and self.party):
            frappe.throw(_("Party Type and Party is required for Receivable / Payable account {0}").format(self.account))

        # Zero value transaction is not allowed
        if not (flt(self.debit) or flt(self.credit)):
            frappe.throw(_("Either debit or credit amount is required for {0}").format(self.account))

    def pl_must_have_cost_center(self):
        """P&L accounts need a cost center (except Period Closing Vouchers);
        balance-sheet accounts must not carry one."""
        if frappe.db.get_value("Account", self.account, "report_type") == "Profit and Loss":
            if not self.cost_center and self.voucher_type != 'Period Closing Voucher':
                frappe.throw(_("Cost Center is required for 'Profit and Loss' account {0}").format(self.account))
        elif self.cost_center:
            # Clear a spuriously-set cost center on non-P&L accounts.
            self.cost_center = None

    def validate_posting_date(self):
        from erpnext.accounts.utils import validate_fiscal_year
        validate_fiscal_year(self.posting_date, self.fiscal_year, _("Posting Date"), self)

    def check_pl_account(self):
        """Opening entries may not touch Profit and Loss accounts."""
        if self.is_opening=='Yes' and \
                frappe.db.get_value("Account", self.account, "report_type")=="Profit and Loss":
            frappe.throw(_("'Profit and Loss' type account {0} not allowed in Opening Entry").format(self.account))

    def validate_account_details(self, adv_adj):
        """Account must be ledger, active and not freezed"""
        ret = frappe.db.sql("""select is_group, docstatus, company
            from tabAccount where name=%s""", self.account, as_dict=1)[0]

        if ret.is_group==1:
            frappe.throw(_("Account {0} cannot be a Group").format(self.account))

        if ret.docstatus==2:
            # docstatus 2 == cancelled document.
            frappe.throw(_("Account {0} is inactive").format(self.account))

        if ret.company != self.company:
            frappe.throw(_("Account {0} does not belong to Company {1}").format(self.account, self.company))

    def validate_cost_center(self):
        """Ensure the cost center belongs to this entry's company.

        Lookups are memoized per-instance in self.cost_center_company.
        """
        if not hasattr(self, "cost_center_company"):
            self.cost_center_company = {}

        def _get_cost_center_company():
            if not self.cost_center_company.get(self.cost_center):
                self.cost_center_company[self.cost_center] = frappe.db.get_value(
                    "Cost Center", self.cost_center, "company")

            return self.cost_center_company[self.cost_center]

        if self.cost_center and _get_cost_center_company() != self.company:
            frappe.throw(_("Cost Center {0} does not belong to Company {1}").format(self.cost_center, self.company))

    def validate_party(self):
        """Block postings against frozen parties unless the user holds the
        configured 'frozen accounts modifier' role."""
        if self.party_type and self.party:
            frozen_accounts_modifier = frappe.db.get_value( 'Accounts Settings', None,'frozen_accounts_modifier')
            if not frozen_accounts_modifier in frappe.get_roles():
                if frappe.db.get_value(self.party_type, self.party, "is_frozen"):
                    frappe.throw("{0} {1} is frozen".format(self.party_type, self.party), CustomerFrozen)

    def validate_currency(self):
        """The entry's currency must match the account's currency and, when a
        party is set, the party's account currency."""
        company_currency = frappe.db.get_value("Company", self.company, "default_currency")
        account_currency = frappe.db.get_value("Account", self.account, "account_currency") or company_currency

        if not self.account_currency:
            self.account_currency = company_currency

        if account_currency != self.account_currency:
            frappe.throw(_("Accounting Entry for {0} can only be made in currency: {1}")
                .format(self.account, (account_currency or company_currency)), InvalidAccountCurrency)

        if self.party_type and self.party:
            party_account_currency = frappe.db.get_value(self.party_type, self.party, "party_account_currency") \
                or company_currency

            if party_account_currency != self.account_currency:
                frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
                    .format(self.party_type, self.party, party_account_currency), InvalidAccountCurrency)
def validate_balance_type(account, adv_adj=False):
    """Enforce an account's configured 'balance must be Debit/Credit' rule.

    Skipped for advance adjustments (adv_adj=True).  Balance is computed as
    sum(debit) - sum(credit) over all GL Entries of the account.
    """
    if not adv_adj and account:
        balance_must_be = frappe.db.get_value("Account", account, "balance_must_be")
        if balance_must_be:
            balance = frappe.db.sql("""select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
                from `tabGL Entry` where account = %s""", account)[0][0]

            # Positive balance = net debit; negative = net credit.
            if (balance_must_be=="Debit" and flt(balance) < 0) or \
                (balance_must_be=="Credit" and flt(balance) > 0):
                frappe.throw(_("Balance for Account {0} must always be {1}").format(account, _(balance_must_be)))
def check_freezing_date(posting_date, adv_adj=False):
    """
    Nobody can do GL Entries where posting date is before freezing date
    except authorized person

    Users holding the configured 'frozen accounts modifier' role may still
    post on or before Accounts Settings' ``acc_frozen_upto`` date.  Skipped
    entirely for advance adjustments (adv_adj=True).
    """
    if adv_adj:
        return

    acc_frozen_upto = frappe.db.get_value('Accounts Settings', None, 'acc_frozen_upto')
    if not acc_frozen_upto:
        return

    frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None,
        'frozen_accounts_modifier')

    # Idiom fix: "x not in y" instead of "not x in y".
    if getdate(posting_date) <= getdate(acc_frozen_upto) \
            and frozen_accounts_modifier not in frappe.get_roles():
        frappe.throw(_("You are not authorized to add or update entries before {0}").format(formatdate(acc_frozen_upto)))
def update_outstanding_amt(account, party_type, party, against_voucher_type, against_voucher, on_cancel=False):
    """Recompute and store the outstanding amount of the voucher that this
    GL activity was posted against.

    Sums debit - credit (in account currency) of all GL Entries referencing
    the against-voucher; sign conventions differ per voucher type.
    """
    if party_type and party:
        # Values are escaped via frappe.db.escape before string formatting.
        party_condition = " and ifnull(party_type, '')='{0}' and ifnull(party, '')='{1}'"\
            .format(frappe.db.escape(party_type), frappe.db.escape(party))
    else:
        party_condition = ""

    # get final outstanding amt
    bal = flt(frappe.db.sql("""
        select sum(ifnull(debit_in_account_currency, 0)) - sum(ifnull(credit_in_account_currency, 0))
        from `tabGL Entry`
        where against_voucher_type=%s and against_voucher=%s
        and account = %s {0}""".format(party_condition),
        (against_voucher_type, against_voucher, account))[0][0] or 0.0)

    if against_voucher_type == 'Purchase Invoice':
        # Payables carry the opposite sign of receivables.
        bal = -bal
    elif against_voucher_type == "Journal Entry":
        # Outstanding of a JV = its own unallocated amount plus payments
        # recorded against it; the sign follows the original JV amount.
        against_voucher_amount = flt(frappe.db.sql("""
            select sum(ifnull(debit_in_account_currency, 0)) - sum(ifnull(credit_in_account_currency, 0))
            from `tabGL Entry` where voucher_type = 'Journal Entry' and voucher_no = %s
            and account = %s and ifnull(against_voucher, '') = '' {0}"""
            .format(party_condition), (against_voucher, account))[0][0])

        if not against_voucher_amount:
            frappe.throw(_("Against Journal Entry {0} is already adjusted against some other voucher")
                .format(against_voucher))

        bal = against_voucher_amount + bal
        if against_voucher_amount < 0:
            bal = -bal

        # Validation : Outstanding can not be negative for JV
        if bal < 0 and not on_cancel:
            frappe.throw(_("Outstanding for {0} cannot be less than zero ({1})").format(against_voucher, fmt_money(bal)))

    # Update outstanding amt on against voucher
    if against_voucher_type in ["Sales Invoice", "Purchase Invoice"]:
        frappe.db.sql("update `tab%s` set outstanding_amount=%s where name=%s" %
            (against_voucher_type, '%s', '%s'), (bal, against_voucher))
def validate_frozen_account(account, adv_adj=None):
    """Block changes to a frozen Account.

    Advance adjustments are exempt; otherwise only users holding the
    configured 'frozen accounts modifier' role may proceed.
    """
    frozen = frappe.db.get_value("Account", account, "freeze_account")
    if frozen != 'Yes' or adv_adj:
        return

    modifier_role = frappe.db.get_value('Accounts Settings', None,
        'frozen_accounts_modifier')

    if not modifier_role:
        # No override role configured: the account is simply locked.
        frappe.throw(_("Account {0} is frozen").format(account))
    elif modifier_role not in frappe.get_roles():
        frappe.throw(_("Not authorized to edit frozen Account {0}").format(account))
def update_against_account(voucher_type, voucher_no):
    """Refresh the ``against`` field of every GL Entry of a voucher.

    Each debit row's ``against`` becomes the comma-joined set of parties/
    accounts credited by the voucher, and vice versa for credit rows.
    """
    entries = frappe.db.get_all("GL Entry",
        filters={"voucher_type": voucher_type, "voucher_no": voucher_no},
        fields=["name", "party", "against", "debit", "credit", "account"])

    accounts_debited, accounts_credited = [], []
    for d in entries:
        # Bug fix: the original wrote flt(d.debit > 0), coercing the boolean
        # comparison rather than the amount; compare the converted amount.
        if flt(d.debit) > 0: accounts_debited.append(d.party or d.account)
        if flt(d.credit) > 0: accounts_credited.append(d.party or d.account)

    for d in entries:
        # Reset per row: previously a zero-amount row could reuse the prior
        # row's value, or hit an unbound name on the first iteration.
        new_against = None
        if flt(d.debit) > 0:
            new_against = ", ".join(list(set(accounts_credited)))
        if flt(d.credit) > 0:
            new_against = ", ".join(list(set(accounts_debited)))

        if new_against is not None and d.against != new_against:
            frappe.db.set_value("GL Entry", d.name, "against", new_against)
| hanselke/erpnext-1 | erpnext/accounts/doctype/gl_entry/gl_entry.py | Python | agpl-3.0 | 9,495 |
if __name__=="__main__":
    # Python 2 script: scan a set of security RSS feeds for keywords and
    # exit with the number of matches as the status code.
    import modules.QA_Logger as QA_Logger
    LOG = QA_Logger.getLogger(name="feedmon")
    import modules.SimpleOptparse as SimpleOptparse
    from modules.feedmon import FeedMon

    # Option table: ((long, short), help-text) -> default value.
    optDef = {
        (('--help', '-h'), "This help"): False,
        (('--verbosity', '-v'), "Enable verbose output"): QA_Logger.QA_Logger.L_INFO,
        (('--rss-fields', '-r'), "restrict search to supplied fields only"): "summary,summary_detail,title,title_detail",
        (('--rss-feeds', '-f'), "feed uris (feed1,feed2,...)"): "http://www.exploit-db.com/rss.xml,http://www.securiteam.com/securiteam.rss,http://www.securityfocus.com/rss/vulnerabilities.xml,http://seclists.org/rss/bugtraq.rss,http://seclists.org/rss/fulldisclosure.rss,http://rss.packetstormsecurity.com/files/,http://www.heise.de/security/news/news-atom.xml,http://feeds.feedburner.com/ZDI-Published-Advisories,http://feeds.feedburner.com/ZDI-Upcoming-Advisories,http://www.eeye.com/resources/media-center/rss?rss=Zero-Day-Tracker,http://www.cert.at/all.warnings.all.rss_2.0.xml,http://www.kb.cert.org/vulfeed",
        (('--hours', '-s'), "only check feeds newer than xx hours, default=disabled "): 0,
        (('--minutes', '-m'), "only check feeds newer than xx hours, default=disabled "): 0,
        (('--keywordsfile', '-x'), "load keywords from file (NL separated)"): None,
    }
    options,arguments=SimpleOptparse.parseOpts(optDef)
    LOG.setLevel(int(options['verbosity']))

    # Positional arguments are the search keywords (unless --keywordsfile).
    if not len(arguments):
        print "! - missing search keywords (arguments)"
        print SimpleOptparse.buildUsageString(optDef)
        exit()

    # Comma-separated option strings -> lists.
    if options['rss-fields']:
        options['rss-fields']=options['rss-fields'].split(",")
        LOG.info("RSS-Fields set for scanning: %s"%options['rss-fields'])
    if options['rss-feeds']:
        options['rss-feeds']=options['rss-feeds'].split(",")
        LOG.info("RSS-Feeds: %s"%options['rss-feeds'])

    # Keywords file (newline-separated) overrides the positional keywords.
    if options['keywordsfile']:
        f = open(options['keywordsfile'],'r')
        arguments = f.read().split('\n')
        f.close()

    # start the magic
    fmon = FeedMon(hours=int(options['hours']),minutes=int(options['minutes']))
    fmon.addFeeds(options['rss-feeds'])
    fmon.fetch()
    fmon.setMatchFields(options['rss-fields'])

    keywords = arguments
    warnings = 0
    LOG.debug("searching for keywords in rss feed entries...")
    for e in fmon.search(keywords):
        warnings +=1
        LOG.warning(e)

    LOG.info("--DONE--")
    # Exit status = number of matching entries (0 means nothing found).
    exit(warnings)
| tintinweb/feedmon | src/feedmon.py | Python | gpl-2.0 | 2,806 |
"""A semi-synchronous Client for IPython parallel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import json
from threading import Thread, Event, current_thread
import time
import types
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
pjoin = os.path.join
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from tornado.concurrent import Future
from tornado.gen import multi_future
from traitlets.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.capture import RichOutput
from IPython.utils.coloransi import TermColors
from jupyter_client.jsonutil import extract_dates, parse_date
from jupyter_client.localinterfaces import localhost, is_local_ip
from IPython.paths import get_ipython_dir
from IPython.utils.path import compress_user
from ipython_genutils.py3compat import cast_bytes, string_types, xrange, iteritems
from traitlets import (
HasTraits, Instance, Unicode,
Dict, List, Bool, Set, Any
)
from decorator import decorator
from ipyparallel import Reference
from ipyparallel import error
from ipyparallel import util
from jupyter_client.session import Session
from ipyparallel import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .futures import MessageFuture
from .view import DirectView, LoadBalancedView
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def unpack_message(f, self, msg_parts):
    """Deserialize a raw multipart message, then call the decorated method.

    On a malformed message, logs the error and returns None without calling
    the wrapped method.
    """
    idents, msg = self.session.feed_identities(msg_parts, copy=False)
    try:
        msg = self.session.deserialize(msg, content=True, copy=False)
    except:
        self.log.error("Invalid Message", exc_info=True)
        return None

    if self.debug:
        pprint(msg)
    return f(self, msg)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------

# NOTE(review): appears unreferenced in the visible portion of this module
# (Client.__init__ builds its own no_file_msg inline); confirm against the
# rest of the file before removing.
_no_connection_file_msg = """
Failed to connect because no Controller could be found.
Please double-check your profile and ensure that a cluster is running.
"""
class ExecuteReply(RichOutput):
    """wrapper for finished Execute results"""

    def __init__(self, msg_id, content, metadata):
        self.msg_id = msg_id
        self._content = content
        self.execution_count = content['execution_count']
        self.metadata = metadata

    # RichOutput overrides

    @property
    def source(self):
        """Source of the execute_result, if any."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('source', '')

    @property
    def data(self):
        """Mime-bundle dict of the execute_result (empty if none)."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('data', {})

    @property
    def _metadata(self):
        """Display metadata of the execute_result (empty if none)."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('metadata', {})

    def display(self):
        """Publish this reply's mime bundle via IPython's display system."""
        from IPython.display import publish_display_data
        publish_display_data(self.data, self.metadata)

    def _repr_mime_(self, mime):
        """Return (data[, metadata]) for a given mimetype, or None if absent."""
        if mime not in self.data:
            return
        data = self.data[mime]
        if mime in self._metadata:
            return data, self._metadata[mime]
        else:
            return data

    def __getitem__(self, key):
        # Item access proxies to the metadata dict.
        return self.metadata[key]

    def __getattr__(self, key):
        # Attribute access falls back to metadata keys.
        if key not in self.metadata:
            raise AttributeError(key)
        return self.metadata[key]

    def __repr__(self):
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')
        # Truncate long reprs to keep the summary one line.
        if len(text_out) > 32:
            text_out = text_out[:29] + '...'
        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)

    def _repr_pretty_(self, p, cycle):
        """Pretty-print as an Out[engine:count] line, colorized when the
        current IPython color scheme allows it."""
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')

        if not text_out:
            return

        try:
            ip = get_ipython()
        except NameError:
            colors = "NoColor"
        else:
            colors = ip.colors

        if colors == "NoColor":
            out = normal = ""
        else:
            out = TermColors.Red
            normal = TermColors.Normal

        if '\n' in text_out and not text_out.startswith('\n'):
            # add newline for multiline reprs
            text_out = '\n' + text_out

        p.text(
            out + u'Out[%i:%i]: ' % (
                self.metadata['engine_id'], self.execution_count
            ) + normal + text_out
        )
class Metadata(dict):
    """Subclass of dict for initializing metadata values.

    Attribute access works on keys.

    These objects have a strict set of keys - errors will raise if you try
    to add new keys.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        defaults = dict.fromkeys(
            ('msg_id', 'submitted', 'started', 'completed', 'received',
             'engine_uuid', 'engine_id', 'follow', 'after', 'status',
             'execute_input', 'execute_result', 'error'), None)
        # Fresh mutable containers per instance.
        defaults.update(stdout='', stderr='', outputs=[], data={})
        self.update(defaults)
        self.update(dict(*args, **kwargs))

    def __getattr__(self, key):
        """getattr aliased to getitem"""
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        """setattr aliased to setitem, with strict"""
        if key not in self:
            raise AttributeError(key)
        self[key] = value

    def __setitem__(self, key, value):
        """strict static key enforcement"""
        if key not in self:
            raise KeyError(key)
        dict.__setitem__(self, key, value)
class Client(HasTraits):
"""A semi-synchronous client to an IPython parallel cluster
Parameters
----------
url_file : str
The path to ipcontroller-client.json.
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
String id to added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
Other Parameters
----------------
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
"""
block = Bool(False)
outstanding = Set()
results = Instance('collections.defaultdict', (dict,))
metadata = Instance('collections.defaultdict', (Metadata,))
history = List()
debug = Bool(False)
_futures = Dict()
_output_futures = Dict()
_io_loop = Any()
_io_thread = Any()
profile=Unicode()
def _profile_default(self):
if BaseIPythonApplication.initialized():
# an IPython app *might* be running, try to get its profile
try:
return BaseIPythonApplication.instance().profile
except (AttributeError, MultipleInstanceError):
# could be a *different* subclass of config.Application,
# which would raise one of these two errors.
return u'default'
else:
return u'default'
_outstanding_dict = Instance('collections.defaultdict', (set,))
_ids = List()
_connected=Bool(False)
_ssh=Bool(False)
_context = Instance('zmq.Context', allow_none=True)
_config = Dict()
_engines=Instance(util.ReverseDict, (), {})
_query_socket=Instance('zmq.Socket', allow_none=True)
_control_socket=Instance('zmq.Socket', allow_none=True)
_iopub_socket=Instance('zmq.Socket', allow_none=True)
_notification_socket=Instance('zmq.Socket', allow_none=True)
_mux_socket=Instance('zmq.Socket', allow_none=True)
_task_socket=Instance('zmq.Socket', allow_none=True)
_task_scheme=Unicode()
_closed = False
def __new__(self, *args, **kw):
# don't raise on positional args
return HasTraits.__new__(self, **kw)
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
        context=None, debug=False,
        sshserver=None, sshkey=None, password=None, paramiko=None,
        timeout=10, cluster_id=None, **extra_args
        ):
    """Connect to an IPython cluster.

    Locates the controller's connection file (via explicit ``url_file`` or
    the profile directory), optionally sets up SSH tunnels, constructs the
    Session, and connects all sockets to the Hub.

    Raises IOError when no connection file can be found, ValueError for an
    invalid/old-format connection file.
    """
    if profile:
        super(Client, self).__init__(debug=debug, profile=profile)
    else:
        super(Client, self).__init__(debug=debug)
    if context is None:
        context = zmq.Context.instance()
    self._context = context
    # legacy keyword spelling
    if 'url_or_file' in extra_args:
        url_file = extra_args['url_or_file']
        warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
    if url_file and util.is_url(url_file):
        raise ValueError("single urls cannot be specified, url-files must be used.")
    self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
    no_file_msg = '\n'.join([
    "You have attempted to connect to an IPython Cluster but no Controller could be found.",
    "Please double-check your configuration and ensure that a cluster is running.",
    ])
    # derive url_file from the profile dir when not given explicitly
    if self._cd is not None:
        if url_file is None:
            if not cluster_id:
                client_json = 'ipcontroller-client.json'
            else:
                client_json = 'ipcontroller-%s-client.json' % cluster_id
            url_file = pjoin(self._cd.security_dir, client_json)
            short = compress_user(url_file)
            if not os.path.exists(url_file):
                # the controller may still be starting up: poll for up to 30s
                print("Waiting for connection file: %s" % short)
                for i in range(30):
                    time.sleep(1)
                    if os.path.exists(url_file):
                        break
                if not os.path.exists(url_file):
                    msg = '\n'.join([
                        "Connection file %r not found." % short,
                        no_file_msg,
                    ])
                    raise IOError(msg)
    if url_file is None:
        raise IOError(no_file_msg)
    if not os.path.exists(url_file):
        # Connection file explicitly specified, but not found
        raise IOError("Connection file %r not found. Is a controller running?" % \
            compress_user(url_file)
        )
    with open(url_file) as f:
        cfg = json.load(f)
    self._task_scheme = cfg['task_scheme']
    # sync defaults from args, json:
    if sshserver:
        cfg['ssh'] = sshserver
    location = cfg.setdefault('location', None)
    proto,addr = cfg['interface'].split('://')
    addr = util.disambiguate_ip_address(addr, location)
    cfg['interface'] = "%s://%s" % (proto, addr)
    # turn interface,port into full urls:
    for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
        cfg[key] = cfg['interface'] + ':%i' % cfg[key]
    url = cfg['registration']
    if location is not None and addr == localhost():
        # location specified, and connection is expected to be local
        if not is_local_ip(location) and not sshserver:
            # load ssh from JSON *only* if the controller is not on
            # this machine
            sshserver=cfg['ssh']
        if not is_local_ip(location) and not sshserver:
            # warn if no ssh specified, but SSH is probably needed
            # This is only a warning, because the most likely cause
            # is a local Controller on a laptop whose IP is dynamic
            warnings.warn("""
            Controller appears to be listening on localhost, but not on this machine.
            If this is true, you should specify Client(...,sshserver='you@%s')
            or instruct your controller to listen on an external IP."""%location,
            RuntimeWarning)
    elif not sshserver:
        # otherwise sync with cfg
        sshserver = cfg['ssh']
    self._config = cfg
    self._ssh = bool(sshserver or sshkey or password)
    if self._ssh and sshserver is None:
        # default to ssh via localhost
        sshserver = addr
    if self._ssh and password is None:
        from zmq.ssh import tunnel
        if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
            password=False
        else:
            password = getpass("SSH Password for %s: "%sshserver)
    ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
    # configure and construct the session
    try:
        extra_args['packer'] = cfg['pack']
        extra_args['unpacker'] = cfg['unpack']
        extra_args['key'] = cast_bytes(cfg['key'])
        extra_args['signature_scheme'] = cfg['signature_scheme']
    except KeyError as exc:
        msg = '\n'.join([
            "Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
            "If you are reusing connection files, remove them and start ipcontroller again."
        ])
        raise ValueError(msg.format(exc.message))
    self.session = Session(**extra_args)
    self._query_socket = self._context.socket(zmq.DEALER)
    if self._ssh:
        from zmq.ssh import tunnel
        tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
    else:
        self._query_socket.connect(cfg['registration'])
    self.session.debug = self.debug
    # notification-type -> handler dispatch tables
    self._notification_handlers = {'registration_notification' : self._register_engine,
                                'unregistration_notification' : self._unregister_engine,
                                'shutdown_notification' : lambda msg: self.close(),
                                }
    self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
                            'apply_reply' : self._handle_apply_reply}
    try:
        self._connect(sshserver, ssh_kwargs, timeout)
    except:
        # deliberate bare except: tear down sockets on any failure, then re-raise
        self.close(linger=0)
        raise
    # last step: setup magics, if we are in IPython:
    try:
        ip = get_ipython()
    except NameError:
        return
    else:
        if 'px' not in ip.magics_manager.magics:
            # in IPython but we are the first Client.
            # activate a default view for parallel magics.
            self.activate()
def __del__(self):
    """cleanup sockets, but _not_ context."""
    # close() is idempotent (guarded by self._closed)
    self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
    """Resolve the ProfileDir and cache it on ``self._cd``.

    An explicit ``profile_dir`` path takes precedence over a ``profile``
    name; ``self._cd`` is left as None when nothing can be located.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    # start from "not found", overwrite on success
    self._cd = None
    if profile_dir is not None:
        try:
            self._cd = ProfileDir.find_profile_dir(profile_dir)
        except ProfileDirError:
            pass
    elif profile is not None:
        try:
            self._cd = ProfileDir.find_profile_dir_by_name(
                ipython_dir, profile)
        except ProfileDirError:
            pass
def _update_engines(self, engines):
    """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
    for key, uuid in iteritems(engines):
        eid = int(key)
        if eid not in self._engines:
            self._ids.append(eid)
        self._engines[eid] = uuid
    self._ids = sorted(self._ids)
    # a pure ZMQ scheduler requires contiguous engine ids starting at 0;
    # if that no longer holds, task scheduling must be disabled
    if self._task_scheme == 'pure' and self._task_socket and \
            sorted(self._engines.keys()) != list(range(len(self._engines))):
        self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
    """Stop scheduling tasks because an engine has been unregistered
    from a pure ZMQ scheduler.
    """
    self._task_socket.close()
    self._task_socket = None
    parts = [
        "An engine has been unregistered, and we are using pure ",
        "ZMQ task scheduling. Task farming will be disabled.",
    ]
    if self.outstanding:
        parts.append(" If you were running tasks when this happened, ")
        parts.append("some `outstanding` msg_ids may never resolve.")
    warnings.warn("".join(parts), RuntimeWarning)
def _build_targets(self, targets):
    """Turn valid target IDs or 'all' into two lists:
    (int_ids, uuids).

    Accepts None/'all' (every engine), a single int (negative indexes
    from the end), a slice, or a collection of ints.  Raises
    NoEnginesRegistered, TypeError, or IndexError on invalid input.
    """
    if not self._ids:
        # flush notification socket if no engines yet, just in case
        if not self.ids:
            raise error.NoEnginesRegistered("Can't build targets without any engines")
    if targets is None:
        targets = self._ids
    elif isinstance(targets, string_types):
        if targets.lower() == 'all':
            targets = self._ids
        else:
            raise TypeError("%r not valid str target, must be 'all'"%(targets))
    elif isinstance(targets, int):
        if targets < 0:
            # negative index counts from the end of the id list
            targets = self.ids[targets]
        if targets not in self._ids:
            raise IndexError("No such engine: %i"%targets)
        # normalize single id to a one-element list
        targets = [targets]
    if isinstance(targets, slice):
        # slice over *positions* in the sorted id list, not over raw ids
        indices = list(range(len(self._ids))[targets])
        ids = self.ids
        targets = [ ids[i] for i in indices ]
    if not isinstance(targets, (tuple, list, xrange)):
        raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
    return [cast_bytes(self._engines[t]) for t in targets], list(targets)
def _connect(self, sshserver, ssh_kwargs, timeout):
    """setup all our socket connections to the cluster. This is called from
    __init__.

    Sends a connection_request to the Hub over the query socket, waits up
    to `timeout` seconds for the reply, then connects the mux/task/
    notification/control/iopub sockets and records the initial engine set.
    Raises TimeoutError when the Hub does not reply in time.
    """
    # Maybe allow reconnecting?
    if self._connected:
        return
    self._connected=True
    def connect_socket(s, url):
        # tunnel through ssh when configured, otherwise connect directly
        if self._ssh:
            from zmq.ssh import tunnel
            return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
        else:
            return s.connect(url)
    self.session.send(self._query_socket, 'connection_request')
    # use Poller because zmq.select has wrong units in pyzmq 2.1.7
    poller = zmq.Poller()
    poller.register(self._query_socket, zmq.POLLIN)
    # poll expects milliseconds, timeout is seconds
    evts = poller.poll(timeout*1000)
    if not evts:
        raise error.TimeoutError("Hub connection request timed out")
    idents, msg = self.session.recv(self._query_socket, mode=0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    # self._config['registration'] = dict(content)
    cfg = self._config
    if content['status'] == 'ok':
        self._mux_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._mux_socket, cfg['mux'])
        self._task_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._task_socket, cfg['task'])
        # SUB sockets subscribe to everything
        self._notification_socket = self._context.socket(zmq.SUB)
        self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
        connect_socket(self._notification_socket, cfg['notification'])
        self._control_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._control_socket, cfg['control'])
        self._iopub_socket = self._context.socket(zmq.SUB)
        self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
        connect_socket(self._iopub_socket, cfg['iopub'])
        self._update_engines(dict(content['engines']))
    else:
        self._connected = False
        raise Exception("Failed to connect!")
    self._start_io_thread()
self._start_io_thread()
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
    """unwrap exception, and remap engine_id to int."""
    exc = error.unwrap_exception(content)
    engine_info = exc.engine_info
    if engine_info:
        # replace the opaque uuid with the friendlier int engine id
        engine_info['engine_id'] = self._engines[engine_info['engine_uuid']]
    return exc
def _extract_metadata(self, msg):
    """Build a metadata dict (timing, engine, status) from a reply message."""
    parent = msg['parent_header']
    header = msg['header']
    msg_meta = msg['metadata']
    content = msg['content']
    md = dict(
        msg_id=parent['msg_id'],
        received=datetime.now(),
        engine_uuid=msg_meta.get('engine', None),
        follow=msg_meta.get('follow', []),
        after=msg_meta.get('after', []),
        status=content['status'],
    )
    engine_uuid = md['engine_uuid']
    if engine_uuid is not None:
        md['engine_id'] = self._engines.get(engine_uuid, None)
    # optional timestamps, present depending on message stage
    if 'date' in parent:
        md['submitted'] = parent['date']
    if 'started' in msg_meta:
        md['started'] = parse_date(msg_meta['started'])
    if 'date' in header:
        md['completed'] = header['date']
    return md
def _register_engine(self, msg):
    """Register a new engine, and update our connection info."""
    content = msg['content']
    self._update_engines({content['id']: content['uuid']})
def _unregister_engine(self, msg):
    """Unregister an engine that has died."""
    eid = int(msg['content']['id'])
    if eid in self._ids:
        self._ids.remove(eid)
        # drop the id->uuid mapping and resolve any stranded messages
        self._handle_stranded_msgs(eid, self._engines.pop(eid))
    # a pure ZMQ scheduler cannot tolerate engine departure
    if self._task_socket and self._task_scheme == 'pure':
        self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
    """Handle messages known to be on an engine when the engine unregisters.

    It is possible that this will fire prematurely - that is, an engine will
    go down after completing a result, and the client will be notified
    of the unregistration and later receive the successful result.

    Parameters
    ----------
    eid : int
        the id of the engine that died
    uuid : str
        the uuid of the engine that died
    """
    outstanding = self._outstanding_dict[uuid]
    for msg_id in list(outstanding):
        if msg_id in self.results:
            # we already have the result; the engine died after delivering it
            continue
        try:
            raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
        except Exception:
            # raise/catch so wrap_exception captures a real traceback;
            # narrowed from a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit
            content = error.wrap_exception()
        # build a fake message:
        msg = self.session.msg('apply_reply', content=content)
        msg['parent_header']['msg_id'] = msg_id
        msg['metadata']['engine'] = uuid
        self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
    """Save the reply to an execute_request into our results.

    execute messages are never actually used. apply is used instead.
    """
    parent = msg['parent_header']
    msg_id = parent['msg_id']
    future = self._futures.get(msg_id, None)
    if msg_id not in self.outstanding:
        # reply for something we no longer (or never) track
        if msg_id in self.history:
            print("got stale result: %s"%msg_id)
        else:
            print("got unknown result: %s"%msg_id)
    else:
        self.outstanding.remove(msg_id)
    content = msg['content']
    header = msg['header']
    # construct metadata:
    md = self.metadata[msg_id]
    md.update(self._extract_metadata(msg))
    # clear per-engine bookkeeping used for engine-death handling
    e_outstanding = self._outstanding_dict[md['engine_uuid']]
    if msg_id in e_outstanding:
        e_outstanding.remove(msg_id)
    # construct result:
    if content['status'] == 'ok':
        self.results[msg_id] = ExecuteReply(msg_id, content, md)
    elif content['status'] == 'aborted':
        self.results[msg_id] = error.TaskAborted(msg_id)
        # aborted tasks will not get output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    elif content['status'] == 'resubmitted':
        # TODO: handle resubmission
        pass
    else:
        # error reply: store the remapped remote exception
        self.results[msg_id] = self._unwrap_exception(content)
    if content['status'] != 'ok' and not content.get('engine_info'):
        # not an engine failure, don't expect output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    if future:
        future.set_result(self.results[msg_id])
def _handle_apply_reply(self, msg):
    """Save the reply to an apply_request into our results."""
    parent = msg['parent_header']
    msg_id = parent['msg_id']
    future = self._futures.get(msg_id, None)
    if msg_id not in self.outstanding:
        # reply for something we no longer (or never) track
        if msg_id in self.history:
            print("got stale result: %s"%msg_id)
            print(self.results[msg_id])
            print(msg)
        else:
            print("got unknown result: %s"%msg_id)
    else:
        self.outstanding.remove(msg_id)
    content = msg['content']
    header = msg['header']
    # construct metadata:
    md = self.metadata[msg_id]
    md.update(self._extract_metadata(msg))
    # clear per-engine bookkeeping used for engine-death handling
    e_outstanding = self._outstanding_dict[md['engine_uuid']]
    if msg_id in e_outstanding:
        e_outstanding.remove(msg_id)
    # construct result:
    if content['status'] == 'ok':
        # the actual return value travels in the message buffers
        self.results[msg_id] = serialize.deserialize_object(msg['buffers'])[0]
    elif content['status'] == 'aborted':
        self.results[msg_id] = error.TaskAborted(msg_id)
        # aborted tasks will not get output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    elif content['status'] == 'resubmitted':
        # TODO: handle resubmission
        pass
    else:
        # error reply: store the remapped remote exception
        self.results[msg_id] = self._unwrap_exception(content)
    if content['status'] != 'ok' and not content.get('engine_info'):
        # not an engine failure, don't expect output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    if future:
        future.set_result(self.results[msg_id])
def _make_io_loop(self):
    """Make my IOLoop. Override (e.g. return IOLoop.current()) to reuse
    an existing loop instead of creating a private one."""
    return IOLoop()
def _stop_io_thread(self):
    """Stop my IO thread"""
    loop = self._io_loop
    if loop:
        # ask the loop to stop itself from inside its own thread
        loop.add_callback(loop.stop)
    thread = self._io_thread
    # never join ourselves (close() may be called from the IO thread)
    if thread and thread is not current_thread():
        thread.join()
def _start_io_thread(self):
    """Start IOLoop in a background thread."""
    self._io_loop = self._make_io_loop()
    # (stream attribute, socket, dispatch handler) for every channel
    wiring = [
        ('_query_stream', self._query_socket, self._dispatch_single_reply),
        ('_control_stream', self._control_socket, self._dispatch_single_reply),
        ('_mux_stream', self._mux_socket, self._dispatch_reply),
        ('_task_stream', self._task_socket, self._dispatch_reply),
        ('_iopub_stream', self._iopub_socket, self._dispatch_iopub),
        ('_notification_stream', self._notification_socket, self._dispatch_notification),
    ]
    for attr, sock, handler in wiring:
        stream = ZMQStream(sock, self._io_loop)
        stream.on_recv(handler, copy=False)
        setattr(self, attr, stream)
    self._io_thread = Thread(target=self._io_main)
    self._io_thread.daemon = True
    self._io_thread.start()
def _io_main(self):
    """main loop for background IO thread"""
    # runs until _stop_io_thread schedules loop.stop()
    self._io_loop.start()
    self._io_loop.close()
@unpack_message
def _dispatch_single_reply(self, msg):
    """Dispatch single (non-execution) replies"""
    msg_id = msg['parent_header'].get('msg_id', None)
    future = self._futures.get(msg_id)
    # replies without a registered future are silently dropped
    if future is not None:
        future.set_result(msg)
@unpack_message
def _dispatch_notification(self, msg):
    """Dispatch notification messages"""
    msg_type = msg['header']['msg_type']
    handler = self._notification_handlers.get(msg_type, None)
    if handler is not None:
        handler(msg)
    else:
        raise KeyError("Unhandled notification message type: %s" % msg_type)
@unpack_message
def _dispatch_reply(self, msg):
    """handle execution replies waiting in ZMQ queue."""
    msg_type = msg['header']['msg_type']
    handler = self._queue_handlers.get(msg_type, None)
    if handler is not None:
        handler(msg)
    else:
        raise KeyError("Unhandled reply message type: %s" % msg_type)
@unpack_message
def _dispatch_iopub(self, msg):
    """handler for IOPub messages

    Accumulates stdout/stderr, display outputs, and errors into the
    per-msg_id metadata record; resolves the output future on 'idle'.
    """
    parent = msg['parent_header']
    if not parent or parent['session'] != self.session.session:
        # ignore IOPub messages not from here
        return
    msg_id = parent['msg_id']
    content = msg['content']
    header = msg['header']
    msg_type = msg['header']['msg_type']
    if msg_type == 'status' and msg_id not in self.metadata:
        # ignore status messages if they aren't mine
        return
    # init metadata:
    md = self.metadata[msg_id]
    if msg_type == 'stream':
        # append stream text (stdout/stderr) to what we have so far
        name = content['name']
        s = md[name] or ''
        md[name] = s + content['text']
    elif msg_type == 'error':
        md.update({'error' : self._unwrap_exception(content)})
    elif msg_type == 'execute_input':
        md.update({'execute_input' : content['code']})
    elif msg_type == 'display_data':
        md['outputs'].append(content)
    elif msg_type == 'execute_result':
        md['execute_result'] = content
    elif msg_type == 'data_message':
        data, remainder = serialize.deserialize_object(msg['buffers'])
        md['data'].update(data)
    elif msg_type == 'status':
        # idle message comes after all outputs
        if content['execution_state'] == 'idle':
            future = self._output_futures.get(msg_id)
            if future and not future.done():
                # TODO: should probably store actual outputs on the Future
                future.set_result(None)
    else:
        # unhandled msg_type (status, etc.)
        pass
def _send(self, socket, msg_type, content=None, parent=None, ident=None,
        buffers=None, track=False, header=None, metadata=None):
    """Send a message in the IO thread

    Returns a MessageFuture that resolves with the reply message."""
    if self._closed:
        raise IOError("Connections have been closed.")
    msg = self.session.msg(msg_type, content=content, parent=parent,
                        header=header, metadata=metadata)
    msg_id = msg['header']['msg_id']
    # execution requests also get an output future, resolved when all
    # IOPub output for the message has arrived
    asyncresult = False
    if msg_type in {'execute_request', 'apply_request'}:
        asyncresult = True
        # add future for output
        self._output_futures[msg_id] = output = MessageFuture(msg_id)
        # hook up metadata
        output.metadata = self.metadata[msg_id]
    self._futures[msg_id] = future = MessageFuture(msg_id, track=track)
    futures = [future]
    if asyncresult:
        future.output = output
        futures.append(output)
        output.metadata['submitted'] = datetime.now()
    def cleanup(f):
        """Purge caches on Future resolution"""
        self.results.pop(msg_id, None)
        self._futures.pop(msg_id, None)
        self._output_futures.pop(msg_id, None)
        self.metadata.pop(msg_id, None)
    # only purge once *all* futures for this msg_id have resolved
    multi_future(futures).add_done_callback(cleanup)
    def _really_send():
        sent = self.session.send(socket, msg, track=track, buffers=buffers, ident=ident)
        if track:
            future.tracker.set_result(sent['tracker'])
    # hand off actual send to IO thread
    self._io_loop.add_callback(_really_send)
    return future
def _send_recv(self, *args, **kwargs):
    """Send a message in the IO thread and return its reply"""
    # blocking convenience wrapper around _send
    reply_future = self._send(*args, **kwargs)
    reply_future.wait()
    return reply_future.result()
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
    """len(client) returns # of engines."""
    return len(self.ids)
def __getitem__(self, key):
    """index access returns DirectView multiplexer objects

    Must be int, slice, or list/tuple/xrange of ints"""
    if isinstance(key, (int, slice, tuple, list, xrange)):
        return self.direct_view(key)
    raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
def __iter__(self):
    """Since we define getitem, Client is iterable

    but unless we also define __iter__, it won't work correctly unless engine IDs
    start at zero and are continuous.
    """
    # yield one single-engine DirectView per registered engine
    for eid in self.ids:
        yield self.direct_view(eid)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
    """Always up-to-date ids property."""
    # always copy:
    return list(self._ids)
def activate(self, targets='all', suffix=''):
    """Create a DirectView and register it with IPython magics

    Defines the magics `%px, %autopx, %pxresult, %%px`

    Parameters
    ----------
    targets: int, list of ints, or 'all'
        The engines on which the view's magics will run
    suffix: str [default: '']
        The suffix, if any, for the magics.  This allows you to have
        multiple views associated with parallel magics at the same time.

        e.g. ``rc.activate(targets=0, suffix='0')`` will give you
        the magics ``%px0``, ``%pxresult0``, etc. for running magics just
        on engine 0.
    """
    dv = self.direct_view(targets)
    # parallel magics are blocking by convention
    dv.block = True
    dv.activate(suffix)
    return dv
def close(self, linger=None):
    """Close my zmq Sockets

    If `linger`, set the zmq LINGER socket option,
    which allows discarding of messages.
    """
    if self._closed:
        return
    self._stop_io_thread()
    # every trait whose name ends in "socket" holds a zmq socket (or None)
    for name in self.trait_names():
        if not name.endswith("socket"):
            continue
        sock = getattr(self, name)
        if sock is None or sock.closed:
            continue
        if linger is None:
            sock.close()
        else:
            sock.close(linger=linger)
    self._closed = True
def spin_thread(self, interval=1):
    """DEPRECATED, DOES NOTHING"""
    # kept only for backward compatibility; IO now always runs in a thread
    msg = "Client.spin_thread is deprecated now that IO is always in a thread"
    warnings.warn(msg, DeprecationWarning)
def stop_spin_thread(self):
    """DEPRECATED, DOES NOTHING"""
    # fix: the deprecation message previously named Client.spin_thread,
    # misattributing the call site; it must name this method.
    warnings.warn("Client.stop_spin_thread is deprecated now that IO is always in a thread", DeprecationWarning)
def spin(self):
    """DEPRECATED, DOES NOTHING"""
    # kept only for backward compatibility; IO now always runs in a thread
    msg = "Client.spin is deprecated now that IO is in a thread"
    warnings.warn(msg, DeprecationWarning)
def _await_futures(self, futures, timeout):
    """Wait for a collection of futures"""
    if not futures:
        return True
    # negative timeout means wait forever
    if timeout and timeout < 0:
        timeout = None
    done = Event()
    combined = multi_future(futures)
    combined.add_done_callback(lambda _: done.set())
    return done.wait(timeout)
def _futures_for_msgs(self, msg_ids):
    """Turn msg_ids into Futures

    msg_ids not in futures dict are presumed done.
    """
    candidates = (self._futures.get(msg_id, None) for msg_id in msg_ids)
    return [f for f in candidates if f]
def wait(self, jobs=None, timeout=-1):
    """waits on one or more `jobs`, for up to `timeout` seconds.

    Parameters
    ----------
    jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
        ints are indices to self.history
        strs are msg_ids
        default: wait on all outstanding messages
    timeout : float
        a time in seconds, after which to give up.
        default is -1, which means no timeout

    Returns
    -------
    True : when all msg_ids are done
    False : timeout reached, some msg_ids still outstanding
    """
    if jobs is None:
        theids = self.outstanding
    else:
        # normalize a single job to a list
        if isinstance(jobs, string_types + (int, AsyncResult)):
            jobs = [jobs]
        theids = set()
        for job in jobs:
            if isinstance(job, int):
                # index access
                job = self.history[job]
            elif isinstance(job, AsyncResult):
                # an AsyncResult may cover several msg_ids
                theids.update(job.msg_ids)
                continue
            theids.add(job)
    # nothing requested is still outstanding: done
    if not theids.intersection(self.outstanding):
        return True
    futures = self._futures_for_msgs(theids)
    return self._await_futures(futures, timeout)
def wait_interactive(self, jobs=None, interval=1., timeout=-1.):
    """Wait interactively for jobs

    If no job is specified, will wait for all outstanding jobs to complete.
    """
    if jobs is None:
        # futures carrying an `output` attribute correspond to executions
        exec_futures = [f for f in self._futures.values() if hasattr(f, 'output')]
        ar = AsyncResult(self, exec_futures, owner=False)
    else:
        ar = self._asyncresult_from_jobs(jobs, owner=False)
    return ar.wait_interactive(interval=interval, timeout=timeout)
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
def clear(self, targets=None, block=None):
    """Clear the namespace in target(s)."""
    if block is None:
        block = self.block
    engine_idents = self._build_targets(targets)[0]
    futures = [
        self._send(self._control_socket, 'clear_request', content={}, ident=ident)
        for ident in engine_idents
    ]
    if not block:
        return multi_future(futures)
    # blocking: wait for every reply and raise on the first failure
    for f in futures:
        f.wait()
        reply = f.result()
        if reply['content']['status'] != 'ok':
            raise self._unwrap_exception(reply['content'])
def abort(self, jobs=None, targets=None, block=None):
    """Abort specific jobs from the execution queues of target(s).

    This is a mechanism to prevent jobs that have already been submitted
    from executing.

    Parameters
    ----------
    jobs : msg_id, list of msg_ids, or AsyncResult
        The jobs to be aborted

        If unspecified/None: abort all outstanding jobs.
    """
    if block is None:
        block = self.block
    if jobs is None:
        jobs = list(self.outstanding)
    engine_idents = self._build_targets(targets)[0]
    # normalize a single job to a list
    if isinstance(jobs, string_types + (AsyncResult,)):
        jobs = [jobs]
    bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
    if bad_ids:
        raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
    # flatten AsyncResults into their msg_ids
    msg_ids = []
    for job in jobs:
        if isinstance(job, AsyncResult):
            msg_ids.extend(job.msg_ids)
        else:
            msg_ids.append(job)
    content = dict(msg_ids=msg_ids)
    futures = [
        self._send(self._control_socket, 'abort_request',
                   content=content, ident=ident)
        for ident in engine_idents
    ]
    if not block:
        return multi_future(futures)
    else:
        # blocking: wait for every reply and raise on the first failure
        for f in futures:
            f.wait()
            reply = f.result()
            if reply['content']['status'] != 'ok':
                raise self._unwrap_exception(reply['content'])
def shutdown(self, targets='all', restart=False, hub=False, block=None):
    """Terminates one or more engine processes, optionally including the hub.

    Parameters
    ----------
    targets: list of ints or 'all' [default: all]
        Which engines to shutdown.
    hub: bool [default: False]
        Whether to include the Hub.  hub=True implies targets='all'.
    block: bool [default: self.block]
        Whether to wait for clean shutdown replies or not.
    restart: bool [default: False]
        NOT IMPLEMENTED
        whether to restart engines after shutting them down.
    """
    from ipyparallel.error import NoEnginesRegistered
    if restart:
        raise NotImplementedError("Engine restart is not yet implemented")
    block = self.block if block is None else block
    if hub:
        targets = 'all'
    try:
        targets = self._build_targets(targets)[0]
    except NoEnginesRegistered:
        # nothing to shut down engine-wise; may still shut down the hub
        targets = []
    futures = []
    for t in targets:
        futures.append(self._send(self._control_socket, 'shutdown_request',
                    content={'restart':restart},ident=t))
    # collect the last failure (if any) instead of raising immediately
    error = False
    if block or hub:
        for f in futures:
            f.wait()
            msg = f.result()
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    if hub:
        # don't trigger close on shutdown notification, which will prevent us from receiving the reply
        self._notification_handlers['shutdown_notification'] = lambda msg: None
        msg = self._send_recv(self._query_socket, 'shutdown_request')
        if msg['content']['status'] != 'ok':
            error = self._unwrap_exception(msg['content'])
        if not error:
            self.close()
    if error:
        raise error
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
    """wrapper for maybe raising an exception if apply failed."""
    if isinstance(result, error.RemoteError):
        raise result
    # anything that isn't a RemoteError passes through unchanged
    return result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
                    ident=None):
    """construct and send an apply message via a socket.

    This is the principal method with which all engine execution is performed by views.

    Returns the MessageFuture for the request; raises RuntimeError when
    the client is closed, TypeError on invalid f/args/kwargs/metadata.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")
    # defaults:
    args = args if args is not None else []
    kwargs = kwargs if kwargs is not None else {}
    metadata = metadata if metadata is not None else {}
    # validate arguments
    if not callable(f) and not isinstance(f, Reference):
        raise TypeError("f must be callable, not %s"%type(f))
    if not isinstance(args, (tuple, list)):
        raise TypeError("args must be tuple or list, not %s"%type(args))
    if not isinstance(kwargs, dict):
        raise TypeError("kwargs must be dict, not %s"%type(kwargs))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s"%type(metadata))
    # serialize the call into message buffers
    bufs = serialize.pack_apply_message(f, args, kwargs,
        buffer_threshold=self.session.buffer_threshold,
        item_threshold=self.session.item_threshold,
    )
    future = self._send(socket, "apply_request", buffers=bufs, ident=ident,
                        metadata=metadata, track=track)
    msg_id = future.msg_id
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    return future
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
    """construct and send an execute request via a socket.

    Returns the MessageFuture for the request; raises RuntimeError when
    the client is closed, TypeError on invalid code/metadata.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")
    # defaults:
    metadata = metadata if metadata is not None else {}
    # validate arguments
    if not isinstance(code, string_types):
        raise TypeError("code must be text, not %s" % type(code))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s" % type(metadata))
    content = dict(code=code, silent=bool(silent), user_expressions={})
    future = self._send(socket, "execute_request", content=content, ident=ident,
                        metadata=metadata)
    msg_id = future.msg_id
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()
    return future
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
    """construct a LoadBalancedView object.

    If no arguments are specified, create a LoadBalancedView
    using all engines.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance execution
    """
    # 'all' is equivalent to no restriction for load-balancing
    if targets == 'all':
        targets = None
    if targets is not None:
        targets = self._build_targets(targets)[1]
    return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
def executor(self, targets=None):
    """Construct a PEP-3148 Executor with a LoadBalancedView

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance execution

    Returns
    -------
    executor: Executor
        The Executor object
    """
    view = self.load_balanced_view(targets)
    return view.executor
def direct_view(self, targets='all'):
    """construct a DirectView object.

    If no targets are specified, create a DirectView using all engines.

    rc.direct_view('all') is distinguished from rc[:] in that 'all' will
    evaluate the target engines at each execution, whereas rc[:] will connect to
    all *current* engines, and that list will not change.

    That is, 'all' will always use all engines, whereas rc[:] will not use
    engines added after the DirectView is constructed.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The engines to use for the View
    """
    want_single = isinstance(targets, int)
    # allow 'all' to be lazily evaluated at each execution
    if targets != 'all':
        targets = self._build_targets(targets)[1]
    if want_single:
        # a bare int yields a single-engine view, not a one-element list
        targets = targets[0]
    return DirectView(client=self, socket=self._mux_socket, targets=targets)
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
def get_result(self, indices_or_msg_ids=None, block=None, owner=True):
    """Look up results by msg_id or history index, wrapped in an AsyncResult.

    If the results are already cached locally no Hub request is made;
    otherwise an AsyncHubResult is returned that fetches them from the
    Hub. This also works for tasks submitted by other clients, and is a
    convenient way to get at the metadata of blocking executions.

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, AsyncResult,
        or a list of same. Defaults to -1 (the most recent submission).
    block : bool
        Whether to wait for the result to be done
    owner : bool [default: True]
        Whether this AsyncResult should own the result: an owning
        AsyncResult removes the data from the client's result/metadata
        caches on `ar.get()`. There should only be one owner per msg_id.

    Returns
    -------
    AsyncResult (or the AsyncHubResult subclass when the Hub is needed)
    """
    if block is None:
        block = self.block
    if indices_or_msg_ids is None:
        indices_or_msg_ids = -1
    ar = self._asyncresult_from_jobs(indices_or_msg_ids, owner=owner)
    if block:
        ar.wait()
    return ar
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
    """Resubmit one or more tasks. In-flight tasks may not be resubmitted.

    NOTE(review): `metadata` is accepted but not forwarded to the Hub --
    confirm whether it was ever honored.

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, or list of either
        The tasks to resubmit. Defaults to -1 (most recent).
    block : bool
        Whether to wait for the result to be done

    Returns
    -------
    AsyncHubResult
        A subclass of AsyncResult that retrieves results from the Hub
    """
    if block is None:
        block = self.block
    if indices_or_msg_ids is None:
        indices_or_msg_ids = -1
    theids = self._msg_ids_from_jobs(indices_or_msg_ids)
    reply = self._send_recv(self._query_socket, 'resubmit_request',
                            dict(msg_ids=theids))
    content = reply['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    # the Hub assigns fresh msg_ids; track those, in the original order
    mapping = content['resubmitted']
    ar = AsyncHubResult(self, [mapping[m] for m in theids])
    if block:
        ar.wait()
    return ar
def result_status(self, msg_ids, status_only=True):
    """Check on the status of the result(s) of the apply request with `msg_ids`.

    If status_only is False, then the actual results will be retrieved, else
    only the status of the results will be checked.

    Parameters
    ----------
    msg_ids : list of msg_ids
        if int:
            Passed as index to self.history for convenience.
    status_only : bool (default: True)
        if False:
            Retrieve the actual results of completed tasks.

    Returns
    -------
    results : dict
        There will always be the keys 'pending' and 'completed', which will
        be lists of msg_ids that are incomplete or complete. If `status_only`
        is False, then completed results will be keyed by their `msg_id`.
    """
    theids = self._msg_ids_from_jobs(msg_ids)
    completed = []
    local_results = {}
    # Serve anything already in the local cache without a Hub round-trip.
    # BUG FIX: iterate over a snapshot of `theids` -- the original removed
    # elements from the list while iterating it, which skips the element
    # following each cache hit and needlessly re-requests it from the Hub.
    for msg_id in list(theids):
        if msg_id in self.results:
            completed.append(msg_id)
            local_results[msg_id] = self.results[msg_id]
            theids.remove(msg_id)
    if theids:  # some not locally cached
        content = dict(msg_ids=theids, status_only=status_only)
        reply = self._send_recv(self._query_socket, "result_request", content=content)
        content = reply['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        buffers = reply['buffers']
    else:
        content = dict(completed=[], pending=[])
    content['completed'].extend(completed)
    if status_only:
        return content
    failures = []
    # load cached results into result:
    content.update(local_results)
    # update cache with results:
    for msg_id in sorted(theids):
        if msg_id in content['completed']:
            rec = content[msg_id]
            parent = extract_dates(rec['header'])
            header = extract_dates(rec['result_header'])
            rcontent = rec['result_content']
            iodict = rec['io']
            if isinstance(rcontent, str):
                # still serialized: unpack via the session
                rcontent = self.session.unpack(rcontent)
            md = self.metadata[msg_id]
            md_msg = dict(
                content=rcontent,
                parent_header=parent,
                header=header,
                metadata=rec['result_metadata'],
            )
            md.update(self._extract_metadata(md_msg))
            if rec.get('received'):
                md['received'] = parse_date(rec['received'])
            md.update(iodict)
            if rcontent['status'] == 'ok':
                if header['msg_type'] == 'apply_reply':
                    res, buffers = serialize.deserialize_object(buffers)
                elif header['msg_type'] == 'execute_reply':
                    res = ExecuteReply(msg_id, rcontent, md)
                else:
                    raise KeyError("unhandled msg type: %r" % header['msg_type'])
            else:
                res = self._unwrap_exception(rcontent)
                failures.append(res)
            self.results[msg_id] = res
            content[msg_id] = res
    if len(theids) == 1 and failures:
        raise failures[0]
    error.collect_exceptions(failures, "result_status")
    return content
def queue_status(self, targets='all', verbose=False):
    """Fetch the status of engine queues.

    Parameters
    ----------
    targets : int/str/list of ints/strs
        the engines whose states are to be queried.
        default : all
    verbose : bool
        Whether to return lengths only, or lists of ids for each element
    """
    # 'all' is evaluated Hub-side, so leave it as None
    engine_ids = None if targets == 'all' else self._build_targets(targets)[1]
    reply = self._send_recv(self._query_socket, "queue_request",
                            content=dict(targets=engine_ids, verbose=verbose))
    content = reply['content']
    status = content.pop('status')
    if status != 'ok':
        raise self._unwrap_exception(content)
    content = util.int_keys(content)
    # a single-int request gets just that engine's entry
    return content[targets] if isinstance(targets, int) else content
def _msg_ids_from_target(self, targets=None):
    """Collect the msg_ids of all tasks run on the given engine targets."""
    # guard: _build_targets(None/[]) would expand to *all* engines
    if not targets:
        return []
    uuids = self._build_targets(targets)[0]
    return [mid for mid, md in self.metadata.items() if md["engine_uuid"] in uuids]
def _msg_ids_from_jobs(self, jobs=None):
    """Normalize a 'jobs' argument into a flat list of msg_id strings.

    A job may be a msg_id string, an integer index into this Client's
    history, or an AsyncResult; a single job or any collection of them
    is accepted.
    """
    if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
        jobs = [jobs]
    msg_ids = []
    for job in jobs:
        if isinstance(job, AsyncResult):
            msg_ids.extend(job.msg_ids)
        elif isinstance(job, int):
            msg_ids.append(self.history[job])
        elif isinstance(job, string_types):
            msg_ids.append(job)
        else:
            raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
    return msg_ids
def _asyncresult_from_jobs(self, jobs=None, owner=False):
    """Construct an AsyncResult from msg_ids or asyncresult objects

    Accepts a single job or a collection; each job may be an int index
    into self.history, a msg_id string, or an existing AsyncResult.
    Locally-known jobs are collected as futures; anything unknown is
    delegated to the Hub via an AsyncHubResult.
    """
    if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
        # remember scalar-ness so a scalar result is returned below
        single = True
        jobs = [jobs]
    else:
        single = False
    futures = []
    msg_ids = []
    for job in jobs:
        if isinstance(job, int):
            # integer: index into this client's submission history
            job = self.history[job]
        if isinstance(job, string_types):
            if job in self._futures:
                # NOTE(review): this appends the msg_id string itself, not
                # self._futures[job]; every other branch collects Future
                # objects -- confirm this is intended.
                futures.append(job)
            elif job in self.results:
                # finished job: wrap the cached result in an
                # already-resolved future for a uniform interface
                f = MessageFuture(job)
                f.set_result(self.results[job])
                f.output = Future()
                f.output.metadata = self.metadata[job]
                f.output.set_result(None)
                futures.append(f)
            else:
                # unknown locally; must be fetched from the Hub
                msg_ids.append(job)
        elif isinstance(job, AsyncResult):
            if job._children:
                futures.extend(job._children)
            else:
                msg_ids.extend(job.msg_ids)
        else:
            raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
    if msg_ids:
        # any Hub-only ids force an AsyncHubResult for the whole request
        if single:
            msg_ids = msg_ids[0]
        return AsyncHubResult(self, msg_ids, owner=owner)
    else:
        if single and futures:
            futures = futures[0]
        return AsyncResult(self, futures, owner=owner)
def purge_local_results(self, jobs=None, targets=None):
    """Clears the client caches of results and their metadata.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_local_results('all')` to scrub everything from the Client's
    results and metadata caches.

    After this call all `AsyncResults` are invalid and should be discarded.

    If you must "reget" the results, you can still do so by using
    `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
    redownload the results from the hub if they are still available
    (i.e `client.purge_hub_results(...)` has not been called.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be purged.
    targets : int/list of ints
        The engines, by integer ID, whose entire result histories are to be purged.

    Raises
    ------
    RuntimeError : if any of the tasks to be purged are still outstanding.
    ValueError : if neither `jobs` nor `targets` is given.
    """
    # BUG FIX: defaults were mutable lists ([]); use None sentinels instead.
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")
    if jobs == 'all':
        if self.outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
        self.results.clear()
        self.metadata.clear()
        self._futures.clear()
        self._output_futures.clear()
    else:
        msg_ids = set()
        msg_ids.update(self._msg_ids_from_target(targets))
        msg_ids.update(self._msg_ids_from_jobs(jobs))
        still_outstanding = self.outstanding.intersection(msg_ids)
        if still_outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
        for mid in msg_ids:
            self.results.pop(mid, None)
            self.metadata.pop(mid, None)
            self._futures.pop(mid, None)
            self._output_futures.pop(mid, None)
def purge_hub_results(self, jobs=None, targets=None):
    """Tell the Hub to forget results.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub everything from the Hub's db.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.
        default : None

    Raises
    ------
    ValueError : if neither `jobs` nor `targets` is given.
    """
    # BUG FIX: defaults were mutable lists ([]); use None sentinels instead.
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")
    if targets:
        targets = self._build_targets(targets)[1]
    # construct msg_ids from jobs ('all' is passed through verbatim)
    if jobs == 'all':
        msg_ids = jobs
    else:
        msg_ids = self._msg_ids_from_jobs(jobs)
    content = dict(engine_ids=targets, msg_ids=msg_ids)
    reply = self._send_recv(self._query_socket, "purge_request", content=content)
    content = reply['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
def purge_results(self, jobs=None, targets=None):
    """Clears the cached results from both the hub and the local client

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub every cached result from both the Hub's and
    the Client's db.

    Equivalent to calling both `purge_hub_results()` and `purge_client_results()` with
    the same arguments.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.
        default : None
    """
    # BUG FIX: defaults were mutable lists ([]); use None sentinels instead.
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    self.purge_local_results(jobs=jobs, targets=targets)
    self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
    """Forget all traces of previous tasks, locally and on the Hub.

    Beyond `purge_results("all")`, this also resets the submission
    history and the session's digest history.
    """
    self.purge_results("all")
    self.history = []
    self.session.digest_history.clear()
def hub_history(self):
    """Get the Hub's history

    Just like the Client, the Hub has a history, which is a list of msg_ids.
    This will contain the history of all clients, and, depending on configuration,
    may contain history across multiple cluster sessions.

    Any msg_id returned here is a valid argument to `get_result`.

    Returns
    -------
    msg_ids : list of strs
        list of all msg_ids, ordered by task submission time.
    """
    reply = self._send_recv(self._query_socket, "history_request", content={})
    content = reply['content']
    if content['status'] == 'ok':
        return content['history']
    raise self._unwrap_exception(content)
def db_query(self, query, keys=None):
    """Query the Hub's TaskRecord database

    This will return a list of task record dicts that match `query`

    Parameters
    ----------
    query : mongodb query dict
        The search dict. See mongodb query docs for details.
    keys : list of strs [optional]
        The subset of keys to be returned. The default is to fetch everything but buffers.
        'msg_id' will *always* be included.
    """
    # allow a single key to be passed as a bare string
    if isinstance(keys, string_types):
        keys = [keys]
    content = dict(query=query, keys=keys)
    reply = self._send_recv(self._query_socket, "db_request", content=content)
    content = reply['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    records = content['records']
    # per-record buffer counts; None means buffers were not requested
    buffer_lens = content['buffer_lens']
    result_buffer_lens = content['result_buffer_lens']
    buffers = reply['buffers']
    has_bufs = buffer_lens is not None
    has_rbufs = result_buffer_lens is not None
    for i, rec in enumerate(records):
        # unpack datetime objects
        for hkey in ('header', 'result_header'):
            if hkey in rec:
                rec[hkey] = extract_dates(rec[hkey])
        for dtkey in ('submitted', 'started', 'completed', 'received'):
            if dtkey in rec:
                rec[dtkey] = parse_date(rec[dtkey])
        # relink buffers: the flat buffer list is consumed in record order,
        # buffer_lens[i] request buffers then result_buffer_lens[i] result
        # buffers per record
        if has_bufs:
            blen = buffer_lens[i]
            rec['buffers'], buffers = buffers[:blen], buffers[blen:]
        if has_rbufs:
            blen = result_buffer_lens[i]
            rec['result_buffers'], buffers = buffers[:blen], buffers[blen:]
    return records
# Explicit public API: star-imports of this module expose only Client.
__all__ = [ 'Client' ]
| fzheng/codejam | lib/python2.7/site-packages/ipyparallel/client/client.py | Python | mit | 70,766 |
"""
# Copyright (C) 2007 Nathan Ramella (nar@remix.net)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Nathan Ramella <nar@remix.net> or visit http://www.remix.net
This script is based off the Ableton Live supplied MIDI Remote Scripts, customised
for OSC request delivery and response. This script can be run without any extra
Python libraries out of the box.
This is the second file that is loaded, by way of being instantiated through
__init__.py
"""
import Live
import LiveOSCCallbacks
import RemixNet
import OSC
import LiveUtils
import sys
from Logger import log
class LiveOSC:
    __module__ = __name__
    __doc__ = "Main class that establishes the LiveOSC Component"
    # Listener registries mapping a Live object to the exact callback that
    # was registered on it, so the same callable can be removed later.
    # NOTE(review): these are class-level attributes, shared by every
    # instance of LiveOSC. Live normally creates one instance per script
    # slot, so this is presumably harmless -- confirm before reusing.
    prlisten = {}
    plisten = {}
    dlisten = {}
    clisten = {}    # clip -> playing_status callback
    slisten = {}    # clip slot -> has_clip callback
    pplisten = {}   # clip -> playing_position callback
    cnlisten = {}   # clip -> name callback
    cclisten = {}   # clip -> color callback
    wlisten = {}    # audio clip -> warping callback
    llisten = {}    # clip -> looping callback
    _send_pos = {}  # track id -> countdown of position updates still to send (see clip_position)
    # Mixer listener registries for normal tracks, return tracks, master.
    mlisten = { "solo": {}, "mute": {}, "arm": {}, "panning": {}, "volume": {}, "sends": {}, "name": {}, "oml": {}, "omr": {} }
    rlisten = { "solo": {}, "mute": {}, "panning": {}, "volume": {}, "sends": {}, "name": {} }
    masterlisten = { "panning": {}, "volume": {}, "crossfader": {} }
    scenelisten = {}
    scene = 0   # last selected scene index reported over OSC (1-based)
    track = 0   # last selected track index reported over OSC (1-based)
def __init__(self, c_instance):
    """Create the OSC endpoint, announce startup, and watch track changes."""
    self._LiveOSC__c_instance = c_instance
    self.basicAPI = 0
    self.oscEndpoint = RemixNet.OSCEndpoint()
    self.oscEndpoint.send('/remix/oscserver/startup', 1)
    log("LiveOSC initialized")
    # Re-sync all listeners whenever the set of visible tracks changes.
    if self.song().visible_tracks_has_listener(self.refresh_state) != 1:
        self.song().add_visible_tracks_listener(self.refresh_state)
######################################################################
# Standard Ableton Methods
def connect_script_instances(self, instanciated_scripts):
"""
Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do it
connect the extension modules
"""
return
def is_extension(self):
    """This surface is a primary script, not an extension."""
    return False
def request_rebuild_midi_map(self):
    """
    To be called from any components, as soon as their internal state changed in a
    way, that we do need to remap the mappings that are processed directly by the
    Live engine.

    Dont assume that the request will immediately result in a call to
    your build_midi_map function. For performance reasons this is only
    called once per GUI frame.

    LiveOSC does no direct MIDI mapping, so nothing needs to happen here.
    """
    return
def update_display(self):
    """
    This function is run every 100ms, so we use it to initiate our Song.current_song_time
    listener to allow us to process incoming OSC commands as quickly as possible under
    the current listener scheme.
    """
    ######################################################
    # START OSC LISTENER SETUP
    if self.basicAPI == 0:
        # By default we have set basicAPI to 0 so that we can assign it after
        # initialization. We try to get the current song and if we can we'll
        # connect our basicAPI callbacks to the listener allowing us to
        # respond to incoming OSC every 60ms.
        #
        # Since this method is called every 100ms regardless of the song time
        # changing, we use both methods for processing incoming UDP requests
        # so that from a resting state you can initiate play/clip triggering.
        try:
            doc = self.song()
        except:
            # bare except kept deliberately: the song handle may simply not
            # be available yet during startup; retry on the next tick
            log('could not get song handle')
            return
        try:
            self.basicAPI = LiveOSCCallbacks.LiveOSCCallbacks(self._LiveOSC__c_instance, self.oscEndpoint)
            # Commented for stability
            self.time = 0
            doc.add_current_song_time_listener(self.current_song_time_changed)
        except:
            # best-effort: report the failure over OSC and retry next tick
            self.oscEndpoint.send('/remix/echo', 'setting up basicAPI failed')
            log('setting up basicAPI failed');
            return
    # If our OSC server is listening, try processing incoming requests.
    # Any 'play' initiation will trigger the current_song_time listener
    # and bump updates from 100ms to 60ms.
    if self.oscEndpoint:
        # self.oscEndpoint.send('/live/ping', '')
        try:
            self.oscEndpoint.processIncomingUDP()
        except:
            # malformed packets must never take down the surface script
            log('error processing incoming UDP packets:', sys.exc_info());
    # END OSC LISTENER SETUP
    ######################################################
def current_song_time_changed(self):
    """Broadcast /live/beat each time the integer beat count advances."""
    beat = int(self.song().current_song_time)
    if beat != self.time:
        self.time = beat
        self.oscEndpoint.send("/live/beat", self.time)
def send_midi(self, midi_event_bytes):
    """Hook for forwarding MIDI events through Live to the assigned
    hardware; LiveOSC sends nothing, so this is a no-op."""
    pass
def receive_midi(self, midi_bytes):
    """Incoming MIDI is ignored; all control flows over OSC."""
    return
def can_lock_to_devices(self):
    """LiveOSC cannot be locked to a device."""
    return False
def suggest_input_port(self):
    """No preferred MIDI input port."""
    return ''
def suggest_output_port(self):
    """No preferred MIDI output port."""
    return ''
def __handle_display_switch_ids(self, switch_id, value):
    # Control-surface hook for display switches; LiveOSC ignores them.
    pass
######################################################################
# Useful Methods
def application(self):
    """Return the Live application object this script runs inside."""
    return Live.Application.get_application()
def song(self):
    """Return the Live Song (set) this surface interacts with."""
    return self._LiveOSC__c_instance.song()
def handle(self):
    """Return the c_instance handle needed for MIDI-map forwarding."""
    return self._LiveOSC__c_instance.handle()
def getslots(self):
    """Return one clip-slot collection per visible track."""
    return [track.clip_slots for track in self.song().visible_tracks]
def trBlock(self, trackOffset, blocksize):
    """Send the names of `blocksize` visible tracks starting at `trackOffset`."""
    tracks = self.song().visible_tracks
    names = [str(tracks[trackOffset + i].name) for i in range(blocksize)]
    self.oscEndpoint.send("/live/name/trackblock", names)
######################################################################
# Used Ableton Methods
def disconnect(self):
    """Called by Live on unload: detach all listeners, announce shutdown,
    and stop the OSC server."""
    for remove in (self.rem_clip_listeners,
                   self.rem_mixer_listeners,
                   self.rem_scene_listeners,
                   self.rem_tempo_listener,
                   self.rem_overdub_listener,
                   self.rem_tracks_listener,
                   self.rem_device_listeners,
                   self.rem_transport_listener):
        remove()
    self.song().remove_visible_tracks_listener(self.refresh_state)
    self.oscEndpoint.send('/remix/oscserver/shutdown', 1)
    self.oscEndpoint.shutdown()
def build_midi_map(self, midi_map_handle):
    """Live rebuilds its MIDI map; reuse the callback to resync all state."""
    self.refresh_state()
def refresh_state(self):
    """Re-register every listener and push the full set of track and clip
    names over OSC. Called on load, on MIDI-map rebuilds, and whenever
    the set of visible tracks changes."""
    self.add_clip_listeners()
    self.add_mixer_listeners()
    self.add_scene_listeners()
    self.add_tempo_listener()
    self.add_overdub_listener()
    self.add_tracks_listener()
    self.add_device_listeners()
    self.add_transport_listener()
    trackNumber = 0
    clipNumber = 0
    # one OSC bundle per track: track name plus every populated clip name
    for track in self.song().visible_tracks:
        bundle = OSC.OSCBundle()
        bundle.append("/live/name/track", (trackNumber, str(track.name), track.color, int(track.has_midi_input)))
        # bundle.append("/live/name/track", (trackNumber, str(track.name),int(track.has_midi_input)))
        for clipSlot in track.clip_slots:
            if clipSlot.clip != None:
                bundle.append("/live/name/clip", (trackNumber, clipNumber, str(clipSlot.clip.name), clipSlot.clip.color))
            clipNumber = clipNumber + 1
        clipNumber = 0
        trackNumber = trackNumber + 1
        self.oscEndpoint.sendMessage(bundle)
    self.trBlock(0, len(self.song().visible_tracks))
######################################################################
# Add / Remove Listeners
def add_scene_listeners(self):
    """(Re)attach listeners for scene and track selection changes."""
    self.rem_scene_listeners()
    view = self.song().view
    if view.selected_scene_has_listener(self.scene_change) != 1:
        view.add_selected_scene_listener(self.scene_change)
    if view.selected_track_has_listener(self.track_change) != 1:
        view.add_selected_track_listener(self.track_change)
def rem_scene_listeners(self):
    """Detach the scene/track selection listeners if attached."""
    view = self.song().view
    if view.selected_scene_has_listener(self.scene_change) == 1:
        view.remove_selected_scene_listener(self.scene_change)
    if view.selected_track_has_listener(self.track_change) == 1:
        view.remove_selected_track_listener(self.track_change)
def track_change(self):
    """Send /live/track when the selected track's 1-based index changes."""
    selected = self.song().view.selected_track
    selected_index = 0
    for position, track in enumerate(self.song().visible_tracks, 1):
        if track == selected:
            selected_index = position
    if selected_index != self.track:
        self.track = selected_index
        self.oscEndpoint.send("/live/track", (selected_index))
def scene_change(self):
    """Send /live/scene when the selected scene's 1-based index changes."""
    selected = self.song().view.selected_scene
    selected_index = 0
    for position, scene in enumerate(self.song().scenes, 1):
        if scene == selected:
            selected_index = position
    if selected_index != self.scene:
        self.scene = selected_index
        self.oscEndpoint.send("/live/scene", (selected_index))
def add_tempo_listener(self):
    """(Re)attach the listener that reports tempo changes over OSC."""
    self.rem_tempo_listener()
    # FIX: was a stray Python 2 `print` debug statement; route through the
    # module logger like the rest of this file.
    log("add tempo listener")
    if self.song().tempo_has_listener(self.tempo_change) != 1:
        self.song().add_tempo_listener(self.tempo_change)
def rem_tempo_listener(self):
    """Detach the tempo listener if it is currently attached."""
    if self.song().tempo_has_listener(self.tempo_change) == 1:
        self.song().remove_tempo_listener(self.tempo_change)
def tempo_change(self):
    """Report the current song tempo on /live/tempo."""
    self.oscEndpoint.send("/live/tempo", (LiveUtils.getTempo()))
def add_transport_listener(self):
    """Attach (once) the play/stop transport listener."""
    if self.song().is_playing_has_listener(self.transport_change) != 1:
        self.song().add_is_playing_listener(self.transport_change)
def rem_transport_listener(self):
    """Detach the play/stop transport listener if attached."""
    if self.song().is_playing_has_listener(self.transport_change) == 1:
        self.song().remove_is_playing_listener(self.transport_change)
def transport_change(self):
    """Report transport state on /live/play: 2 while playing, 1 while stopped."""
    state = 2 if self.song().is_playing else 1
    self.oscEndpoint.send("/live/play", (state))
def add_overdub_listener(self):
    """(Re)attach the overdub state listener."""
    self.rem_overdub_listener()
    if self.song().overdub_has_listener(self.overdub_change) != 1:
        self.song().add_overdub_listener(self.overdub_change)
def rem_overdub_listener(self):
    """Detach the overdub state listener if attached."""
    if self.song().overdub_has_listener(self.overdub_change) == 1:
        self.song().remove_overdub_listener(self.overdub_change)
def overdub_change(self):
    """Report overdub state on /live/overdub: 2 when on, 1 when off."""
    state = LiveUtils.getSong().overdub
    self.oscEndpoint.send("/live/overdub", (int(state) + 1))
def add_tracks_listener(self):
    """(Re)attach the tracks-added/removed listener."""
    self.rem_tracks_listener()
    if self.song().tracks_has_listener(self.tracks_change) != 1:
        self.song().add_tracks_listener(self.tracks_change)
def rem_tracks_listener(self):
    """Detach the tracks-added/removed listener if attached."""
    # BUG FIX: the original queried tracks_has_listener(self.tempo_change),
    # so the tracks listener was always reported absent and never removed.
    if self.song().tracks_has_listener(self.tracks_change) == 1:
        self.song().remove_tracks_listener(self.tracks_change)
def tracks_change(self):
    """Ask OSC clients to refresh when tracks are added or removed."""
    self.oscEndpoint.send("/live/refresh", (1))
def rem_clip_listeners(self):
    """Detach every clip/slot listener registered by add_clip_listeners
    and reset the registries. Safe to call when nothing is registered.

    NOTE: reassigning the registries below creates per-instance dicts
    that shadow the class-level defaults."""
    for slot in self.slisten:
        if slot != None:
            if slot.has_clip_has_listener(self.slisten[slot]) == 1:
                slot.remove_has_clip_listener(self.slisten[slot])
    self.slisten = {}
    for clip in self.clisten:
        if clip != None:
            if clip.playing_status_has_listener(self.clisten[clip]) == 1:
                clip.remove_playing_status_listener(self.clisten[clip])
    self.clisten = {}
    for clip in self.pplisten:
        if clip != None:
            if clip.playing_position_has_listener(self.pplisten[clip]) == 1:
                clip.remove_playing_position_listener(self.pplisten[clip])
    self.pplisten = {}
    for clip in self.cnlisten:
        if clip != None:
            if clip.name_has_listener(self.cnlisten[clip]) == 1:
                clip.remove_name_listener(self.cnlisten[clip])
    self.cnlisten = {}
    for clip in self.cclisten:
        if clip != None:
            if clip.color_has_listener(self.cclisten[clip]) == 1:
                clip.remove_color_listener(self.cclisten[clip])
    self.cclisten = {}
    for clip in self.wlisten:
        if clip != None:
            # warping only exists on audio clips
            if clip.is_audio_clip:
                if clip.warping_has_listener(self.wlisten[clip]) == 1:
                    clip.remove_warping_listener(self.wlisten[clip])
    self.wlisten = {}
    for clip in self.llisten:
        if clip != None:
            if clip.looping_has_listener(self.llisten[clip]) == 1:
                clip.remove_looping_listener(self.llisten[clip])
    self.llisten = {}
def add_clip_listeners(self):
    """Rebuild clip and slot listeners for every visible clip slot."""
    self.rem_clip_listeners()
    for tid, slots in enumerate(self.getslots()):
        for cid, slot in enumerate(slots):
            if slot.clip != None:
                self.add_cliplistener(slot.clip, tid, cid)
                log("ClipLauncher: added clip listener tr: " + str(tid) + " clip: " + str(cid));
            self.add_slotlistener(slot, tid, cid)
def add_cliplistener(self, clip, tid, cid):
    """Register playing-status / position / name / color (and, for audio
    clips, warping and looping) listeners on one clip.

    Each callback is stored in the matching registry keyed by the clip,
    so rem_clip_listeners() can remove exactly what was added.
    (dict.has_key() replaced by `in`: equivalent, and Python 3 safe.)
    """
    cb = lambda: self.clip_changestate(clip, tid, cid)
    if clip not in self.clisten:
        clip.add_playing_status_listener(cb)
        self.clisten[clip] = cb
    cb2 = lambda: self.clip_position(clip, tid, cid)
    if clip not in self.pplisten:
        clip.add_playing_position_listener(cb2)
        self.pplisten[clip] = cb2
    cb3 = lambda: self.clip_name(clip, tid, cid)
    if clip not in self.cnlisten:
        clip.add_name_listener(cb3)
        self.cnlisten[clip] = cb3
    # the same callback serves color changes: /live/name/clip carries
    # both name and color, so a color change just re-sends it
    if clip not in self.cclisten:
        clip.add_color_listener(cb3)
        self.cclisten[clip] = cb3
    if clip.is_audio_clip:
        cb4 = lambda: self.clip_warping(clip, tid, cid)
        if clip not in self.wlisten:
            clip.add_warping_listener(cb4)
            self.wlisten[clip] = cb4
        cb5 = lambda: self.clip_looping(clip, tid, cid)
        if clip not in self.llisten:
            clip.add_looping_listener(cb5)
            self.llisten[clip] = cb5
def add_slotlistener(self, slot, tid, cid):
    """Register a has_clip listener on one clip slot, at most once.

    (dict.has_key() replaced by `in`: equivalent, and Python 3 safe.)
    """
    if slot not in self.slisten:
        cb = lambda: self.slot_changestate(slot, tid, cid)
        slot.add_has_clip_listener(cb)
        self.slisten[slot] = cb
def rem_mixer_listeners(self):
    """Detach every mixer listener (master, normal and return tracks)
    and reset the mixer registries.

    Listener attribute names are assembled dynamically from the fixed
    type names below and dispatched via eval(); no external input is
    ever evaluated."""
    # Master Track
    for type in ("volume", "panning", "crossfader"):
        for tr in self.masterlisten[type]:
            if tr != None:
                cb = self.masterlisten[type][tr]
                test = eval("tr.mixer_device." + type+ ".value_has_listener(cb)")
                if test == 1:
                    eval("tr.mixer_device." + type + ".remove_value_listener(cb)")
    # Normal Tracks
    for type in ("arm", "solo", "mute"):
        for tr in self.mlisten[type]:
            if tr != None:
                cb = self.mlisten[type][tr]
                # arm only exists on tracks that can be armed
                if type == "arm":
                    if tr.can_be_armed == 1:
                        if tr.arm_has_listener(cb) == 1:
                            tr.remove_arm_listener(cb)
                else:
                    test = eval("tr." + type+ "_has_listener(cb)")
                    if test == 1:
                        eval("tr.remove_" + type + "_listener(cb)")
    for type in ("volume", "panning"):
        for tr in self.mlisten[type]:
            if tr != None:
                cb = self.mlisten[type][tr]
                test = eval("tr.mixer_device." + type+ ".value_has_listener(cb)")
                if test == 1:
                    eval("tr.mixer_device." + type + ".remove_value_listener(cb)")
    # sends registry is nested: track -> send parameter -> callback
    for tr in self.mlisten["sends"]:
        if tr != None:
            for send in self.mlisten["sends"][tr]:
                if send != None:
                    cb = self.mlisten["sends"][tr][send]
                    if send.value_has_listener(cb) == 1:
                        send.remove_value_listener(cb)
    for tr in self.mlisten["name"]:
        if tr != None:
            cb = self.mlisten["name"][tr]
            if tr.name_has_listener(cb) == 1:
                tr.remove_name_listener(cb)
    # output meters (left/right)
    for tr in self.mlisten["oml"]:
        if tr != None:
            cb = self.mlisten["oml"][tr]
            if tr.output_meter_left_has_listener(cb) == 1:
                tr.remove_output_meter_left_listener(cb)
    for tr in self.mlisten["omr"]:
        if tr != None:
            cb = self.mlisten["omr"][tr]
            if tr.output_meter_right_has_listener(cb) == 1:
                tr.remove_output_meter_right_listener(cb)
    # Return Tracks
    for type in ("solo", "mute"):
        for tr in self.rlisten[type]:
            if tr != None:
                cb = self.rlisten[type][tr]
                test = eval("tr." + type+ "_has_listener(cb)")
                if test == 1:
                    eval("tr.remove_" + type + "_listener(cb)")
    for type in ("volume", "panning"):
        for tr in self.rlisten[type]:
            if tr != None:
                cb = self.rlisten[type][tr]
                test = eval("tr.mixer_device." + type+ ".value_has_listener(cb)")
                if test == 1:
                    eval("tr.mixer_device." + type + ".remove_value_listener(cb)")
    for tr in self.rlisten["sends"]:
        if tr != None:
            for send in self.rlisten["sends"][tr]:
                if send != None:
                    cb = self.rlisten["sends"][tr][send]
                    if send.value_has_listener(cb) == 1:
                        send.remove_value_listener(cb)
    for tr in self.rlisten["name"]:
        if tr != None:
            cb = self.rlisten["name"][tr]
            if tr.name_has_listener(cb) == 1:
                tr.remove_name_listener(cb)
    # reset the registries (instance attributes shadow the class defaults)
    self.mlisten = { "solo": {}, "mute": {}, "arm": {}, "panning": {}, "volume": {}, "sends": {}, "name": {}, "oml": {}, "omr": {} }
    self.rlisten = { "solo": {}, "mute": {}, "panning": {}, "volume": {}, "sends": {}, "name": {} }
    self.masterlisten = { "panning": {}, "volume": {}, "crossfader": {} }
def add_mixer_listeners(self):
    """Register mixer listeners for the master track, every visible
    track, and every return track (volume/pan, toggles, sends, names,
    output meters). Always starts from a clean slate via
    rem_mixer_listeners()."""
    self.rem_mixer_listeners()
    # Master Track
    tr = self.song().master_track
    for type in ("volume", "panning", "crossfader"):
        self.add_master_listener(0, type, tr)
    self.add_meter_listener(0, tr, 2)
    # Normal Tracks
    tracks = self.song().visible_tracks
    for track in range(len(tracks)):
        tr = tracks[track]
        self.add_trname_listener(track, tr, 0)
        # MIDI tracks without audio output have no meters
        if tr.has_audio_output:
            self.add_meter_listener(track, tr)
        for type in ("arm", "solo", "mute"):
            # arm only exists on armable tracks
            if type == "arm":
                if tr.can_be_armed == 1:
                    self.add_mixert_listener(track, type, tr)
            else:
                self.add_mixert_listener(track, type, tr)
        for type in ("volume", "panning"):
            self.add_mixerv_listener(track, type, tr)
        for sid in range(len(tr.mixer_device.sends)):
            self.add_send_listener(track, tr, sid, tr.mixer_device.sends[sid])
    # Return Tracks
    tracks = self.song().return_tracks
    for track in range(len(tracks)):
        tr = tracks[track]
        self.add_trname_listener(track, tr, 1)
        # NOTE(review): return-track meters are stored in mlisten (not
        # rlisten); keys are track objects so no collision occurs --
        # confirm this was intended.
        self.add_meter_listener(track, tr, 1)
        for type in ("solo", "mute"):
            self.add_retmixert_listener(track, type, tr)
        for type in ("volume", "panning"):
            self.add_retmixerv_listener(track, type, tr)
        for sid in range(len(tr.mixer_device.sends)):
            self.add_retsend_listener(track, tr, sid, tr.mixer_device.sends[sid])
# Add track listeners
def add_send_listener(self, tid, track, sid, send):
    """Attach a value listener to one send of a normal track, at most once.

    (dict.has_key() replaced by `in`: equivalent, and Python 3 safe.)
    """
    if track not in self.mlisten["sends"]:
        self.mlisten["sends"][track] = {}
    if send not in self.mlisten["sends"][track]:
        cb = lambda: self.send_changestate(tid, track, sid, send)
        self.mlisten["sends"][track][send] = cb
        send.add_value_listener(cb)
def add_mixert_listener(self, tid, type, track):
    """Attach a toggle (arm/solo/mute) listener on a normal track, once.

    getattr replaces the original eval() over an assembled string --
    same attribute lookup, without dynamic code evaluation.
    """
    if track not in self.mlisten[type]:
        cb = lambda: self.mixert_changestate(type, tid, track)
        self.mlisten[type][track] = cb
        getattr(track, "add_" + type + "_listener")(cb)
def add_mixerv_listener(self, tid, type, track):
    """Attach a volume/panning value listener on a normal track, once.

    getattr replaces the original eval() over an assembled string.
    """
    if track not in self.mlisten[type]:
        cb = lambda: self.mixerv_changestate(type, tid, track)
        self.mlisten[type][track] = cb
        getattr(track.mixer_device, type).add_value_listener(cb)
# Add master listeners
def add_master_listener(self, tid, type, track):
    """Attach a volume/panning/crossfader listener on the master track, once.

    getattr replaces the original eval() over an assembled string.
    """
    if track not in self.masterlisten[type]:
        cb = lambda: self.mixerv_changestate(type, tid, track, 2)
        self.masterlisten[type][track] = cb
        getattr(track.mixer_device, type).add_value_listener(cb)
# Add return listeners
def add_retsend_listener(self, tid, track, sid, send):
    """Attach a value listener to one send of a return track, at most once.

    (dict.has_key() replaced by `in`: equivalent, and Python 3 safe.)
    """
    if track not in self.rlisten["sends"]:
        self.rlisten["sends"][track] = {}
    if send not in self.rlisten["sends"][track]:
        cb = lambda: self.send_changestate(tid, track, sid, send, 1)
        self.rlisten["sends"][track][send] = cb
        send.add_value_listener(cb)
def add_retmixert_listener(self, tid, type, track):
if self.rlisten[type].has_key(track) != 1:
cb = lambda :self.mixert_changestate(type, tid, track, 1)
self.rlisten[type][track] = cb
eval("track.add_" + type + "_listener(cb)")
def add_retmixerv_listener(self, tid, type, track):
if self.rlisten[type].has_key(track) != 1:
cb = lambda :self.mixerv_changestate(type, tid, track, 1)
self.rlisten[type][track] = cb
eval("track.mixer_device." + type + ".add_value_listener(cb)")
# Track name listener
def add_trname_listener(self, tid, track, ret = 0):
cb = lambda :self.trname_changestate(tid, track, ret)
if ret == 1:
if self.rlisten["name"].has_key(track) != 1:
self.rlisten["name"][track] = cb
else:
if self.mlisten["name"].has_key(track) != 1:
self.mlisten["name"][track] = cb
track.add_name_listener(cb)
# Output Meter Listeners
def add_meter_listener(self, tid, track, r = 0):
cb = lambda :self.meter_changestate(tid, track, 0, r)
if self.mlisten["oml"].has_key(track) != 1:
self.mlisten["oml"][track] = cb
track.add_output_meter_left_listener(cb)
cb = lambda :self.meter_changestate(tid, track, 1, r)
if self.mlisten["omr"].has_key(track) != 1:
self.mlisten["omr"][track] = cb
track.add_output_meter_right_listener(cb)
######################################################################
# Listener Callbacks
# Clip Callbacks
def clip_warping(self, clip, tid, cid):
    """OSC notify: the warp flag of clip (track tid, slot cid) changed."""
    self.oscEndpoint.send('/live/clip/warping', (tid, cid, int(clip.warping)))
def clip_looping(self, clip, tid, cid):
    """OSC notify: the loop flag of clip (track tid, slot cid) changed."""
    self.oscEndpoint.send('/live/clip/loopstate', (tid, cid, int(clip.looping)))
def clip_name(self, clip, tid, cid):
    """OSC notify: name or color of clip (track tid, slot cid) changed."""
    self.oscEndpoint.send('/live/name/clip', (tid, cid, str(clip.name), clip.color))
def clip_position(self, clip, tid, cid):
    """OSC notify: playing position of clip (track tid, slot cid).

    Emission is gated by the master-device switches (check_md 1 and 5);
    self._send_pos[tid] acts as a per-track countdown that lets a few
    extra position messages through right after a state change.
    """
    # dict.get replaces the py2-only has_key() + and/or idiom.
    send = self._send_pos.get(tid, 0)
    if self.check_md(1) or (self.check_md(5) and send):
        if clip.is_playing:
            if send > 0:
                self._send_pos[tid] -= 1
            self.oscEndpoint.send('/live/clip/position', (tid, cid, clip.playing_position, clip.length, clip.loop_start, clip.loop_end))
def slot_changestate(self, slot, tid, cid):
    """A clip slot's content changed: a clip was added or removed.

    For a new clip: attach its listeners and announce track info + name.
    For a removed clip: detach every listener registered for it and
    announce the now-empty slot.
    """
    tmptrack = LiveUtils.getTrack(tid)
    armed = tmptrack.arm and 1 or 0
    if slot.clip != None:
        # A clip appeared in this slot: wire it up and announce it.
        self.add_cliplistener(slot.clip, tid, cid)
        playing = 1
        if slot.clip.is_playing == 1:
            playing = 2
        if slot.clip.is_triggered == 1:
            playing = 3
        length = slot.clip.loop_end - slot.clip.loop_start
        self.oscEndpoint.send('/live/track/info', (tid, armed, cid, playing, length))
        self.oscEndpoint.send('/live/name/clip', (tid, cid, str(slot.clip.name), slot.clip.color))
    else:
        # The clip was removed. slot.clip is None here, and None is never
        # registered, so each membership test is simply False.
        # `in` replaces the py2-only dict.has_key().
        if slot.clip in self.clisten:
            slot.clip.remove_playing_status_listener(self.clisten[slot.clip])
        if slot.clip in self.pplisten:
            slot.clip.remove_playing_position_listener(self.pplisten[slot.clip])
        if slot.clip in self.cnlisten:
            slot.clip.remove_name_listener(self.cnlisten[slot.clip])
        if slot.clip in self.cclisten:
            slot.clip.remove_color_listener(self.cclisten[slot.clip])
        self.oscEndpoint.send('/live/track/info', (tid, armed, cid, 0, 0.0))
        self.oscEndpoint.send('/live/clip/info', (tid, cid, 0))
    #log("Slot changed" + str(self.clips[tid][cid]))
def clip_changestate(self, clip, x, y):
    """OSC notify: play state of the clip at track x, scene y changed."""
    log("Listener: x: " + str(x) + " y: " + str(y))
    # State code: 3 = triggered, 2 = playing, 1 = stopped.
    # (Triggered wins over playing, as in the original sequential checks.)
    if clip.is_triggered == 1:
        state = 3
    elif clip.is_playing == 1:
        state = 2
    else:
        state = 1
    self.oscEndpoint.send('/live/clip/info', (x, y, state))
    # Allow a burst of position messages after the state change.
    self._send_pos[x] = 3
# Mixer Callbacks
def mixerv_changestate(self, type, tid, track, r=0):
    """OSC notify: a continuous mixer value changed.

    type: 'panning' / 'volume' / 'crossfader' (mapped to OSC suffixes);
    r: 0 = normal track, 1 = return track, 2 = master.
    """
    # getattr replaces eval() for the dynamic attribute lookup.
    val = getattr(track.mixer_device, type).value
    types = { "panning": "pan", "volume": "volume", "crossfader": "crossfader" }
    if r == 2:
        self.oscEndpoint.send('/live/master/' + types[type], (float(val)))
    elif r == 1:
        self.oscEndpoint.send('/live/return/' + types[type], (tid, float(val)))
    else:
        self.oscEndpoint.send('/live/' + types[type], (tid, float(val)))
def mixert_changestate(self, type, tid, track, r=0):
    """OSC notify: a track toggle ('solo'/'mute') changed (r=1 => return)."""
    # getattr replaces eval() for the dynamic attribute lookup.
    val = getattr(track, type)
    if r == 1:
        self.oscEndpoint.send('/live/return/' + type, (tid, int(val)))
    else:
        self.oscEndpoint.send('/live/' + type, (tid, int(val)))
def send_changestate(self, tid, track, sid, send, r=0):
    """OSC notify: a send level changed (r=1 => return track)."""
    address = '/live/return/send' if r == 1 else '/live/send'
    self.oscEndpoint.send(address, (tid, sid, float(send.value)))
# Track name changestate
def trname_changestate(self, tid, track, r=0):
    """OSC notify: a track was renamed (r=1 => return track)."""
    if r == 1:
        self.oscEndpoint.send('/live/name/return', (tid, str(track.name)))
    else:
        # self.oscEndpoint.send('/live/name/track', (tid, str(track.name), track.color))
        self.oscEndpoint.send("/live/name/track", (tid, str(track.name), track.color, int(track.has_midi_input)))
        # Re-send the whole track name block — presumably because clients
        # cache the full track list; TODO confirm trBlock's contract.
        self.trBlock(0, len(LiveUtils.getTracks()))
# Meter Changestate
def meter_changestate(self, tid, track, lr, r=0):
    """OSC notify: an output meter level changed.

    lr: 0 = left channel, 1 = right; r: 0 = track, 1 = return, 2 = master.
    Emission is gated per destination by the master-device switches
    (check_md 2/3/4).
    """
    level = float(track.output_meter_right) if lr else float(track.output_meter_left)
    if r == 2:
        if self.check_md(2):
            self.oscEndpoint.send('/live/master/meter', (lr, level))
    elif r == 1:
        if self.check_md(3):
            self.oscEndpoint.send('/live/return/meter', (tid, lr, level))
    else:
        if self.check_md(4):
            self.oscEndpoint.send('/live/track/meter', (tid, lr, level))
def check_md(self, param):
    """Return 1 if master-device switch `param` is on, else 0.

    The first device on the master track is used as a settings panel;
    a parameter value > 0 enables the corresponding OSC stream.
    """
    devices = self.song().master_track.devices
    if not devices:
        return 0
    return 1 if devices[0].parameters[param].value > 0 else 0
# Device Listeners
def add_device_listeners(self):
    """(Re)attach device listeners for normal, return and master tracks."""
    # Always start clean so listeners never stack.
    self.rem_device_listeners()
    # type codes: 0 = normal tracks, 1 = return tracks, 2 = master.
    self.do_add_device_listeners(self.song().tracks,0)
    self.do_add_device_listeners(self.song().return_tracks,1)
    self.do_add_device_listeners([self.song().master_track],2)
def do_add_device_listeners(self, tracks, type):
    """Attach selected-device, parameter-list and parameter-value
    listeners for every track in `tracks`.

    type: 0 = normal, 1 = return, 2 = master (forwarded to callbacks).
    """
    for t_index, track in enumerate(tracks):
        self.add_devicelistener(track, t_index, type)
        for d_index, device in enumerate(track.devices):
            self.add_devpmlistener(device)
            for p_index, param in enumerate(device.parameters):
                self.add_paramlistener(param, t_index, d_index, p_index, type)
def rem_device_listeners(self):
    """Detach and forget every device-related listener previously added.

    Clears the three registries: prlisten (parameter-value listeners),
    dlisten (per-track selected-device listeners) and plisten
    (per-device parameter-list listeners). Each removal is guarded by a
    *_has_listener check so stale entries never raise.
    """
    for pr in self.prlisten:
        ocb = self.prlisten[pr]
        if pr != None:
            if pr.value_has_listener(ocb) == 1:
                pr.remove_value_listener(ocb)
    self.prlisten = {}
    for tr in self.dlisten:
        ocb = self.dlisten[tr]
        if tr != None:
            if tr.view.selected_device_has_listener(ocb) == 1:
                tr.view.remove_selected_device_listener(ocb)
    self.dlisten = {}
    for de in self.plisten:
        ocb = self.plisten[de]
        if de != None:
            if de.parameters_has_listener(ocb) == 1:
                de.remove_parameters_listener(ocb)
    self.plisten = {}
def add_devpmlistener(self, device):
cb = lambda :self.devpm_change()
if self.plisten.has_key(device) != 1:
device.add_parameters_listener(cb)
self.plisten[device] = cb
def devpm_change(self):
    """A device's parameter list changed: rebuild all cached state.

    A full refresh is used because parameter indices may have shifted —
    presumably cheaper than diffing; TODO confirm refresh_state's cost.
    """
    self.refresh_state()
def add_paramlistener(self, param, tid, did, pid, type):
cb = lambda :self.param_changestate(param, tid, did, pid, type)
if self.prlisten.has_key(param) != 1:
param.add_value_listener(cb)
self.prlisten[param] = cb
def param_changestate(self, param, tid, did, pid, type):
    """OSC notify: a device parameter value changed.

    type: 0 = track, 1 = return, 2 = master (master omits the track id).
    """
    info = (param.value, str(param.name), param.min, param.max)
    if type == 2:
        self.oscEndpoint.send('/live/master/device/param', (did, pid) + info)
    elif type == 1:
        self.oscEndpoint.send('/live/return/device/param', (tid, did, pid) + info)
    else:
        self.oscEndpoint.send('/live/device/param', (tid, did, pid) + info)
def add_devicelistener(self, track, tid, type):
cb = lambda :self.device_changestate(track, tid, type)
if self.dlisten.has_key(track) != 1:
track.view.add_selected_device_listener(cb)
self.dlisten[track] = cb
def device_changestate(self, track, tid, type):
    """OSC notify: the selected device on a track changed.

    type: 0 = normal track, 1 = return track, 2 = master.
    """
    # Index of the newly selected device within the track's device list
    # (None if not found — see tuple_idx).
    did = self.tuple_idx(track.devices, track.view.selected_device)
    if type == 2:
        # NOTE(review): `(did)` is a bare int, not a 1-tuple, unlike the
        # multi-value messages below — confirm receivers expect that.
        self.oscEndpoint.send('/live/master/devices/selected', (did))
    elif type == 1:
        self.oscEndpoint.send('/live/return/device/selected', (tid, did))
    else:
        self.oscEndpoint.send('/live/device/selected', (tid, did))
def tuple_idx(self, tuple, obj):
    """Return the index of `obj` in sequence `tuple`, or None if absent.

    (Parameter name kept for interface compatibility even though it
    shadows the builtin.)
    """
    # enumerate replaces the py2-only xrange-over-indices loop.
    for idx, item in enumerate(tuple):
        if item == obj:
            return idx
    return None
| avroshk/VRDAW | VRDAW_working/LiveOSC.py | Python | gpl-3.0 | 37,220 |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PCQM4M-LSC datasets."""
import functools
import pickle
from typing import Dict, List, Tuple, Union
import numpy as np
from ogb import lsc
# Sizes of the PCQM4M-LSC validation and test splits.
NUM_VALID_SAMPLES = 380_670
NUM_TEST_SAMPLES = 377_423
# Mean/std used to standardize the regression target — presumably the
# HOMO-LUMO gap values; TODO confirm against the training pipeline.
NORMALIZE_TARGET_MEAN = 5.690944545356371
NORMALIZE_TARGET_STD = 1.1561347795107815
def load_splits() -> Dict[str, List[int]]:
    """Loads dataset splits.

    Returns the OGB-provided mapping of split name (e.g. train/valid/test)
    to sample indices; the SMILES-only dataset is used since only the
    index split is needed.
    """
    dataset = _get_pcq_dataset(only_smiles=True)
    return dataset.get_idx_split()
def load_kth_fold_indices(data_root: str, k_fold_split_id: int) -> List[int]:
    """Loads k-th fold indices."""
    pickle_path = f"{data_root}/k_fold_splits/{k_fold_split_id}.pkl"
    # Pickled entries may be numpy ints; normalize to plain int.
    return [int(index) for index in _load_pickle(pickle_path)]
def load_all_except_kth_fold_indices(data_root: str, k_fold_split_id: int,
                                     num_k_fold_splits: int) -> List[int]:
    """Loads indices except for the kth fold."""
    if k_fold_split_id is None:
        raise ValueError("Expected integer value for `k_fold_split_id`.")
    # Concatenate every fold except the held-out one, in fold order.
    return [
        index
        for fold in range(num_k_fold_splits)
        if fold != k_fold_split_id
        for index in load_kth_fold_indices(data_root, fold)
    ]
def load_smile_strings(
    with_labels=False) -> List[Union[str, Tuple[str, np.ndarray]]]:
    """Loads the smile strings in the PCQ dataset.

    When with_labels is True, each element is a (smile, label) pair;
    otherwise just the smile string.
    """
    dataset = _get_pcq_dataset(only_smiles=True)
    smiles = []
    for index in range(len(dataset)):
        smile, label = dataset[index]
        smiles.append((smile, label) if with_labels else smile)
    return smiles
@functools.lru_cache()
def load_cached_conformers(cached_fname: str) -> Dict[str, np.ndarray]:
    """Returns cached dict mapping smile strings to conformer features.

    lru_cache keeps the unpickled dict in memory so repeated calls with
    the same path do not re-read the file.
    """
    return _load_pickle(cached_fname)
@functools.lru_cache()
def _get_pcq_dataset(only_smiles: bool):
    """Return the (memoized) OGB PCQM4M dataset object."""
    return lsc.PCQM4MDataset(only_smiles=only_smiles)
def _load_pickle(fname: str):
with open(fname, "rb") as f:
return pickle.load(f)
| deepmind/deepmind-research | ogb_lsc/pcq/datasets.py | Python | apache-2.0 | 2,504 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_configsync_actions
short_description: Perform different actions related to config-sync.
description:
- Allows one to run different config-sync actions. These actions allow
you to manually sync your configuration across multiple BIG-IPs when
those devices are in an HA pair.
version_added: "2.4"
options:
device_group:
description:
- The device group that you want to perform config-sync actions on.
required: True
sync_device_to_group:
description:
- Specifies that the system synchronizes configuration data from this
device to other members of the device group. In this case, the device
will do a "push" to all the other devices in the group. This option
is mutually exclusive with the C(sync_group_to_device) option.
choices:
- yes
- no
sync_most_recent_to_device:
description:
- Specifies that the system synchronizes configuration data from the
device with the most recent configuration. In this case, the device
will do a "pull" from the most recently updated device. This option
is mutually exclusive with the C(sync_device_to_group) options.
choices:
- yes
- no
overwrite_config:
description:
- Indicates that the sync operation overwrites the configuration on
the target.
default: no
choices:
- yes
- no
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the objectpath Python package on the host. This is as easy as pip
install objectpath.
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Sync configuration from device to group
bigip_configsync_actions:
device_group: "foo-group"
sync_device_to_group: yes
server: "lb01.mydomain.com"
user: "admin"
password: "secret"
validate_certs: no
delegate_to: localhost
- name: Sync configuration from most recent device to the current host
bigip_configsync_actions:
device_group: "foo-group"
sync_most_recent_to_device: yes
server: "lb01.mydomain.com"
user: "admin"
password: "secret"
validate_certs: no
delegate_to: localhost
- name: Perform an initial sync of a device to a new device group
bigip_configsync_actions:
device_group: "new-device-group"
sync_device_to_group: yes
server: "lb01.mydomain.com"
user: "admin"
password: "secret"
validate_certs: no
delegate_to: localhost
'''
RETURN = '''
# only common fields returned
'''
import time
import re
try:
from objectpath import Tree
HAS_OBJPATH = True
except ImportError:
HAS_OBJPATH = False
from ansible.module_utils.basic import BOOLEANS_TRUE
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
    """Parameter adapter for the config-sync module.

    Translates the module's boolean options into the tmsh config-sync
    vocabulary ('to-group'/'from-group', 'force-full-load-push').
    """
    # Nothing is pushed to or read back from the REST API directly.
    api_attributes = []
    returnables = []

    @property
    def direction(self):
        # 'to-group' pushes this device's config; 'from-group' pulls.
        if self.sync_device_to_group:
            return 'to-group'
        else:
            return 'from-group'

    @property
    def sync_device_to_group(self):
        result = self._cast_to_bool(self._values['sync_device_to_group'])
        return result

    @property
    def sync_group_to_device(self):
        # NOTE(review): the module option is named
        # 'sync_most_recent_to_device' in ArgumentSpec — confirm that
        # AnsibleF5Parameters populates a 'sync_group_to_device' key,
        # otherwise this lookup raises KeyError.
        result = self._cast_to_bool(self._values['sync_group_to_device'])
        return result

    @property
    def force_full_push(self):
        # Extra tmsh argument appended when overwriting is requested.
        if self.overwrite_config:
            return 'force-full-load-push'
        else:
            return ''

    @property
    def overwrite_config(self):
        result = self._cast_to_bool(self._values['overwrite_config'])
        return result

    def _cast_to_bool(self, value):
        # Preserve None (option unset); otherwise map Ansible truthy
        # strings/values to a real bool.
        if value is None:
            return None
        elif value in BOOLEANS_TRUE:
            return True
        else:
            return False

    def to_return(self):
        """Build the dict of returnable values (empty for this module)."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        # Deliberate best-effort: a missing attribute just yields {}.
        except Exception:
            pass
        return result

    def api_params(self):
        """Map module attributes onto API attribute names (none here)."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleManager(object):
    """Drives the config-sync workflow against a BIG-IP device."""

    def __init__(self, client):
        self.client = client
        self.want = Parameters(self.client.module.params)

    def exec_module(self):
        """Run the sync and return the Ansible result dict."""
        result = dict()
        try:
            changed = self.present()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        result.update(dict(changed=changed))
        return result

    def present(self):
        """Validate preconditions; sync unless the group is already 'In Sync'."""
        if not self._device_group_exists():
            raise F5ModuleError(
                # Fixed typo: message previously read "not not exist".
                "The specified 'device_group' does not exist."
            )
        if self._sync_to_group_required():
            raise F5ModuleError(
                "This device group needs an initial sync. Please use "
                "'sync_device_to_group'"
            )
        if self.exists():
            return False
        else:
            return self.execute()

    def _sync_to_group_required(self):
        """True when a pull is requested but the group was never synced."""
        resource = self.read_current_from_device()
        status = self._get_status_from_resource(resource)
        if status == 'Awaiting Initial Sync' and self.want.sync_group_to_device:
            return True
        return False

    def _device_group_exists(self):
        result = self.client.api.tm.cm.device_groups.device_group.exists(
            name=self.want.device_group
        )
        return result

    def execute(self):
        """Run the sync command and block until the group converges."""
        self.execute_on_device()
        self._wait_for_sync()
        return True

    def exists(self):
        """True when the device group currently reports 'In Sync'."""
        resource = self.read_current_from_device()
        status = self._get_status_from_resource(resource)
        if status == 'In Sync':
            return True
        else:
            return False

    def execute_on_device(self):
        """Issue the tmsh 'run cm config-sync ...' command."""
        sync_cmd = 'config-sync {0} {1} {2}'.format(
            self.want.direction,
            self.want.device_group,
            self.want.force_full_push
        )
        self.client.api.tm.cm.exec_cmd(
            'run',
            utilCmdArgs=sync_cmd
        )

    def _wait_for_sync(self):
        # Poll up to 180 times, 3s apart (~9 minutes total; the original
        # comment incorrectly claimed half an hour).
        resource = self.read_current_from_device()
        for x in range(1, 180):
            time.sleep(3)
            status = self._get_status_from_resource(resource)
            # Changes Pending:
            #   The existing device has changes made to it that
            #   need to be sync'd to the group.
            #
            # Awaiting Initial Sync:
            #   This is a new device group and has not had any sync
            #   done yet. You _must_ `sync_device_to_group` in this case.
            #
            # Not All Devices Synced:
            #   A device group will go into this state immediately
            #   after starting the sync and stay until all devices finish.
            if status in ['Changes Pending']:
                details = self._get_details_from_resource(resource)
                self._validate_pending_status(details)
            elif status in ['Awaiting Initial Sync', 'Not All Devices Synced']:
                pass
            elif status == 'In Sync':
                return
            else:
                raise F5ModuleError(status)
        # NOTE(review): falling out of the loop (timeout) returns silently,
        # matching the original behavior — confirm whether an error is wanted.

    def read_current_from_device(self):
        result = self.client.api.tm.cm.sync_status.load()
        return result

    def _get_status_from_resource(self, resource):
        """Refresh and extract the single status string from sync_status."""
        resource.refresh()
        entries = resource.entries.copy()
        k, v = entries.popitem()
        status = v['nestedStats']['entries']['status']['description']
        return status

    def _get_details_from_resource(self, resource):
        """Refresh and return the detail messages, most recent first."""
        resource.refresh()
        stats = resource.entries.copy()
        tree = Tree(stats)
        details = list(tree.execute('$..*["details"]["description"]'))
        result = details[::-1]
        return result

    def _validate_pending_status(self, details):
        """Validate the content of a pending sync operation

        This is a hack. The REST API is not consistent with its 'status' values
        so this method is here to check the returned strings from the operation
        and see if it reported any of these inconsistencies.

        :param details:
        :raises F5ModuleError:
        """
        pattern1 = r'.*(?P<msg>Recommended\s+action.*)'
        for detail in details:
            matches = re.search(pattern1, detail)
            if matches:
                raise F5ModuleError(matches.group('msg'))
class ArgumentSpec(object):
    """Ansible argument specification for the config-sync module."""

    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = dict(
            sync_device_to_group=dict(
                type='bool'
            ),
            sync_most_recent_to_device=dict(
                type='bool'
            ),
            overwrite_config=dict(
                type='bool',
                default='no'
            ),
            device_group=dict(
                required=True
            )
        )
        self.f5_product_name = 'bigip'
        # Exactly one sync direction must be given, never both.
        # (The original assigned required_one_of a second time with the
        # same value; the redundant duplicate has been removed.)
        self.required_one_of = [
            ['sync_device_to_group', 'sync_most_recent_to_device']
        ]
        self.mutually_exclusive = [
            ['sync_device_to_group', 'sync_most_recent_to_device']
        ]
def main():
    """Module entry point: validate deps, build the client, run, report."""
    # Fail fast if the optional python dependencies are missing.
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    if not HAS_OBJPATH:
        raise F5ModuleError("The python objectpath module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive,
        required_one_of=spec.required_one_of,
        f5_product_name=spec.f5_product_name
    )
    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # Surface module errors through Ansible's failure path.
        client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/f5/bigip_configsync_actions.py | Python | bsd-3-clause | 11,670 |
#!/home/thumbimigwe/Documents/aGithubThosh/magame/myvenv/bin/python3
# Virtualenv wrapper for Django's command-line utility (django-admin).
from django.core import management

if __name__ == "__main__":
    # Dispatch to the management command named on sys.argv.
    management.execute_from_command_line()
| Thoshh/magame | myvenv/bin/django-admin.py | Python | mit | 175 |
from . import nccof
from ..dataset.arff import ArffFile
import numpy as np
import copy
from constraint import *
class NCCLR(object):
    """
    NCCLR implements the naive credal classification method using the IDM for
    Label ranking problem with label-wise decomposition.
    """
    def __init__(self):
        # Number of labels in the ranking (set by learn()).
        self.nb_clazz = 0
        # One NCCOF classifier per label, keyed by label name.
        self.set_nccof = dict()
        # Label names, in the order declared by the dataset's 'L' attribute.
        self.clazz = []
        # Square utility matrix over rank positions (built in learn()).
        self.ranking_utility = None

    @staticmethod
    def __create_rank_utility_matrix(size):
        """Build a symmetric utility matrix over rank positions.

        utility[i, j] = size - 1 - |i - j|: maximal on the diagonal and
        decreasing with the distance between the two positions.
        """
        utility = np.zeros([size, size])
        for i in range(0, size):
            for j in range(i, size):
                utility[i, j] = size + i - j - 1
                utility[j, i] = size + i - j - 1
        return utility

    def learn(self, learn_data_set):
        """Train one ordinal NCCOF classifier per label.

        For each label, a deep copy of the dataset is built whose target
        is that label's 1-based position in the instance's ranking string
        (last column, labels separated by '>'), then NCCOF is fitted.

        learn_data_set: ArffFile whose 'L' attribute lists the labels.
        """
        try:
            classes = learn_data_set.attribute_data['L'][:]
            self.nb_clazz = len(classes)
            self.clazz = classes
            self.ranking_utility = NCCLR.__create_rank_utility_matrix(self.nb_clazz)
            # Possible target values: rank positions "1".."nb_clazz".
            rankings = [str(i + 1) for i in range(self.nb_clazz)]
            for class_value in classes:
                # print("Building ranking classifier %s" % class_value)
                model = nccof.NCCOF()
                # Deep-copy so each per-label transformation starts from
                # the untouched original dataset.
                datarep = ArffFile()
                datarep.attribute_data = learn_data_set.attribute_data.copy()
                datarep.attribute_types = learn_data_set.attribute_types.copy()
                datarep.data = copy.deepcopy(learn_data_set.data)
                datarep.relation = learn_data_set.relation
                datarep.attributes = copy.copy(learn_data_set.attributes)
                datarep.comment = copy.copy(learn_data_set.comment)
                datarep.define_attribute(name="class", atype="nominal", data=rankings)
                for number, instance in enumerate(datarep.data):
                    label_ranking = instance[-1].split(">")
                    if len(label_ranking) == 0 or len(label_ranking) < self.nb_clazz:
                        raise Exception("Error: Number labels for ranking is not correct in sample " + str(number))
                    # Target = 1-based position of this label in the ranking.
                    instance.append(str(label_ranking.index(class_value) + 1))
                datarep.remove_col('L')
                model.learn(datarep)
                self.set_nccof[class_value] = model
        # Raised when the dataset has no 'L' attribute.
        # NOTE(review): a KeyError from any other line in the try block
        # would be misreported with this message — confirm acceptable.
        except KeyError:
            raise Exception("Error: The name of ranking attribute should be called 'L'.")

    def evaluate(self, test_data_set, ncc_s_param=2):
        """Return, per test item, a dict mapping label -> NCCOF evaluation.

        ncc_s_param: IDM `s` hyper-parameter forwarded to every NCCOF model.
        """
        answers = []
        for item in test_data_set:
            ans_lw_ranking = dict()
            for clazz in self.clazz:
                ans_lw_ranking[clazz] = \
                    self.set_nccof[clazz].evaluate([item], ncc_epsilon=0.001, ncc_s_param=ncc_s_param)[0]
            answers.append(ans_lw_ranking)
        return answers

    def inference_CSP(self, evaluates):
        """Combine per-label evaluations into complete rankings via a CSP.

        Each label becomes a variable whose domain is its maximal
        (non-dominated) rank positions under ranking_utility; an
        all-different constraint forces a permutation. Returns, per item,
        the list of CSP solutions or None when the CSP is infeasible.
        """
        solutions = []
        for evaluate in evaluates:
            problem = Problem()
            for clazz, classifier in evaluate.items():
                maxDecision = classifier.getmaximaldecision(self.ranking_utility)
                problem.addVariable(clazz, np.where(maxDecision > 0)[0])
            problem.addConstraint(AllDifferentConstraint())
            solutionsCSP = problem.getSolutions()
            solutions.append(None if len(solutionsCSP) == 0 else solutionsCSP)
        return solutions
| sdestercke/classifip | classifip/models/ncclr.py | Python | gpl-2.0 | 3,368 |
# -*- coding: utf-8 -*-
from shop.views.product import ProductDetailView
from shop.util.cart import get_or_create_cart
from shop_optiongroups.views import ProductOptionGroupsViewMixin
from shop_textoptions.views import ProductTextOptionsViewMixin
from models import DiaryProduct, CalendarProduct
class DiaryDetailView(ProductOptionGroupsViewMixin,
                      ProductTextOptionsViewMixin, ProductDetailView):
    """
    This view handles displaying the detail view for test product Diary
    """
    model = DiaryProduct

    def post(self, *args, **kwargs):
        # Keep and return the parent's HttpResponse: the original
        # discarded it and implicitly returned None, which is not a
        # valid Django view response.
        response = super(DiaryDetailView, self).post(*args, **kwargs)
        if self.request.POST['product_action'] == 'add_to_cart':
            self.add_to_cart()
        return response

    def add_to_cart(self):
        """Add the selected variation/quantity of this product to the cart."""
        variation = self.get_variation()
        product_quantity = self.request.POST.get('add_item_quantity')
        if not product_quantity:
            # Missing or empty quantity defaults to a single item.
            product_quantity = 1
        product = self.get_object()
        cart = get_or_create_cart(self.request)
        cart.add_product(product, product_quantity, variation)
        cart.save()
class CalendarDetailView(ProductOptionGroupsViewMixin, ProductDetailView):
    """
    This view handles displaying the detail view for test product Calendar
    """
    model = CalendarProduct

    def post(self, *args, **kwargs):
        # Keep and return the parent's HttpResponse: the original
        # discarded it and implicitly returned None, which is not a
        # valid Django view response.
        response = super(CalendarDetailView, self).post(*args, **kwargs)
        if self.request.POST['product_action'] == 'add_to_cart':
            self.add_to_cart()
        return response

    def add_to_cart(self):
        """Add the selected variation/quantity of this product to the cart."""
        variation = self.get_variation()
        product_quantity = self.request.POST.get('add_item_quantity')
        if not product_quantity:
            # Missing or empty quantity defaults to a single item.
            product_quantity = 1
        product = self.get_object()
        cart = get_or_create_cart(self.request)
        cart.add_product(product, product_quantity, variation)
        cart.save()
| jrief/django-shop-productvariations | tests/testapp/project/views.py | Python | bsd-3-clause | 1,863 |
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, api, fields, models
class TypeAdjustment(models.Model):
    """Catalog of inventory adjustment types (input/output) with an
    auto-assigned, zero-padded 5-character key."""
    _name = 'type.adjustment'
    _description = 'Type of adjustment'
    _order = 'consecutive asc'
    _rec_name = 'description'

    @api.model
    def _default_consecutive(self):
        """Default key: current record count + 1, zero-padded to 5 chars."""
        # search_count([(1, '=', 1)]) counts all rows and returns 0 for an
        # empty table, so `count + 1` covers both branches of the original
        # if/else (which computed count + 1 and 0 + 1 respectively).
        last_id = self.search_count([(1, '=', 1)]) + 1
        return str(last_id).rjust(5, '0')

    consecutive = fields.Char(
        string=_(u'Key'), size=5,
        default=_default_consecutive,
        help=_(u'Key type of adjustment'),
    )
    description = fields.Char(
        string=_(u'Description'), size=150,
        help=_(u'Description type of adjustment')
    )
    type_adjustment = fields.Selection(
        [('input', _(u'Input')),
         ('output', _(u'Output'))],
        string=_(u"Type of adjustment"),
    )
    type_calculation = fields.Selection(
        [('none', _(u'None')),
         ('extra_outputs', _(u'Extra outputs')),
         ('net_changes', _(u'Net changes')),
         ('extra_inputs', _(u'Extra inputs'))],
        string=_(u"Type of calculation"),
    )
    account_id = fields.Many2one(
        'account.account', string=_(u'Account'),
    )
    company_id = fields.Many2one(
        'res.company', string=_(u'Company'),
    )
    active = fields.Boolean(
        default=True,
        help="Set active to false to hide the tax without removing it.")

    _sql_constraints = [
        ('_check_consecutive_uniq', 'unique (consecutive)',
         _(u'This field must be unique!'))
    ]
| Gebesa-Dev/Addons-gebesa | stock_picking_type/models/type_adjustment.py | Python | agpl-3.0 | 1,769 |
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
def ByName(name):
    """Get the error code for the given error name.

    Args:
      name: The name of the error

    Returns:
      The error code

    Raises:
      KeyError: if `name` is not defined at this module's top level.
    """
    # Looks the name up in this module's globals, so any module-level
    # name (not only error constants) can technically be retrieved.
    return globals()[name]
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# Added by Shaka Player devs
MISMATCHED_PROPERTY_DOCUMENTATION = 900
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
# Error codes introduced after specific releases, grouped so callers can
# treat them specially — presumably to avoid breaking existing configs
# unless explicitly enabled; TODO confirm against the checker's flags.
NEW_ERRORS = frozenset([
    # Errors added after 2.0.2:
    WRONG_INDENTATION,
    MISSING_SEMICOLON,
    # Errors added after 2.3.9:
    JSDOC_MISSING_VAR_ARGS_TYPE,
    JSDOC_MISSING_VAR_ARGS_NAME,
    # Errors added after 2.3.13:
])
| russitto/shaka-player | third_party/gjslint/closure_linter-2.3.13/closure_linter/errors.py | Python | apache-2.0 | 4,254 |
from django.test import TestCase
from eksiapp.entry_creation import create_entry_with_title
# Create your tests here.
from .models import Title, Entry
# class TestEntryCreation(TestCase):
#
# def test_entry_creation(self):
# create_entry_with_title('Deneme', 'Denemenin tanimi')
# create_entry_with_title('Deneme', 'Denemenin tanimi')
#
# assert Title.objects.filter(title_text='Deneme').exists()
# assert Entry.objects.filter(title__title_text='Deneme').count() == 2
| hanakamer/eskisozluk-clone | App/eksi/eksiapp/tests.py | Python | gpl-2.0 | 505 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import mptt.fields
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the groups_manager app: an MPTT-based Group tree,
    # Member records (optionally synced to django.contrib.auth users/groups),
    # the Group<->Member through table with roles, and the auxiliary
    # GroupEntity / GroupMemberRole / GroupType lookup tables.
    # NOTE: migrations are frozen history — do not edit operations in place.

    dependencies = [
        ('auth', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Hierarchical group; lft/rght/tree_id/level are django-mptt's
        # internal nested-set bookkeeping fields.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('codename', models.SlugField(max_length=255, blank=True)),
                ('description', models.TextField(default=b'', blank=True)),
                ('comment', models.TextField(default=b'', blank=True)),
                ('full_name', models.CharField(default=b'', max_length=255, blank=True)),
                ('properties', jsonfield.fields.JSONField(default={}, blank=True)),
                ('django_auth_sync', models.BooleanField(default=True)),
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('django_group', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='auth.Group', null=True)),
            ],
            options={
                'ordering': ('name',),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupEntity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('label', models.CharField(max_length=255)),
                ('codename', models.SlugField(unique=True, max_length=255, blank=True)),
            ],
            options={
                'ordering': ('label',),
            },
            bases=(models.Model,),
        ),
        # Through table for Group.group_members (see AddField below).
        migrations.CreateModel(
            name='GroupMember',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('group', models.ForeignKey(related_name='group_membership', to='groups_manager.Group', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ('group', 'member'),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupMemberRole',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('label', models.CharField(max_length=255)),
                ('codename', models.SlugField(unique=True, max_length=255, blank=True)),
            ],
            options={
                'ordering': ('label',),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='GroupType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('label', models.CharField(max_length=255)),
                ('codename', models.SlugField(unique=True, max_length=255, blank=True)),
            ],
            options={
                'ordering': ('label',),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('username', models.CharField(default=b'', max_length=255, blank=True)),
                ('email', models.EmailField(default=b'', max_length=255, blank=True)),
                ('django_auth_sync', models.BooleanField(default=True)),
                ('django_user', models.ForeignKey(related_name='groups_manager_member', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'ordering': ('last_name', 'first_name'),
            },
            bases=(models.Model,),
        ),
        # Wire up the remaining relations after all models exist.
        migrations.AddField(
            model_name='groupmember',
            name='member',
            field=models.ForeignKey(related_name='group_membership', to='groups_manager.Member', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='groupmember',
            name='roles',
            field=models.ManyToManyField(to='groups_manager.GroupMemberRole', null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='groupmember',
            unique_together=set([('group', 'member')]),
        ),
        migrations.AddField(
            model_name='group',
            name='group_entities',
            field=models.ManyToManyField(related_name='groups', null=True, to='groups_manager.GroupEntity', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='group',
            name='group_members',
            field=models.ManyToManyField(related_name='groups', through='groups_manager.GroupMember', to='groups_manager.Member'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='group',
            name='group_type',
            field=models.ForeignKey(related_name='groups', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='groups_manager.GroupType', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='group',
            name='parent',
            field=mptt.fields.TreeForeignKey(related_name='subgroups', on_delete=models.CASCADE, blank=True, to='groups_manager.Group', null=True),
            preserve_default=True,
        ),
    ]
| vittoriozamboni/django-groups-manager | groups_manager/migrations/0001_initial.py | Python | mit | 6,381 |
#!/usr/bin/env python3
# coding: utf-8
"""
scheduler service
dependencies: logbook, interphone
TODO:
* toutes les 5mn, aller grabber une image sur le net pour
- sera utilisé sur la page web d'accueil! :)
+ faire en sorte que scheduler puisse lire mon agenda et me rappeler les trucs pour lesquels j'ai mis un TAG spécifique genre "BASECAMP-1d", et du coup, il me rappelle le RV la veille au soir, à mon retour (ou par SMS, ou les deux). Ou bien "BASECAMP-3h" et il me rappelle le RV 3h avant... :)
+ mettre le calendrier des poubelles! :)
+ pour les annonces d'heure, ajouter des variantes marrantes, et aussi un commentaire... "et tout va bien ici..." ou bien "On a quelques soucis, ici, quand tu auras un moment... merci!". Scanner la présence BT par exemple pour varier en ajoutant les Alias (Nico, Natacha)...
+ conditionner les annonces vocales d'heure en mode cocoon exclusivement.
"""
import requests
import time
import schedule
import datetime
from random import randint
# BC_commons import
from threading import Timer
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import basecamp.tools as tools
# =======================================================
# helpers
# =======================================================
def alive_check():
    """Ping the watchdog's alive_url, then reschedule itself.

    The Timer is armed *before* the HTTP call, so the periodic heartbeat
    keeps running even when the watchdog endpoint is unreachable; failures
    are logged and pushed through tools.notify.
    """
    # tools.log.info("*** performing alive check() ***")
    t = Timer(tools.alive_frequency, alive_check)
    t.start()
    try:
        requests.get(tools.alive_url, params={'service': tools.service_name, 'version': tools.service_version},
                     timeout=tools.alive_timeout)
    except Exception as e:
        tools.log.error(e.__str__())
        tools.log.error("*** ERROR reaching alive_url on "+str(tools.alive_url)+" ***")
        tools.notify("ERROR", "*** ERROR reaching alive_url on "+str(tools.alive_url)+" ***")
    return
def one_of(m_list):
    """Return one uniformly random element of *m_list*."""
    last_index = len(m_list) - 1
    return m_list[randint(0, last_index)]
# =======================================================
# time announce job
def update_holidays_flag():
    """Refresh the module-level ``holidays_flag`` from the status service.

    Fetches ``status_url`` and sets the flag to True when the configured
    ``status_field`` equals ``status_value``.

    Bug fix: previously, when the HTTP request failed, execution fell
    through to ``r.json()`` with ``r`` unbound and crashed with a
    ``NameError`` right after logging the real error.  We now log/notify
    and return early, leaving the flag at its previous value.
    """
    global holidays_flag
    try:
        r = requests.get(status_url, timeout=20)
    except Exception as e:
        tools.log.error(e.__str__())
        tools.log.error("*** ERROR reaching status_url on "+str(status_url)+" ***")
        tools.notify("ERROR", "*** ERROR reaching status_url on "+str(status_url)+" ***")
        # Keep the last known flag value rather than crashing on unbound `r`.
        return
    holidays_flag = (r.json()[status_field] == status_value)
# =======================================================
# time announce job
def job(h, m, normal_day, holiday_day):
    """Announce the time over the interphone if (h, m) falls in a configured slot.

    h, m: zero-padded hour/minute strings (e.g. "09", "45").
    normal_day / holiday_day: dicts mapping a time-slice key to a list of
    announcement templates; which dict applies depends on holidays_flag.
    Given the fixed slicing below, keys are expected to look like
    "HH:MM-HH:MM" — confirm against the .ini config.
    """
    # are we on holidays?
    global holidays_flag
    if holidays_flag:
        day_config = holiday_day
    else:
        day_config = normal_day
    now = datetime.datetime.today()
    now = now.replace(hour=int(h), minute=int(m))
    res = False
    # Find the first time slice containing `now`; _slice stays bound to it.
    for _slice in day_config:
        beg = now.replace(hour=int(_slice[0:2]), minute=int(_slice[3:5]))
        end = now.replace(hour=int(_slice[6:8]), minute=int(_slice[9:11]))
        if ((now >= beg) and (now < end)):
            res = True
            break
    if res is True:
        print()
        # choose an announcement
        m_announce = one_of(day_config[_slice])
        if (m == "00"):
            m_time = h+"h"
        else:
            m_time = h+"h"+m
        tools.log.debug("announcing time: "+m_announce.format(m_time))
        try:
            requests.get(interphone_url, params={'service': tools.service_name, 'announce': m_announce.format(m_time)}, timeout=interphone_timeout)
        except Exception as e:
            tools.log.error(e.__str__())
            tools.log.error("*** ERROR reaching interphone on "+str(interphone_url)+" ***")
            tools.notify("ERROR", "*** ERROR reaching interphone on "+str(interphone_url)+" ***")
    else:
        print("(not inside any timeframe)")
    return
# =======================================================
# main stuff
if __name__ == "__main__":
    # initialize config/logs
    tools.load_config()
    tools.init_logs()
    # .ini
    startup_wait = tools.config.getint('startup', 'wait')
    # also: getfloat, getint, getboolean
    interphone_url = tools.config.get('interphone', 'interphone_url')
    interphone_timeout = tools.config.getint('interphone', 'interphone_timeout')
    status_url = tools.config.get('holidays_flag', 'status_url')
    status_field = tools.config.get('holidays_flag', 'status_field')
    status_value = tools.config.get('holidays_flag', 'status_value')
    # NOTE(review): eval() on config strings executes arbitrary code from the
    # .ini file — acceptable only because the config is trusted/local.
    hello_world = eval(tools.config.get('announcements', 'hello_world'))
    # announcements_time_week = eval(tools.config.get('announcements', 'announcements_time_week'))
    # announcements_time_weekend = eval(tools.config.get('announcements', 'announcements_time_weekend'))
    hour_marks = eval(tools.config.get('announcements', 'hour_marks'))
    monday_work = eval(tools.config.get('announcements', 'monday_work'))
    tuesday_work = eval(tools.config.get('announcements', 'tuesday_work'))
    wednesday_work = eval(tools.config.get('announcements', 'wednesday_work'))
    thursday_work = eval(tools.config.get('announcements', 'thursday_work'))
    friday_work = eval(tools.config.get('announcements', 'friday_work'))
    saturday = eval(tools.config.get('announcements', 'saturday'))
    sunday = eval(tools.config.get('announcements', 'sunday'))
    anyday_holiday = eval(tools.config.get('announcements', 'anyday_holiday'))
    print(monday_work)
    print(type(monday_work))
    for key in monday_work:
        print(key, type(monday_work[key]))
    update_holidays_flag()
    # startup sync & notification
    tools.log.info("--= Restarting =--")
    tools.log.info("sleeping {} seconds for startup sync between services...".format(startup_wait))
    time.sleep(startup_wait)
    tools.notify("WARNING", tools.service_version+" - (re)started!")
    requests.get(interphone_url, params={'service': tools.service_name, 'announce': one_of(hello_world)}, timeout=interphone_timeout)
    # run baby, run!
    alive_check()
    # scheduler init
    hours = []
    # udpate the holiday flag everyday @5am
    schedule.every().day.at("05:00").do(update_holidays_flag)
    # debug
    # job("09", "45", monday_work, anyday_holiday)
    # exit(0)
    # during the week, announce between 6h-22h
    for hour in range(6, 22):
        hours.append('{0:01d}'.format(hour))
    for hour in hours:
        for mn in hour_marks:
            #schedule.every().day.at(hour+":"+mn).do(job, hour, mn)
            schedule.every().monday.at(hour+":"+mn).do(job, hour, mn, monday_work, anyday_holiday)
            schedule.every().tuesday.at(hour+":"+mn).do(job, hour, mn, tuesday_work, anyday_holiday)
            schedule.every().wednesday.at(hour+":"+mn).do(job, hour, mn, wednesday_work, anyday_holiday)
            schedule.every().thursday.at(hour+":"+mn).do(job, hour, mn, thursday_work, anyday_holiday)
            schedule.every().friday.at(hour+":"+mn).do(job, hour, mn, friday_work, anyday_holiday)
    # during the weekend, announce between 9h-22h
    hours = []
    for hour in range(9, 22):
        hours.append('{0:01d}'.format(hour))
    for hour in hours:
        for mn in hour_marks:
            #schedule.every().day.at(hour+":"+mn).do(job, hour, mn)
            schedule.every().saturday.at(hour+":"+mn).do(job, hour, mn, saturday, anyday_holiday)
            schedule.every().sunday.at(hour+":"+mn).do(job, hour, mn, sunday, anyday_holiday)
    """
    schedule.every(10).seconds.do(job)
    schedule.every().hour.do(job)
    schedule.every().day.at("10:30").do(job)
    schedule.every().monday.do(job)
    schedule.every().wednesday.at("13:15").do(job)
    """
    # time.sleep(10)
    # schedule.run_all(delay_seconds=2)
    # main loop: fire due jobs, polling every 10 seconds
    while True:
        schedule.run_pending()
        time.sleep(10)
| nio101/BASECAMP | source/scheduler/scheduler.py | Python | gpl-3.0 | 7,902 |
import codecs
import os
import pwd
import tempfile
import mockssh
import pytest
from containercluster import utils
def test_multiple_error_message():
    # MultipleError joins the str() of each wrapped exception with newlines;
    # note KeyError's str() keeps the quotes around the missing key.
    err = utils.MultipleError(KeyError("foo"), ValueError("bar"))
    assert str(err) == "'foo'\nbar"
def test_errors_in_parallel():
    # utils.parallel should collect the exceptions of failing tasks into a
    # single MultipleError while still running the successful ones.
    def divide_by_zero(n):
        return n / 0

    def key_error(k):
        return {}[k]

    # Capture the platform-specific ZeroDivisionError message for comparison.
    try:
        divide_by_zero(42)
    except ZeroDivisionError as err:
        divide_by_zero_err = str(err)

    with pytest.raises(utils.MultipleError) as err:
        utils.parallel(((utils.run, "echo foo"),
                        (key_error, "no-such-key"),
                        (divide_by_zero, 42),
                        (utils.run, "echo bar")))
    # Order of failures is not guaranteed, so compare as a set.
    errors = set(str(exc) for exc in err.value)
    assert errors == set(("'no-such-key'", divide_by_zero_err))
def ssh_private_key_path():
    """Return the path of the user's SSH private key, or None if none exists."""
    ssh_dir = os.path.expanduser("~/.ssh")
    candidates = ("id_rsa",)
    for name in candidates:
        candidate = os.path.join(ssh_dir, name)
        if os.access(candidate, os.F_OK):
            return candidate
    return None
# Marker that skips SSH-dependent tests when no private key is available.
needs_ssh_private_key = pytest.mark.skipif(ssh_private_key_path() is None,
                                           reason="Missing SSH private key")
@pytest.yield_fixture(scope="function")
def ssh_session():
    # Start an in-process mock SSH server keyed to the current OS user,
    # then yield an authenticated SshSession connected to it.
    uid = pwd.getpwuid(os.geteuid()).pw_name
    private_key_path = ssh_private_key_path()
    with mockssh.Server({uid: private_key_path}) as s:
        with utils.SshSession(uid, s.host, s.port, private_key_path) as session:
            yield session
@needs_ssh_private_key
def test_ssh_session(ssh_session):
    # A remote `ls /` should list standard root entries (stdout is bytes,
    # hence the decode before comparing).
    _, stdout, _ = ssh_session.exec_command("ls /")
    assert "etc" in (codecs.decode(bit, "utf8")
                     for bit in stdout.read().split())
@needs_ssh_private_key
def test_sftp_session(ssh_session):
    # SFTP put should create the target file; upload this test module itself.
    target_dir = tempfile.mkdtemp()
    target_fname = os.path.join(target_dir, "foo")
    assert not os.access(target_fname, os.F_OK)
    ssh_session.open_sftp().put(__file__, target_fname)
    assert os.access(target_fname, os.F_OK)
| carletes/container-cluster | containercluster/test_utils.py | Python | mit | 2,046 |
from pprint import pprint
from django.contrib.sessions.models import Session
from django.db.models import Max
from django.utils import timezone
from utils.classes import AcessoInterno
from base.models import Colaborador
def get_current_users():
    """Colaborador queryset for users that currently hold a live session."""
    now = timezone.now()
    live_sessions = Session.objects.filter(expire_date__gte=now)
    user_ids = [
        session.get_decoded().get('_auth_user_id', None)
        for session in live_sessions
    ]
    return Colaborador.objects.filter(user__id__in=user_ids)
def get_current_users_requisicao():
    """Logged-in Colaborador queryset annotated with each user's latest request time."""
    logged = get_current_users()
    return logged.annotate(ult_acao=Max('requisicao__quando'))
def get_origem_do_ip(request):
    """Template-context flags telling whether the request IP is internal/external.

    If AcessoInterno cannot classify the address, all three flags are False.
    """
    checker = AcessoInterno()
    try:
        interno = checker.current_interno
        externo = not interno
        de_acesso = True
    except Exception:
        interno = False
        externo = False
        de_acesso = False
    return {
        'base_ip_interno': interno,
        'base_ip_externo': externo,
        'base_ip_de_acesso': de_acesso,
    }
def get_logged_count(request):
    """Template-context fragment with the number of currently logged-in users."""
    return {
        'logged_count': get_current_users().count(),
    }
def main(request):
    """Context-processor entry point: merge all context fragments."""
    context = {}
    for fragment in (get_origem_do_ip, get_logged_count):
        context.update(fragment(request))
    return context
| anselmobd/fo2 | src/base/pages_context.py | Python | mit | 1,437 |
class Filter(object):
    """Benchmark filter built from a "key=v1,v2:key2=v3" specification.

    A benchmark is skipped when it has an attribute named in the spec whose
    value (after converting each allowed string to the attribute's type)
    matches none of the allowed values.  Attributes absent from the
    benchmark are ignored.
    """

    def __init__(self, filter_str):
        self.params = {}
        for clause in filter_str.split(':'):
            key, values = clause.split('=')
            self.params[key] = values.split(',')

    def skip(self, bench):
        for key, allowed in self.params.items():
            if key not in bench.__dict__:
                continue
            actual = bench.__dict__[key]
            # Convert each allowed string to the attribute's own type before
            # comparing, so "1" matches the int 1, etc.
            if not any(type(actual)(raw) == actual for raw in allowed):
                return True
        return False
def create_filter(filter_str):
    """Factory wrapper: build a Filter from its string specification."""
    built = Filter(filter_str)
    return built
class EmptyFilter(Filter):
    """Null-object Filter that never skips any benchmark."""

    def __init__(self):
        # Deliberately bypass Filter.__init__: no spec string is needed.
        pass

    def skip(self, bench):
        # Accept everything.
        return False
| planetA/interference | scripts/manager/filter.py | Python | mit | 830 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-21 06:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the ProblemClarification model and the contest-level switch to use
    # clarifications instead of comments; also tweaks two existing fields.
    # NOTE: migrations are frozen history — do not edit operations in place.

    dependencies = [
        ('judge', '0059_problem_is_manually_managed'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProblemClarification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(verbose_name='clarification body')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='clarification timestamp')),
            ],
        ),
        migrations.AddField(
            model_name='contest',
            name='use_clarifications',
            field=models.BooleanField(default=True, help_text='Use clarification system instead of comments.', verbose_name='no comments'),
        ),
        migrations.AlterField(
            model_name='problem',
            name='is_manually_managed',
            field=models.BooleanField(db_index=True, default=False, help_text='Whether judges should be allowed to manage data or not', verbose_name='manually managed'),
        ),
        migrations.AlterField(
            model_name='problemtestcase',
            name='type',
            field=models.CharField(choices=[(b'C', 'Normal case'), (b'S', 'Batch start'), (b'E', 'Batch end')], default=b'C', max_length=1, verbose_name='case type'),
        ),
        migrations.AddField(
            model_name='problemclarification',
            name='problem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='judge.Problem', verbose_name='clarified problem'),
        ),
    ]
| Phoenix1369/site | judge/migrations/0060_contest_clarifications.py | Python | agpl-3.0 | 1,832 |
#!/usr/bin/env python
import collections
import functools
import multiprocessing
import traceback
import Queue
from ct.client import entry_decoder
from ct.client import log_client
from ct.crypto import cert
from ct.crypto import error
from ct.proto import client_pb2
# Messages types:
# Special queue messages to stop the subprocesses.
# These tag QueueMessage.msg_type on the worker -> coordinator output queue.
_WORKER_STOPPED = "WORKER_STOPPED"
_ERROR_PARSING_ENTRY = "ERROR_PARSING_ENTRY"
_ENTRY_MATCHING = "ENTRY_MATCHING"
_PROGRESS_REPORT = "PROGRESS_REPORT"
class QueueMessage(object):
    """Message sent from a worker subprocess back to the coordinator.

    msg_type is one of the module's message-type constants; msg carries a
    human-readable description; certificates_scanned counts how many entries
    this message accounts for; matcher_output holds the matcher's result for
    ENTRY_MATCHING messages.
    """

    def __init__(self, msg_type, msg=None, certificates_scanned=1,
                 matcher_output=None):
        self.msg_type, self.msg = msg_type, msg
        # Number of certificates scanned.
        self.certificates_scanned = certificates_scanned
        self.matcher_output = matcher_output
# This is only used on the entries input queue
_STOP_WORKER = "STOP_WORKER"
# Fetch/progress granularity: entries per scan range and per progress report.
_BATCH_SIZE = 1000
def process_entries(entry_queue, output_queue, match_callback):
    """Worker loop: decode log entries and run the matcher on each certificate.

    Pulls (index, serialized EntryResponse) tuples from entry_queue until a
    _STOP_WORKER sentinel arrives; reports matches, parse errors and progress
    on output_queue as QueueMessage objects.  (Python 2 module.)
    """
    stopped = False
    total_processed = 0
    while not stopped:
        count, entry = entry_queue.get()
        if entry == _STOP_WORKER:
            stopped = True
            # Each worker signals when they've picked up their
            # "STOP_WORKER" message.
            output_queue.put(QueueMessage(
                _WORKER_STOPPED,
                certificates_scanned=total_processed))
        else:
            entry_response = client_pb2.EntryResponse()
            entry_response.ParseFromString(entry)
            parsed_entry = entry_decoder.decode_entry(entry_response)
            ts_entry = parsed_entry.merkle_leaf.timestamped_entry
            total_processed += 1
            c = None
            if ts_entry.entry_type == client_pb2.X509_ENTRY:
                der_cert = ts_entry.asn1_cert
            else:
                # The original, signed precertificate.
                der_cert = (parsed_entry.extra_data.precert_chain_entry.pre_certificate)
            # Try strict DER parsing first, then fall back to lenient parsing
            # and report which level (if any) succeeded.
            try:
                c = cert.Certificate(der_cert)
            except error.Error as e:
                try:
                    c = cert.Certificate(der_cert, strict_der=False)
                except error.Error as e:
                    output_queue.put(QueueMessage(
                        _ERROR_PARSING_ENTRY,
                        "Error parsing entry %d:\n%s" %
                        (count, e)))
                else:
                    output_queue.put(QueueMessage(
                        _ERROR_PARSING_ENTRY,
                        "Entry %d failed strict parsing:\n%s" %
                        (count, c)))
            except Exception as e:
                print "Unknown parsing failure for entry %d:\n%s" % (
                    count, e)
                traceback.print_exc()
                output_queue.put(QueueMessage(
                    _ERROR_PARSING_ENTRY,
                    "Entry %d failed parsing with an unknown error:\n%s" %
                    (count, e)))
            if c:
                match_result = match_callback(
                    c, ts_entry.entry_type, parsed_entry.extra_data, count)
                if match_result:
                    output_queue.put(QueueMessage(
                        _ENTRY_MATCHING,
                        "Entry %d:\n%s" % (count, c),
                        matcher_output=match_result))
            # Emit a progress report every _BATCH_SIZE processed entries.
            if not total_processed % _BATCH_SIZE:
                output_queue.put(QueueMessage(
                    _PROGRESS_REPORT,
                    "Scanned %d entries" % total_processed,
                    certificates_scanned=_BATCH_SIZE))
def _scan(entry_queue, log_url, range_description):
    """Fetcher: download one [start, end) entry range and enqueue each entry.

    range_description is a (start, end) tuple; the end bound is exclusive,
    hence the -1 before calling get_entries.  Fetch failures are logged and
    swallowed so one bad range does not kill the pool.  (Python 2 module.)
    """
    range_start, range_end = range_description
    range_end -= 1
    client = log_client.LogClient(log_url)
    try:
        entries = client.get_entries(range_start, range_end)
        scanned = range_start
        for entry in entries:
            # Can't pickle protocol buffers with protobuf module version < 2.5.0
            # (https://code.google.com/p/protobuf/issues/detail?id=418)
            # so send serialized entry.
            entry_queue.put((scanned, entry.SerializeToString()))
            scanned += 1
    except Exception as e:
        print "Exception when fetching range %d to %d:\n%s" % (
            range_start, range_end, e)
        traceback.print_exc()
# Aggregate outcome of a whole scan: entries processed, matcher hits, parse errors.
ScanResults = collections.namedtuple(
    'ScanResults', ['total', 'matches', 'errors'])
def _get_tree_size(log_url):
    """Fetch the log's current STH and return its tree size (entry count)."""
    client = log_client.LogClient(log_url)
    sth = client.get_sth()
    print "Got STH: %s" % sth
    return sth.tree_size
def _send_stop_to_workers(to_queue, num_instances):
    # Queue one stop sentinel per worker; each worker consumes exactly one
    # and acknowledges with a _WORKER_STOPPED message.
    for _ in range(num_instances):
        to_queue.put((0, _STOP_WORKER))
def _process_worker_messages(
        workers_input_queue, workers_output_queue, scanners_done, num_workers,
        matcher_output_handler):
    """Coordinator loop: drain worker messages until every worker has stopped.

    Tallies scanned/match/error counts, forwards matcher output to
    matcher_output_handler, and — once the scanners_done flag is set —
    sends stop sentinels to the workers exactly once.  Returns ScanResults.
    (Python 2 module.)
    """
    total_scanned = 0
    total_matches = 0
    total_errors = 0
    scan_progress = 0
    workers_done = 0
    stop_sent = False
    while workers_done < num_workers:
        try:
            # Short timeout so the scanners_done flag is re-checked regularly.
            msg = workers_output_queue.get(block=True, timeout=3)
            if msg.msg_type == _WORKER_STOPPED:
                workers_done += 1
                total_scanned += msg.certificates_scanned
            elif msg.msg_type == _ERROR_PARSING_ENTRY:
                total_errors += 1
            elif msg.msg_type == _ENTRY_MATCHING:
                total_matches += 1
                if matcher_output_handler:
                    matcher_output_handler(msg.matcher_output)
            elif (msg.msg_type == _PROGRESS_REPORT and
                  msg.certificates_scanned > 0):
                scan_progress += msg.certificates_scanned
                print msg.msg, " Total: %d" % scan_progress
            else:
                print msg.msg
        except Queue.Empty:
            are_active = ""
            if scanners_done.value:
                are_active = "NOT"
            print "Scanners are %s active, Workers done: %d" % (
                are_active, workers_done)
        # Done handling the message, now let's check if the scanners
        # are done and if so stop the workers
        if scanners_done.value and not stop_sent:
            print "All scanners done, stopping."
            _send_stop_to_workers(workers_input_queue, num_workers)
            # To avoid re-sending stop
            stop_sent = True
    return ScanResults(total_scanned, total_matches, total_errors)
def scan_log(match_callback, log_url,total_processes=2,
             matcher_output_handler=None, start_entry=0):
    """Scan a CT log in parallel, running match_callback over every certificate.

    Splits total_processes into fetcher ("scanner") and decoder ("worker")
    pools in a fixed ratio, streams entries between them through queues, and
    returns a ScanResults tuple.  Ctrl-C terminates both pools and re-raises.
    (Python 2 module.)
    """
    # (index, entry) tuples
    m = multiprocessing.Manager()
    # Fixed scanner:worker split ratio.
    R = 2
    assert total_processes >= R
    num_workers = total_processes // R
    num_scanners = total_processes - num_workers
    entry_queue = m.Queue(num_scanners * _BATCH_SIZE)
    output_queue = multiprocessing.Queue(10000)
    print "Allocating %d fetchers and %d processing workers" % (
        num_scanners, num_workers)
    tree_size = _get_tree_size(log_url)
    workers_done = multiprocessing.Value('b', 0)
    # Must use a flag rather than submitting STOP to the queue directly
    # since if the queue will be full there'll be a deadlock.
    def stop_workers_callback(_):
        workers_done.value = 1
    bound_scan = functools.partial(_scan, entry_queue, log_url)
    # Cover [start_entry, tree_size) in _BATCH_SIZE-sized (start, end) ranges.
    scan_start_range = range(start_entry, tree_size, _BATCH_SIZE)
    scan_range = zip(scan_start_range, scan_start_range[1:] + [tree_size])
    scanners_pool = multiprocessing.Pool(num_scanners)
    res = scanners_pool.map_async(bound_scan, scan_range,
                                  callback=stop_workers_callback)
    scanners_pool.close()
    workers = [
        multiprocessing.Process(
            target=process_entries,
            args=(entry_queue, output_queue, match_callback))
        for _ in range(num_workers)]
    for w in workers:
        w.start()
    try:
        res = _process_worker_messages(
            entry_queue, output_queue, workers_done, num_workers,
            matcher_output_handler)
        # Do not hang the interpreter upon ^C.
    except (KeyboardInterrupt, SystemExit):
        for w in workers:
            w.terminate()
        scanners_pool.terminate()
        m.shutdown()
        raise
    scanners_pool.join()
    for w in workers:
        w.join()
    m.shutdown()
    return res
| php-coder/origin | vendor/github.com/google/certificate-transparency/python/ct/client/scanner.py | Python | apache-2.0 | 8,352 |
import sublime, sublime_plugin
import os, re
from CComplete.ccomplete import CComplete
from CComplete.tokenizer import Tokenizer
CCP = None
class CCompletePlugin(sublime_plugin.EventListener):
    """Sublime Text event listener providing ctags-style C/C++ completion.

    A single instance registers itself in the module-level CCP global so the
    TextCommand classes below can reach it.  Completion data comes from the
    CComplete engine, keyed by the file currently being edited.
    """
    def __init__(self):
        # Enforce a singleton via the CCP module global.
        global CCP
        if CCP is None:
            CCP = self
        else:
            print("ERROR")
            return
        self.ready = False
        self.init = False
        self.prevword = None
    def plugin_loaded(self):
        # Lazy one-time setup: settings, on-disk cache dir and the engine.
        print("Plugin loaded!")
        self.settings = sublime.load_settings("ccomplete")
        cachepath = sublime.cache_path() + "/ccomplete_cache"
        if not os.path.exists(cachepath):
            os.mkdir(cachepath)
        self.cc = CComplete(self.settings.get('cache', 500), cachepath)
        self.currentfile = None
        self.ready = False
        self.extensions = self.settings.get("extensions", ["c", "cpp", "cxx", "h", "hpp", "hxx"])
        self.load_matching = self.settings.get("load_matching", True)
        self.init = True
    @staticmethod
    def showprogress(view, i, total):
        # Status-bar progress callback passed to the engine while indexing.
        view.set_status("ctcomplete", "Loading completions (%d/%d)..." % (i, total))
    def load(self, view):
        """(Re)index the view's file if it is a recognized C/C++ source."""
        if self.init == False:
            self.plugin_loaded()
        filename = view.file_name()
        view.erase_status("ctcomplete")
        self.ready = False
        if not filename:
            return
        loadOk = False
        base = ""
        # Only index files with a configured extension.
        for ext in self.extensions:
            if filename.endswith("." + ext):
                base = filename[0:-len(ext)]
                loadOk = True
                break
        if not loadOk:
            return
        extra = []
        # Also index the sibling header/source pair (foo.c <-> foo.h).
        if self.load_matching:
            for ext in self.extensions:
                if filename.endswith(ext):
                    continue
                if os.path.isfile(base + ext):
                    extra.append(base + ext)
        basepaths, syspaths = self.getProjectPaths(filename)
        # Skip re-indexing when the cached data is still valid.
        if self.currentfile == filename and self.cc.is_valid(filename, basepaths, syspaths, extra):
            print("Valid")
            self.ready = True
            return
        print("Loading")
        view.set_status("ctcomplete", "Loading completions...")
        self.cc.load_file(filename, basepaths, syspaths, extra, lambda a, b: CCompletePlugin.showprogress(view, a, b))
        view.erase_status("ctcomplete")
        self.currentfile = filename
        self.ready = True
    def getProjectPaths(self, filename):
        """Return (project folders, system include paths) for the engine."""
        # No valid filename
        if not filename or not os.path.isfile(filename):
            return ([], [])
        folders = []
        projectfolder = os.path.dirname(sublime.active_window().project_file_name())
        data = sublime.active_window().project_data()
        for folder in data["folders"]:
            path = os.path.join(projectfolder, folder["path"])
            folders.append(path)
        return (folders, [])
    def current_function(self, view):
        # Name of the C function enclosing the caret, or None when outside
        # any function scope (implicit return).
        sel = view.sel()[0]
        functions = view.find_by_selector('meta.function.c')
        func = "";
        for f in functions:
            if f.contains(sel.a):
                funcname=view.substr(sublime.Region(f.a, view.line(f.a).b))
                funcname=funcname.split("(",1)[0]
                return funcname.strip()
    @staticmethod
    def get_type(line):
        # First whitespace-separated token of a declaration, i.e. its type name.
        return line.lstrip().split()[0]
    def get_base_type(self, type):
        """Resolve typedef/member chains down to the underlying type name."""
        type = type.lower()
        if type in self.cc.tokens:
            token = self.cc.tokens[type]
            # "t" = typedef, "m" = member — presumably ctags kind letters;
            # confirm against Tokenizer.
            if token[Tokenizer.T_KIND] == "t" or token[Tokenizer.T_KIND] == "m":
                if "typeref" in token[Tokenizer.T_EXTRA]:
                    ref=token[Tokenizer.T_EXTRA]['typeref']
                    if ref.startswith("struct:") or ref.startswith("union:"):
                        ref = ref.split(":",1)[1]
                    if ref == type:
                        return type
                    return self.get_base_type(ref)
                else:
                    ref = CCompletePlugin.get_type(token[Tokenizer.T_SEARCH])
                    return self.get_base_type(ref)
        return type
    def traverse_members(self, view, pos, full = False):
        """Resolve the a->b.c chain ending at *pos* and return that type's members.

        With full=False the last chain element is treated as the member being
        completed and is excluded from type resolution.
        """
        filename = self.currentfile
        line = view.line(pos)
        line.b=pos
        line=view.substr(line)
        oldline=""
        # Strip (possibly nested) [...] index expressions until stable.
        while oldline != line:
            oldline = line
            line = re.sub(r'\[[^\[]*\]', '', line)
        print(line)
        line = re.split(',|;|\(|\[|\s+', line.strip())[-1].strip()
        print(line)
        chain = [x.split("[", 1)[0] for x in re.split('->|\.|::', line.strip())]
        print(chain)
        func = self.current_function(view)
        if not filename in self.cc.functiontokens or not func in self.cc.functiontokens[filename]:
            print("Not in a filled function (%s, %s)" % (filename, func))
            return []
        # Prefer a local/function-scope token for the chain root, else global.
        tokens = [x for x in self.cc.functiontokens[filename][func] if x[Tokenizer.T_NAME] == chain[0]]
        token = None
        if len(tokens) > 0:
            token = tokens[0]
        else:
            token = self.cc.tokens[chain[0].lower()]
        if not token or token[Tokenizer.T_KIND] != Tokenizer.K_VARIABLE:
            return []
        type=""
        # NOTE(review): this K_PARAM branch looks unreachable given the
        # K_VARIABLE check just above — confirm against Tokenizer's kinds.
        if token[Tokenizer.T_KIND] == Tokenizer.K_PARAM:
            type = token[Tokenizer.T_EXTRA]["type"]
        else:
            type = Tokenizer.parsevariable(token[Tokenizer.T_SEARCH])[1]
        type = self.get_base_type(type)
        pchain = chain[1:]
        if not full:
            pchain = pchain[0:-1]
        for newtype in pchain:
            type = type + "::" + newtype
            type = self.get_base_type(type)
        members = self.cc.search_tokens(type + "::")
        # Keep only direct members (no further "::" after the type prefix).
        goodmembers = [x for x in members if x[Tokenizer.T_NAME][len(type)+2:].find("::") == -1]
        return goodmembers
    def get_sel_token(self, view):
        """Return (word under caret, best matching token or None)."""
        if len(view.sel()) < 1:
            return (None, None)
        selword = view.word(view.sel()[0].end())
        i = selword.begin()
        word = view.substr(selword)
        # Member access (preceded by "->", "." or "::"): resolve via the chain.
        if i>2 and (view.substr(sublime.Region(i-2, i)) == "->" or view.substr(sublime.Region(i-1, i)) == "." or view.substr(sublime.Region(i-2, i)) == "::"):
            members = self.traverse_members(view, selword.end())
            for m in members:
                if m[Tokenizer.T_NAME].endswith("::" + word):
                    return (word, m)
            return (word, None)
        func = self.current_function(view)
        filename = self.currentfile
        # Function-local tokens win over globals.
        if filename in self.cc.functiontokens and func in self.cc.functiontokens[filename] and self.cc.functiontokens[filename][func]:
            tokens = [x for x in self.cc.functiontokens[filename][func] if x[Tokenizer.T_NAME] == word]
            if len(tokens) > 0:
                return (word, Tokenizer.best_match(tokens))
        if word.lower() in self.cc.tokens:
            return (word, self.cc.tokens[word.lower()])
        return (word, None)
    def on_activated_async(self, view):
        # Re-validate/re-index whenever a view gains focus.
        self.load(view)
    def on_post_save_async(self, view):
        # Re-index after every save so completions stay current.
        self.load(view)
    def on_query_completions(self, view, search, locations):
        """Sublime completion hook: member completions after ->/./::,
        otherwise locals plus matching non-member globals."""
        if not self.ready:
            return
        i=locations[0]-len(search)
        if i>2 and (view.substr(sublime.Region(i-2, i)) == "->" or view.substr(sublime.Region(i-1, i)) == "." or view.substr(sublime.Region(i-2, i)) == "::"):
            members = self.traverse_members(view, locations[0])
            completions = [i[Tokenizer.T_EXTRA]["completion"] for i in members]
            return (completions, sublime.INHIBIT_WORD_COMPLETIONS)
        validtokens = [x for x in self.cc.search_tokens(search)]
        completions = []
        func = self.current_function(view)
        if func:
            completions.extend([x[Tokenizer.T_EXTRA]["completion"] for x in self.cc.functiontokens[self.currentfile][func]])
        completions.extend([x[Tokenizer.T_EXTRA]["completion"] for x in validtokens if x[Tokenizer.T_KIND] != Tokenizer.K_MEMBER])
        return (completions, sublime.INHIBIT_WORD_COMPLETIONS)
    def show_number(self, view, word):
        # If the selected word parses as an integer literal (hex with 0x,
        # octal with a leading 0, else decimal), show its bases in the status
        # bar; otherwise clear the status.
        num=None
        try:
            if word[0:2] == "0x":
                num=int(word, 16)
            elif word[0:1] == "0":
                num=int(word, 8)
            else:
                num=int(word)
            view.set_status("ctcomplete", "Integer: HEX=0x%s DEC=%s OCT=%s" % (format(num, "X"), int(num), format(num, "#o")))
        except:
            view.erase_status("ctcomplete")
    def on_selection_modified_async(self, view):
        # On caret movement: show the symbol's status line, or a number's
        # base conversions, skipping comments/strings and repeated words.
        if not self.ready:
            return
        block = sublime.active_window().active_view().scope_name(sublime.active_window().active_view().sel()[0].begin()).split()[-1].split(".")[0]
        if block == "comment" or block == "string":
            return
        selword = view.word(view.sel()[0].end())
        if selword == self.prevword:
            return
        else:
            self.prevword = selword
        word, token = self.get_sel_token(view)
        if token:
            selword = view.word(view.sel()[0].end())
            word = view.substr(selword)
            view.set_status("ctcomplete", token[Tokenizer.T_EXTRA]["status"].replace("$#", word))
        elif len(word.strip()):
            self.show_number(view, word.strip())
        else:
            view.erase_status("ctcomplete")
    def jump_token_definition(self, token, word = None):
        """Open the token's file at its definition (line:column encoded position)."""
        offset = 0
        if word and token[Tokenizer.T_SEARCH].find(word) != -1:
            offset = token[Tokenizer.T_SEARCH].find(word)+len(word)+1
        flags = sublime.ENCODED_POSITION
        line = token[Tokenizer.T_LINE]
        file = token[Tokenizer.T_FILENAME]
        sublime.active_window().open_file(file+":"+str(line)+":"+str(offset), flags)
class ccomplete_jump_definition(sublime_plugin.TextCommand):
    """Command: jump to the definition of the symbol under the caret."""
    def run(self, edit):
        global CCP
        # Bail out until the plugin has finished indexing.
        if not CCP.ready:
            return
        view = sublime.active_window().active_view()
        selword = view.word(view.sel()[0].end())
        word = view.substr(selword)
        _, token=CCP.get_sel_token(view)
        CCP.jump_token_definition(token, word)
class ccomplete_show_symbols(sublime_plugin.TextCommand):
    """Text command: show a quick panel of known symbols; selecting an
    entry jumps to that symbol's definition."""

    def run(self, edit):
        global CCP
        if not CCP.ready:
            return
        global active_ctags_listener  # NOTE(review): declared but never used here
        view = sublime.active_window().active_view()
        filename = CCP.currentfile
        func = CCP.current_function(view)
        tokens = []
        # List local symbols of the current function first, then all
        # globally known tokens.
        if func in CCP.cc.functiontokens[filename]:
            tokens.extend(CCP.cc.functiontokens[filename][func])
        tokens.extend(CCP.cc.tokens.values())
        def on_done(i):
            # Quick-panel callback; i == -1 means the panel was cancelled.
            if i == -1:
                return
            token = tokens[i]
            CCP.jump_token_definition(token, token[Tokenizer.T_NAME])
        # Two-line entries: symbol name, then "file:line" location.
        tokenlist = [[x[Tokenizer.T_NAME], x[Tokenizer.T_FILENAME] + ":" + str(x[Tokenizer.T_LINE])] for x in tokens]
        sublime.active_window().show_quick_panel(tokenlist, on_done, 0, 0)
# Copyright (c) 2014-2016 Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
from plugins.plugin import Plugin
class Spoof(Plugin):
    """MITMf plugin that redirects/modifies traffic via ARP, ICMP, DHCP
    or DNS poisoning.  Exactly one of --arp/--icmp/--dhcp may be chosen
    (mutually exclusive group); --dns can be combined with any of them.
    """

    name = "Spoof"
    optname = "spoof"
    desc = "Redirect/Modify traffic using ICMP, ARP, DHCP or DNS"
    version = "0.6"

    def initialize(self, options):
        '''Called if plugin is enabled, passed the options namespace'''
        self.options = options
        # Poisoner objects instantiated below; all started at the end.
        self.protocol_instances = []

        # Imported lazily so the plugin can be listed without pulling in
        # scapy and friends.
        from core.utils import iptables, shutdown, set_ip_forwarding

        # Makes scapy more verbose
        debug = False

        if options.arp:
            # ARP spoofing needs to know the gateway to impersonate.
            if not options.gateway:
                shutdown("[Spoof] --arp argument requires --gateway")

            from core.poisoners.ARP import ARPpoisoner

            arp = ARPpoisoner(options)
            arp.debug = debug
            self.tree_info.append('ARP spoofing enabled')
            self.protocol_instances.append(arp)

        elif options.dhcp:
            from core.poisoners.DHCP import DHCPpoisoner

            # DHCP answers whoever asks; per-host targeting makes no sense.
            if options.targets:
                shutdown("[Spoof] --targets argument invalid when DCHP spoofing")

            dhcp = DHCPpoisoner(options)
            dhcp.debug = debug
            self.tree_info.append('DHCP spoofing enabled')
            self.protocol_instances.append(dhcp)

        elif options.icmp:
            from core.poisoners.ICMP import ICMPpoisoner

            # ICMP redirects require both a gateway and explicit targets.
            if not options.gateway:
                shutdown("[Spoof] --icmp argument requires --gateway")
            if not options.targets:
                shutdown("[Spoof] --icmp argument requires --targets")

            icmp = ICMPpoisoner(options)
            icmp.debug = debug
            self.tree_info.append('ICMP spoofing enabled')
            self.protocol_instances.append(icmp)

        if options.dns:
            self.tree_info.append('DNS spoofing enabled')
            # Install the DNS redirect iptables rule only once.
            if iptables().dns is False:
                iptables().DNS(self.config['MITMf']['DNS']['port'])

        if not options.arp and not options.icmp and not options.dhcp and not options.dns:
            shutdown("[Spoof] Spoof plugin requires --arp, --icmp, --dhcp or --dns")

        # Turn the box into a router and redirect HTTP through the proxy.
        set_ip_forwarding(1)

        if iptables().http is False:
            iptables().HTTP(options.listen_port)

        for protocol in self.protocol_instances:
            protocol.start()

    def options(self, options):
        """Register this plugin's command-line options."""
        group = options.add_mutually_exclusive_group(required=False)
        group.add_argument('--arp', dest='arp', action='store_true', help='Redirect traffic using ARP spoofing')
        group.add_argument('--icmp', dest='icmp', action='store_true', help='Redirect traffic using ICMP redirects')
        group.add_argument('--dhcp', dest='dhcp', action='store_true', help='Redirect traffic using DHCP offers')
        options.add_argument('--dns', dest='dns', action='store_true', help='Proxy/Modify DNS queries')
        options.add_argument('--netmask', dest='netmask', type=str, default='255.255.255.0', help='The netmask of the network')
        options.add_argument('--shellshock', type=str, metavar='PAYLOAD', dest='shellshock', help='Trigger the Shellshock vuln when spoofing DHCP, and execute specified command')
        options.add_argument('--gateway', dest='gateway', help='Specify the gateway IP')
        options.add_argument('--gatewaymac', dest='gatewaymac', help='Specify the gateway MAC [will auto resolve if ommited]')
        options.add_argument('--targets', dest='targets', help='Specify host/s to poison [if ommited will default to subnet]')
        options.add_argument('--ignore', dest='ignore', help='Specify host/s not to poison')
        options.add_argument('--arpmode',type=str, dest='arpmode', default='rep', choices=["rep", "req"], help=' ARP Spoofing mode: replies (rep) or requests (req) [default: rep]')

    def on_shutdown(self):
        """Stop poisoners, flush iptables and disable IP forwarding."""
        from core.utils import iptables, set_ip_forwarding

        for protocol in self.protocol_instances:
            if hasattr(protocol, 'stop'):
                protocol.stop()

        iptables().flush()
        set_ip_forwarding(0)
| Lh4cKg/MITMf | plugins/spoof.py | Python | gpl-3.0 | 4,797 |
#python2 & python3
class Node:
    """A parse-tree node holding a token kind, its lexeme and children."""

    def __init__(self, x, y):
        self.token_value = x   # token kind, e.g. 'if', 'Opk', 'Idk'
        self.code_value = y    # matching source text (lexeme)
        self.children = []
        self.index = None      # preorder index, assigned by the table builder

    def set_children(self, y):
        """Append *y* (a single child node or a list of nodes) to children.

        The original implementation abused ``assert isinstance(y, list)``
        inside try/except for this dispatch, which breaks under
        ``python -O`` (asserts are stripped, so lists were appended as a
        single child) and silently swallowed any real error raised while
        appending.
        """
        if isinstance(y, list):
            self.children.extend(y)
        else:
            self.children.append(y)
class Parser:
    """Recursive-descent parser for the TINY language.

    Consumes a pre-tokenized program (``tokens_list`` holds token kinds,
    ``code_list`` the matching lexemes) and builds a parse tree of
    ``Node`` objects, which can then be flattened into node/edge tables
    for visualisation.

    NOTE(review): ``nodes_table``, ``tmp_index`` and ``edges_table`` are
    class-level mutable state shared by all instances; parsing a second
    program without calling ``clear_tables()`` accumulates stale entries.
    """
    nodes_table = {}
    tmp_index = 0
    edges_table = []

    def __init__(self):
        self.token = str
        # Default demo program: "x := x + 5"
        self.tokens_list = ['identifier', ':=', 'identifier', '+', 'number']
        self.code_list = ['x', ':=', 'x', '+', '5']
        self.tmp_index = 0
        self.token = self.tokens_list[self.tmp_index]
        self.parse_tree = None
        self.nodes_table = None
        self.edges_table = None

    def set_tokens_list_and_code_list(self, x, y):
        """Load a new token/lexeme stream and rewind to its first token."""
        self.code_list = y
        self.tokens_list = x
        self.tmp_index = 0
        self.token = self.tokens_list[self.tmp_index]

    def next_token(self):
        """Advance to the next token; return False at end of input."""
        if (self.tmp_index == len(self.tokens_list) - 1):
            return False  # we have reached the end of the list
        self.tmp_index = self.tmp_index + 1
        self.token = self.tokens_list[self.tmp_index]
        return True

    def match(self, x):
        """Consume the current token if it equals *x*, else raise ValueError."""
        if self.token == x:
            self.next_token()
            return True
        else:
            raise ValueError('Token Mismatch', self.token)

    def stmt_sequence(self):
        """stmt-sequence -> statement { ';' statement }"""
        t = self.statement()
        p = t
        while self.token == ';':
            q = Node(None, None)
            self.match(';')
            q = self.statement()
            if q == None:
                break
            else:
                if t == None:
                    t = p = q
                else:
                    p.set_children(q)
                    p = q
        return t

    def statement(self):
        """statement -> if-stmt | repeat-stmt | assign-stmt | read-stmt | write-stmt"""
        if self.token == 'if':
            t = self.if_stmt()
            return t
        elif self.token == 'repeat':
            t = self.repeat_stmt()
            return t
        elif self.token == 'identifier':
            t = self.assign_stmt()
            return t
        elif self.token == 'read':
            t = self.read_stmt()
            return t
        elif self.token == 'write':
            t = self.write_stmt()
            return t
        else:
            raise ValueError('SyntaxError', self.token)

    def if_stmt(self):
        """if-stmt -> 'if' exp 'then' stmt-sequence [ 'else' stmt-sequence ] 'end'

        BUGFIX: the original checked ``self.token == 'else'`` but never
        consumed the 'else' token, so the nested stmt_sequence() started
        on the unconsumed 'else' and raised SyntaxError for every
        if/else statement.
        """
        t = Node('if', self.code_list[self.tmp_index])
        if self.token == 'if':
            self.match('if')
            t.set_children(self.exp())
            self.match('then')
            t.set_children(self.stmt_sequence())
            if self.token == 'else':
                self.match('else')  # consume 'else' before parsing its body
                t.set_children(self.stmt_sequence())
            self.match('end')
        return t

    def exp(self):
        """exp -> simple-exp [ comparison-op simple-exp ]"""
        t = self.simple_exp()
        if self.token == '<' or self.token == '>' or self.token == '=':
            p = Node(self.token, self.code_list[self.tmp_index])
            p.set_children(t)
            t = p
            self.comparison_op()
            t.set_children(self.simple_exp())
        return t

    def comparison_op(self):
        """comparison-op -> '<' | '>' | '='"""
        if self.token == '<':
            self.match('<')
        elif self.token == '>':
            self.match('>')
        elif self.token == '=':
            self.match('=')

    def simple_exp(self):
        """simple-exp -> term { addop term }"""
        t = self.term()
        while self.token == '+' or self.token == '-':
            p = Node('Opk', self.code_list[self.tmp_index])
            p.set_children(t)
            t = p
            self.addop()
            t.set_children(self.term())
        return t

    def addop(self):
        """addop -> '+' | '-'"""
        if self.token == '+':
            self.match('+')
        elif self.token == '-':
            self.match('-')

    def term(self):
        """term -> factor { mulop factor }"""
        t = self.factor()
        while self.token == '*' or self.token == '/':
            p = Node('Opk', self.code_list[self.tmp_index])
            p.set_children(t)
            t = p
            self.mulop()
            p.set_children(self.factor())  # p is t here, so this extends t
        return t

    def mulop(self):
        """mulop -> '*' | '/'"""
        if self.token == '*':
            self.match('*')
        elif self.token == '/':
            self.match('/')

    def factor(self):
        """factor -> '(' exp ')' | number | identifier

        (An unreachable ``return False`` that followed the raise in the
        original was removed.)
        """
        if self.token == '(':
            self.match('(')
            t = self.exp()
            self.match(')')
        elif self.token == 'number':
            t = Node('ConstK', self.code_list[self.tmp_index])
            self.match('number')
        elif self.token == 'identifier':
            t = Node('Idk', self.code_list[self.tmp_index])
            self.match('identifier')
        else:
            raise ValueError('SyntaxError', self.token)
        return t

    def repeat_stmt(self):
        """repeat-stmt -> 'repeat' stmt-sequence 'until' exp"""
        t = Node('repeat', self.code_list[self.tmp_index])
        if self.token == 'repeat':
            self.match('repeat')
            t.set_children(self.stmt_sequence())
            self.match('until')
            t.set_children(self.exp())
        return t

    def assign_stmt(self):
        """assign-stmt -> identifier ':=' exp"""
        t = Node('assign', self.code_list[self.tmp_index])
        self.match('identifier')
        self.match(':=')
        t.set_children(self.exp())
        return t

    def read_stmt(self):
        """read-stmt -> 'read' identifier"""
        t = Node('read', self.code_list[self.tmp_index])
        self.match('read')
        self.match('identifier')
        return t

    def write_stmt(self):
        """write-stmt -> 'write' exp"""
        t = Node('write', self.code_list[self.tmp_index])
        self.match('write')
        t.set_children(self.exp())
        return t

    def create_nodes_table(self, args=None):
        """Number the tree nodes in preorder and map index -> lexeme.

        Starts at the root when *args* is None, then recurses.  (The two
        duplicated branches of the original were merged; behavior is
        unchanged.)
        """
        node = self.parse_tree if args is None else args
        node.index = Parser.tmp_index
        Parser.nodes_table.update({Parser.tmp_index: node.code_value})
        Parser.tmp_index = Parser.tmp_index + 1
        if len(node.children) != 0:
            for i in node.children:
                self.create_nodes_table(i)

    def create_edges_table(self, args=None):
        """Record (parent_index, child_index) pairs for every tree edge."""
        node = self.parse_tree if args is None else args
        if len(node.children) != 0:
            for i in node.children:
                Parser.edges_table.append((node.index, i.index))
            for j in node.children:
                self.create_edges_table(j)

    def run(self):
        """Parse the loaded token stream and build the node/edge tables."""
        self.parse_tree = self.stmt_sequence()  # create parse tree
        self.create_nodes_table()               # create nodes_table
        self.create_edges_table()               # create edges_table
        self.edges_table = Parser.edges_table   # save edges_table
        self.nodes_table = Parser.nodes_table   # save nodes_table
        if self.tmp_index == len(self.tokens_list) - 1:
            print('success')
        elif self.tmp_index < len(self.tokens_list):
            raise ValueError('SyntaxError', self.token)

    def clear_tables(self):
        """Reset the shared tables before parsing another program.

        NOTE(review): ``list.clear()`` does not exist on Python 2,
        despite the module's "python2 & python3" header comment.
        """
        self.nodes_table.clear()
        self.edges_table.clear()
| muhakh/TinyParser | tinyparser.py | Python | gpl-3.0 | 7,577 |
# tarpayload.py
# Tar archive software payload management.
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
TODO
- error handling!!!
- document all methods
"""
import functools
import logging
log = logging.getLogger("anaconda")
try:
import tarfile
except ImportError:
log.error("import of tarfile failed")
tarfile = None
from pyanaconda.packaging import ArchivePayload, PayloadError, versionCmp
from pyanaconda import iutil
# TarPayload is not yet fully implemented
# pylint: disable=abstract-method
class TarPayload(ArchivePayload):
    """ A TarPayload unpacks a single tar archive onto the target system. """

    def __init__(self, data):
        # Fail fast when the tarfile module could not be imported.
        if tarfile is None:
            raise PayloadError("unsupported payload type")

        super(TarPayload, self).__init__(data)
        self.archive = None      # tarfile.TarFile handle, opened in setup()
        self.image_file = None   # archive path; presumably populated by base-class setup -- TODO confirm

    def setup(self, storage, instClass):
        """Open the payload archive; raises PayloadError on a bad archive."""
        super(TarPayload, self).setup(storage, instClass)

        try:
            self.archive = tarfile.open(self.image_file)
        except (tarfile.ReadError, tarfile.CompressionError) as e:
            # maybe we only need to catch ReadError and CompressionError here
            log.error("opening tar archive %s: %s", self.image_file, e)
            raise PayloadError("invalid payload format")

    def unsetup(self):
        # Drop the open archive handle.
        super(TarPayload, self).unsetup()
        self.archive = None

    @property
    def requiredSpace(self):
        # Total unpacked size of all archive members, converted to MiB.
        byte_count = sum(m.size for m in self.archive.getmembers())
        return byte_count / (1024.0 * 1024.0)  # FIXME: Size

    @property
    def kernelVersionList(self):
        # Kernel versions found as boot/vmlinuz-<version> archive members.
        names = self.archive.getnames()
        # Strip out vmlinuz- from the names
        return sorted((n.split("/")[-1][8:] for n in names if "boot/vmlinuz-" in n),
                      key=functools.cmp_to_key(versionCmp))

    def install(self):
        """Extract the whole archive into the installation sysroot."""
        try:
            self.archive.extractall(path=iutil.getSysroot())
        except (tarfile.ExtractError, tarfile.CompressionError) as e:
            # Best-effort: log and continue rather than abort the install.
            log.error("extracting tar archive %s: %s", self.image_file, e)
| AdamWill/anaconda | pyanaconda/packaging/tarpayload.py | Python | gpl-2.0 | 3,014 |
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
    """Benchmark Index set operations across several index dtypes."""

    goal_time = 0.2
    params = (['datetime', 'date_string', 'int', 'strings'],
              ['intersection', 'union', 'symmetric_difference'])
    param_names = ['dtype', 'method']

    def setup(self, dtype, method):
        size = 10**5
        dates = date_range('1/1/2000', periods=size, freq='T')
        # One candidate index per dtype; "right" is always "left" minus
        # its final element.
        sources = {
            'datetime': dates,
            'date_string': Index(dates.strftime('%Y-%m-%d %H:%M:%S')),
            'int': Index(np.arange(size)),
            'strings': tm.makeStringIndex(size),
        }
        full = sources[dtype]
        self.left = full
        self.right = full[:-1]

    def time_operation(self, dtype, method):
        getattr(self.left, method)(self.right)
class SetDisjoint(object):
    """Benchmark Index.difference on two non-overlapping DatetimeIndexes."""

    goal_time = 0.2

    def setup(self):
        size = 10**5
        extra = 20000
        # Two contiguous, disjoint ranges of nanosecond timestamps.
        self.datetime_left = DatetimeIndex(range(size))
        self.datetime_right = DatetimeIndex(range(size, size + extra))

    def time_datetime_difference_disjoint(self):
        self.datetime_left.difference(self.datetime_right)
class Datetime(object):
    """Benchmark the DatetimeIndex._is_dates_only fast-path check."""

    goal_time = 0.2

    def setup(self):
        # ~27 years of daily, midnight-aligned timestamps.
        self.dr = date_range(start='20000101', freq='D', periods=10000)

    def time_is_dates_only(self):
        self.dr._is_dates_only
class Ops(object):
    """Benchmark elementwise arithmetic on large numeric indexes."""

    sample_time = 0.2
    params = ['float', 'int']
    param_names = ['dtype']

    def setup(self, dtype):
        size = 10**6
        makers = {'int': tm.makeIntIndex, 'float': tm.makeFloatIndex}
        self.index = makers[dtype](size)

    def time_add(self, dtype):
        self.index + 2

    def time_subtract(self, dtype):
        self.index - 2

    def time_multiply(self, dtype):
        self.index * 2

    def time_divide(self, dtype):
        self.index / 2

    def time_modulo(self, dtype):
        self.index % 2
class Range(object):
    """Benchmark min/max on increasing and decreasing RangeIndexes.

    RangeIndex can answer min/max arithmetically; the "trivial" cases are
    the ones where the answer is simply the first element.
    """

    goal_time = 0.2

    def setup(self):
        self.idx_inc = RangeIndex(0, 10**7, 3)
        self.idx_dec = RangeIndex(10**7, -1, -3)

    def time_max(self):
        self.idx_inc.max()

    def time_max_trivial(self):
        self.idx_dec.max()

    def time_min(self):
        self.idx_dec.min()

    def time_min_trivial(self):
        self.idx_inc.min()
class IndexAppend(object):
    """Benchmark Index.append with a long list of small indexes."""

    goal_time = 0.2

    def setup(self):
        count = 10000
        # Base 100-element index in three dtypes: range, int64, object.
        self.range_idx = RangeIndex(0, 100)
        self.int_idx = self.range_idx.astype(int)
        self.obj_idx = self.int_idx.astype(str)
        # 9999 consecutive, non-overlapping 100-element chunks.
        self.range_idxs = [RangeIndex(i * 100, (i + 1) * 100)
                           for i in range(1, count)]
        self.int_idxs = [r_idx.astype(int) for r_idx in self.range_idxs]
        self.object_idxs = [i_idx.astype(str) for i_idx in self.int_idxs]

    def time_append_range_list(self):
        self.range_idx.append(self.range_idxs)

    def time_append_int_list(self):
        self.int_idx.append(self.int_idxs)

    def time_append_obj_list(self):
        self.obj_idx.append(self.object_idxs)
class Indexing(object):
    """Benchmark common indexing operations across index dtypes."""

    goal_time = 0.2
    params = ['String', 'Float', 'Int']
    param_names = ['dtype']

    def setup(self, dtype):
        size = 10**6
        make = getattr(tm, 'make{}Index'.format(dtype))
        self.idx = make(size)
        # Boolean mask selecting every third element, as ndarray and Series.
        self.array_mask = (np.arange(size) % 3) == 0
        self.series_mask = Series(self.array_mask)
        self.sorted = self.idx.sort_values()
        half = size // 2
        # Duplicate the first half to get non-unique variants.
        self.non_unique = self.idx[:half].append(self.idx[:half])
        self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
        self.key = self.sorted[size // 4]

    def time_boolean_array(self, dtype):
        self.idx[self.array_mask]

    def time_boolean_series(self, dtype):
        self.idx[self.series_mask]

    def time_get(self, dtype):
        self.idx[1]

    def time_slice(self, dtype):
        self.idx[:-1]

    def time_slice_step(self, dtype):
        self.idx[::2]

    def time_get_loc(self, dtype):
        self.idx.get_loc(self.key)

    def time_get_loc_sorted(self, dtype):
        self.sorted.get_loc(self.key)

    def time_get_loc_non_unique(self, dtype):
        self.non_unique.get_loc(self.key)

    def time_get_loc_non_unique_sorted(self, dtype):
        self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
    # GH 13166: get_loc on a Float64Index of tiny scaled floats.
    goal_time = 0.2

    def setup(self):
        size = 100000
        values = np.arange(size) * 4.8000000418824129e-08
        self.ind = Float64Index(values)

    def time_get_loc(self):
        self.ind.get_loc(0)
| kdebrab/pandas | asv_bench/benchmarks/index_object.py | Python | bsd-3-clause | 5,080 |
# -*- coding:utf-8 -*-
#
# Copyright (c) 2017 mooncake. All Rights Reserved
####
# @brief
# @author Eric Yue ( hi.moonlight@gmail.com )
# @version 0.0.1
from distutils.core import setup

# Package version; also embedded in the GitHub archive download URL below.
V = "0.7"

setup(
    name = 'mooncake_utils',
    packages = ['mooncake_utils'],
    version = V,
    description = 'just a useful utils for mooncake personal project.',
    author = 'mooncake',
    author_email = 'hi.moonlight@gmail.com',
    url = 'https://github.com/ericyue/mooncake_utils',
    download_url = 'https://github.com/ericyue/mooncake_utils/archive/%s.zip' % V,
    keywords = ['utils','data','machine-learning'], # arbitrary keywords
    classifiers = [],
)
| ericyue/mooncake_utils | setup.py | Python | apache-2.0 | 646 |
"""
Widget/logic for dataset ordering.
"""
from PyQt5 import QtWidgets
import sas.qtgui.Utilities.GuiUtils as GuiUtils
# Local UI
from sas.qtgui.Perspectives.Fitting.UI.OrderWidgetUI import Ui_OrderWidgetUI
class OrderWidget(QtWidgets.QWidget, Ui_OrderWidgetUI):
    """List widget that lets the user view/reorder fitting datasets."""

    def __init__(self, parent=None, all_data=None):
        super(OrderWidget, self).__init__()
        self.setupUi(self)

        self.all_data = all_data
        self.order = {}  # dataset name -> model item, filled by setupTable()
        self.setupTable()

    def updateData(self, all_data):
        """
        Read in new datasets and update the view
        """
        self.all_data = all_data
        self.lstOrder.clear()
        # NOTE(review): self.order is not cleared here, so entries for
        # removed datasets linger; ordering() only reads names still shown
        # in lstOrder, which keeps the stale keys harmless for now.
        self.setupTable()

    def setupTable(self):
        """
        Populate the widget with dataset names in original order
        """
        if self.all_data is None: return
        for item in self.all_data:
            # Skip placeholder rows without attached data.
            if not hasattr(item, 'data'): continue
            dataset = GuiUtils.dataFromItem(item)
            if dataset is None: continue
            dataset_name = dataset.name
            self.order[dataset_name] = item
            self.lstOrder.addItem(dataset_name)

    def ordering(self):
        """
        Returns the current ordering of the datasets
        """
        order = []
        for row in range(self.lstOrder.count()):
            item_name = self.lstOrder.item(row).text()
            order.append(self.order[item_name])
        return order
| SasView/sasview | src/sas/qtgui/Perspectives/Fitting/OrderWidget.py | Python | bsd-3-clause | 1,434 |
from revscoring.features import user
from revscoring.features.modifiers import not_, log
from ..features import diff, revision
class properties:
    """
    Mapping of english descriptions to property identifiers
    """
    # Wikidata property IDs (P-numbers).
    IMAGE = 'P18'
    SEX_OR_GENDER = 'P21'
    COUNTRY_OF_CITIZENSHIP = 'P27'
    INSTANCE_OF = 'P31'
    MEMBER_OF_SPORTS_TEAM = 'P54'
    SIGNATURE = 'P109'
    COMMONS_CATEGORY = 'P373'
    DATE_OF_BIRTH = 'P569'
    DATE_OF_DEATH = 'P570'
    OFFICIAL_WEBSITE = 'P856'
class items:
    """
    Mapping of english descriptions to item idenifiers
    """
    # Wikidata item IDs (Q-numbers).
    HUMAN = 'Q5'
# Comment features: regex matches against the edit-summary text.
is_client_delete = revision.comment_matches(r"^\/\* clientsitelink\-remove\:",
                                            name='revision.is_client_delete')
is_client_move = revision.comment_matches(r"^\/\* clientsitelink\-update\:",
                                          name='revision.is_client_move')
is_merge_into = revision.comment_matches(r"^\/\* wbmergeitems\-to\:",
                                         name='revision.is_merge_into')
is_merge_from = revision.comment_matches(r"^\/\* wbmergeitems\-from\:",
                                         name='revision.is_merge_from')
is_revert = \
    revision.comment_matches(r"^Reverted edits by \[\[Special\:Contributions",
                             name='revision.is_revert')
is_rollback = revision.comment_matches(r"^Undid revision ",
                                       name='revision.is_rollback')
is_restore = revision.comment_matches(r"^Restored revision ",
                                      name='revision.is_restore')
is_item_creation = revision.comment_matches(r"^\/\* (wbsetentity|"
                                            r"wbeditentity-create\:0\|) \*\/",
                                            name='revision.is_item_creation')

# Properties changed: whether the diff touched specific Wikidata properties.
sex_or_gender_changed = \
    diff.property_changed(properties.SEX_OR_GENDER,
                          name='diff.sex_or_gender_changed')
country_of_citizenship_changed = \
    diff.property_changed(properties.COUNTRY_OF_CITIZENSHIP,
                          name='diff.country_of_citizenship_changed')
member_of_sports_team_changed = \
    diff.property_changed(properties.MEMBER_OF_SPORTS_TEAM,
                          name='diff.member_of_sports_team_changed')
date_of_birth_changed = \
    diff.property_changed(properties.DATE_OF_BIRTH,
                          name='diff.date_of_birth_changed')
image_changed = \
    diff.property_changed(properties.IMAGE,
                          name='diff.image_changed')
signature_changed = \
    diff.property_changed(properties.SIGNATURE,
                          name='diff.signature_changed')
commons_category_changed = \
    diff.property_changed(properties.COMMONS_CATEGORY,
                          name='diff.commons_category_changed')
official_website_changed = \
    diff.property_changed(properties.OFFICIAL_WEBSITE,
                          name='diff.official_website_changed')

# Status features derived from the revision's current item content.
is_human = \
    revision.has_property_value(properties.INSTANCE_OF, items.HUMAN,
                                name='revision.is_human')
has_birthday = \
    revision.has_property(properties.DATE_OF_BIRTH,
                          name='revision.has_birthday')
dead = \
    revision.has_property(properties.DATE_OF_DEATH,
                          name='revision.dead')
# "Biography of a living person": has a birth date and no death date.
is_blp = has_birthday.and_(not_(dead))

# Feature vector consumed by the "reverted" damage-detection model.
reverted = [
    # revscoring.features.diff.longest_repeated_char_added,
    # revscoring.features.diff.longest_token_added,
    # log(revscoring.features.diff.numeric_chars_added + 1),
    # log(revscoring.features.diff.numeric_chars_removed + 1),
    # revscoring.features.diff.proportion_of_chars_added,
    # revscoring.features.diff.proportion_of_chars_removed,
    # revscoring.features.diff.proportion_of_numeric_chars_added,
    # revscoring.features.diff.proportion_of_symbolic_chars_added,
    # revscoring.features.diff.proportion_of_uppercase_chars_added,
    # log(revscoring.features.diff.symbolic_chars_added + 1),
    # log(revscoring.features.diff.symbolic_chars_removed + 1),
    # log(revscoring.features.diff.uppercase_chars_added + 1),
    # log(revscoring.features.diff.uppercase_chars_removed + 1),
    # revscoring.features.diff.bytes_changed + 1,
    # revscoring.featuresdiff.bytes_changed_ratio,
    # page.is_content_namespace,
    # parent_revision.was_same_user,
    log(user.age + 1),
    diff.number_added_sitelinks,
    diff.number_removed_sitelinks,
    diff.number_changed_sitelinks,
    diff.number_added_labels,
    diff.number_removed_labels,
    diff.number_changed_labels,
    diff.number_added_descriptions,
    diff.number_removed_descriptions,
    diff.number_changed_descriptions,
    diff.number_added_aliases,
    diff.number_removed_aliases,
    diff.number_added_claims,
    diff.number_removed_claims,
    diff.number_changed_claims,
    diff.number_changed_identifiers,
    diff.en_label_touched,
    diff.number_added_sources,
    diff.number_removed_sources,
    diff.number_added_qualifiers,
    diff.number_removed_qualifiers,
    diff.number_added_badges,
    diff.number_removed_badges,
    # diff.mean_distance_descriptions,
    # diff.mean_distance_labels,
    diff.proportion_of_qid_added,
    diff.proportion_of_language_added,
    diff.proportion_of_links_added,
    is_client_move,
    is_client_delete,
    is_merge_into,
    is_merge_from,
    is_revert,
    is_rollback,
    is_restore,
    is_item_creation,
    sex_or_gender_changed,
    country_of_citizenship_changed,
    member_of_sports_team_changed,
    date_of_birth_changed,
    image_changed,
    signature_changed,
    commons_category_changed,
    official_website_changed,
    log(revision.number_claims + 1),
    log(revision.number_aliases + 1),
    log(revision.number_sources + 1),
    log(revision.number_qualifiers + 1),
    log(revision.number_badges + 1),
    log(revision.number_labels + 1),
    log(revision.number_sitelinks + 1),
    log(revision.number_descriptions + 1),
    is_human,
    is_blp,
    user.is_bot,
    user.is_anon,
]
| wiki-ai/wb-vandalism | wb_vandalism/feature_lists/wikidata.py | Python | mit | 6,094 |
# encoding: utf-8
import nose
import mock
import ckan.tests.helpers as helpers
import ckan.plugins as p
import ckanext.datastore.interfaces as interfaces
import ckanext.datastore.plugin as plugin
# Convenience aliases for the class under test and nose assertion helpers.
DatastorePlugin = plugin.DatastorePlugin

assert_equal = nose.tools.assert_equal
assert_raises = nose.tools.assert_raises
class TestPluginLoadingOrder(object):
    """The datastore plugin must be loaded before any other IDatastore
    implementation; these tests check both orderings."""

    def setup(self):
        # Start each test from a clean plugin registry.
        if p.plugin_loaded('datastore'):
            p.unload('datastore')

        if p.plugin_loaded('sample_datastore_plugin'):
            p.unload('sample_datastore_plugin')

    def test_loading_datastore_first_works(self):
        p.load('datastore')
        p.load('sample_datastore_plugin')
        p.unload('sample_datastore_plugin')
        p.unload('datastore')

    def test_loading_datastore_last_doesnt_work(self):
        # This test is complicated because we can't import
        # ckanext.datastore.plugin before running it. If we did so, the
        # DatastorePlugin class would be parsed which breaks the reason of our
        # test.
        p.load('sample_datastore_plugin')
        thrown_exception = None
        try:
            p.load('datastore')
        except Exception as e:
            thrown_exception = e
        idatastores = [x.__class__.__name__ for x
                       in p.PluginImplementations(interfaces.IDatastore)]
        p.unload('sample_datastore_plugin')

        assert thrown_exception is not None, \
            ('Loading "datastore" after another IDatastore plugin was'
             'loaded should raise DatastoreException')
        assert_equal(thrown_exception.__class__.__name__,
                     plugin.DatastoreException.__name__)
        assert plugin.DatastorePlugin.__name__ not in idatastores, \
            ('You shouldn\'t be able to load the "datastore" plugin after'
             'another IDatastore plugin was loaded')
class TestPluginDatastoreSearch(object):
    """Tests for DatastorePlugin.datastore_search full-text-search (FTS)
    query building: language selection and WHERE/SELECT clause output."""

    @classmethod
    def setup_class(cls):
        p.load('datastore')

    @classmethod
    def teardown_class(cls):
        p.unload('datastore')

    @helpers.change_config('ckan.datastore.default_fts_lang', None)
    def test_english_is_default_fts_language(self):
        expected_ts_query = ', plainto_tsquery(\'english\', \'foo\') "query"'
        data_dict = {
            'q': 'foo',
        }

        result = self._datastore_search(data_dict=data_dict)

        assert_equal(result['ts_query'], expected_ts_query)

    @helpers.change_config('ckan.datastore.default_fts_lang', 'simple')
    def test_can_overwrite_default_fts_lang_using_config_variable(self):
        expected_ts_query = ', plainto_tsquery(\'simple\', \'foo\') "query"'
        data_dict = {
            'q': 'foo',
        }

        result = self._datastore_search(data_dict=data_dict)

        assert_equal(result['ts_query'], expected_ts_query)

    @helpers.change_config('ckan.datastore.default_fts_lang', 'simple')
    def test_lang_parameter_overwrites_default_fts_lang(self):
        # Explicit per-request "lang" beats the config default.
        expected_ts_query = ', plainto_tsquery(\'french\', \'foo\') "query"'
        data_dict = {
            'q': 'foo',
            'lang': 'french',
        }

        result = self._datastore_search(data_dict=data_dict)

        assert_equal(result['ts_query'], expected_ts_query)

    def test_fts_rank_column_uses_lang_when_casting_to_tsvector(self):
        expected_select_content = u'to_tsvector(\'french\', cast("country" as text))'
        data_dict = {
            'q': {'country': 'Brazil'},
            'lang': 'french',
        }

        result = self._datastore_search(data_dict=data_dict)

        assert expected_select_content in result['select'][0], result['select']

    def test_adds_fts_on_full_text_field_when_q_is_a_string(self):
        # A plain-string q searches the whole-row _full_text index.
        expected_where = [(u'_full_text @@ "query"',)]
        data_dict = {
            'q': 'foo',
        }

        result = self._datastore_search(data_dict=data_dict)

        assert_equal(result['where'], expected_where)

    def test_ignores_fts_searches_on_inexistent_fields(self):
        data_dict = {
            'q': {'inexistent-field': 'value'},
        }

        result = self._datastore_search(data_dict=data_dict, fields_types={})

        assert_equal(result['where'], [])

    @helpers.change_config('ckan.datastore.default_fts_lang', None)
    def test_fts_where_clause_lang_uses_english_by_default(self):
        expected_where = [(u'to_tsvector(\'english\', cast("country" as text))'
                           u' @@ "query country"',)]
        data_dict = {
            'q': {'country': 'Brazil'},
        }
        fields_types = {
            'country': 'text',
        }

        result = self._datastore_search(data_dict=data_dict,
                                        fields_types=fields_types)

        assert_equal(result['where'], expected_where)

    @helpers.change_config('ckan.datastore.default_fts_lang', 'simple')
    def test_fts_where_clause_lang_can_be_overwritten_by_config(self):
        expected_where = [(u'to_tsvector(\'simple\', cast("country" as text))'
                           u' @@ "query country"',)]
        data_dict = {
            'q': {'country': 'Brazil'},
        }
        fields_types = {
            'country': 'text',
        }

        result = self._datastore_search(data_dict=data_dict,
                                        fields_types=fields_types)

        assert_equal(result['where'], expected_where)

    @helpers.change_config('ckan.datastore.default_fts_lang', 'simple')
    def test_fts_where_clause_lang_can_be_overwritten_using_lang_param(self):
        expected_where = [(u'to_tsvector(\'french\', cast("country" as text))'
                           u' @@ "query country"',)]
        data_dict = {
            'q': {'country': 'Brazil'},
            'lang': 'french',
        }
        fields_types = {
            'country': 'text',
        }

        result = self._datastore_search(data_dict=data_dict,
                                        fields_types=fields_types)

        assert_equal(result['where'], expected_where)

    @mock.patch('ckanext.datastore.helpers.should_fts_index_field_type')
    def test_fts_adds_where_clause_on_full_text_when_querying_non_indexed_fields(self, should_fts_index_field_type):
        # Fields whose type is not FTS-indexed fall back to _full_text
        # in addition to the per-field tsvector cast.
        should_fts_index_field_type.return_value = False
        expected_where = [('_full_text @@ "query country"',),
                          (u'to_tsvector(\'english\', cast("country" as text))'
                           u' @@ "query country"',)]
        data_dict = {
            'q': {'country': 'Brazil'},
            'lang': 'english',
        }
        fields_types = {
            'country': 'non-indexed field type',
        }

        result = self._datastore_search(data_dict=data_dict,
                                        fields_types=fields_types)

        assert_equal(result['where'], expected_where)

    def _datastore_search(self, context={}, data_dict={}, fields_types={}, query_dict={}):
        # Helper: invoke datastore_search with an empty skeleton query dict.
        # NOTE(review): mutable default arguments are shared across calls;
        # safe here only because the method never mutates them.
        _query_dict = {
            'select': [],
            'sort': [],
            'where': [],
        }
        _query_dict.update(query_dict)

        return DatastorePlugin().datastore_search(context, data_dict,
                                                  fields_types, _query_dict)
| NicoVarg99/daf-recipes | ckan/ckan/ckan/ckanext/datastore/tests/test_plugin.py | Python | gpl-3.0 | 7,256 |
from django.test import TestCase
from organization.models.relationship import OrganizationRelationship, \
MilitaryStance
from organization.models.capability import Capability
from organization.models.election import PositionElection, PositionCandidacy
from organization.models.organization import Organization
from world.models.geography import Tile
from character.models import Character
class TestOrganizationModel(TestCase):
fixtures = ["simple_world"]
def test_get_descendants_excluding_self(self):
    # Descendant list of the kingdom excludes the kingdom itself.
    kingdom = Organization.objects.get(name="Small Kingdom")
    descendants = kingdom.get_descendants_list()
    self.assertEqual(len(descendants), 3)
    self.assertIn(Organization.objects.get(name="Governor of some forest"), descendants)
    self.assertIn(Organization.objects.get(name="Governor of some plains"), descendants)
    self.assertIn(Organization.objects.get(name="Helper of the governor of some plains"), descendants)
def test_get_descendants_including_self(self):
    # including_self=True adds the organization itself to the result.
    kingdom = Organization.objects.get(name="Governor of some plains")
    descendants = kingdom.get_descendants_list(including_self=True)
    self.assertEqual(len(descendants), 2)
    self.assertIn(Organization.objects.get(name="Governor of some plains"), descendants)
    self.assertIn(Organization.objects.get(name="Helper of the governor of some plains"), descendants)
def test_get_membership_including_descendants(self):
    # Membership rolls up members of descendant organizations.
    kingdom = Organization.objects.get(name="Governor of some plains")
    membership = kingdom.get_membership_including_descendants()
    self.assertEqual(len(membership), 1)
    self.assertIn(Character.objects.get(id=2), membership)
def test_get_membership_including_descendants2(self):
    # At the kingdom level both fixture characters are members.
    kingdom = Organization.objects.get(name="Small Kingdom")
    membership = kingdom.get_membership_including_descendants()
    self.assertEqual(len(membership), 2)
    self.assertIn(Character.objects.get(id=1), membership)
    self.assertIn(Character.objects.get(id=2), membership)
def test_organizations_character_can_apply_capabilities_to_this_with(self):
    # The king can BAN on the kingdom only through the king position.
    king = Character.objects.get(id=1)
    king_position = Organization.objects.get(name="Small King")
    kingdom = Organization.objects.get(name="Small Kingdom")
    result = kingdom.organizations_character_can_apply_capabilities_to_this_with(king, Capability.BAN)
    self.assertEqual(len(result), 1)
    self.assertIn(king_position, result)
def test_organizations_character_can_apply_capabilities_to_this_with2(self):
king = Character.objects.get(id=2)
kingdom = Organization.objects.get(name="Small Kingdom")
result = kingdom.organizations_character_can_apply_capabilities_to_this_with(king, Capability.BAN)
self.assertEqual(len(result), 0)
def test_organizations_character_can_apply_capabilities_to_this_with3(self):
king = Character.objects.get(id=1)
king_position = Organization.objects.get(name="Small King")
kingdom = Organization.objects.get(name="Small Kingdom")
result = kingdom.organizations_character_can_apply_capabilities_to_this_with(king, Capability.CONSCRIPT)
self.assertEqual(len(result), 2)
self.assertIn(king_position, result)
self.assertIn(kingdom, result)
def test_character_is_member(self):
king = Character.objects.get(id=1)
other_guy = Character.objects.get(id=2)
kingdom = Organization.objects.get(name="Small Kingdom")
self.assertTrue(kingdom.character_is_member(king))
self.assertTrue(kingdom.character_is_member(other_guy))
def test_is_part_of_violence_monopoly(self):
organization = Organization.objects.get(name="Small Kingdom")
self.assertEqual(organization.get_violence_monopoly(), organization)
def test_is_part_of_violence_monopoly2(self):
organization = Organization.objects.get(name="Small King")
result = Organization.objects.get(name="Small Kingdom")
self.assertEqual(organization.get_violence_monopoly(), result)
def test_is_part_of_violence_monopoly3(self):
organization = Organization.objects.get(name="Governor of some plains")
result = Organization.objects.get(name="Small Kingdom")
self.assertEqual(organization.get_violence_monopoly(), result)
def test_is_part_of_violence_monopoly4(self):
organization = Organization.objects.get(name="Helper of the governor of some plains")
result = Organization.objects.get(name="Small Kingdom")
self.assertEqual(organization.get_violence_monopoly(), result)
def test_is_part_of_violence_monopoly5(self):
organization = Organization.objects.get(name="Small Federation")
self.assertIsNone(organization.get_violence_monopoly())
def test_is_part_of_violence_monopoly6(self):
organization = Organization.objects.get(name="President of the Small Federation")
self.assertIsNone(organization.get_violence_monopoly())
def test_is_part_of_violence_monopoly7(self):
organization = Organization.objects.get(name="Horde")
self.assertEqual(organization.get_violence_monopoly(), organization)
def test_get_all_controlled_tiles(self):
organization = Organization.objects.get(name="Small Kingdom")
controlled_tiles = organization.get_all_controlled_tiles()
self.assertEqual(len(controlled_tiles), 2)
self.assertIn(Tile.objects.get(name="Some plains"), controlled_tiles)
self.assertIn(Tile.objects.get(name="Some forest"), controlled_tiles)
def test_get_all_controlled_tiles2(self):
organization = Organization.objects.get(name="Governor of some plains")
controlled_tiles = organization.get_all_controlled_tiles()
self.assertEqual(len(controlled_tiles), 1)
self.assertIn(Tile.objects.get(name="Some plains"), controlled_tiles)
def test_get_all_controlled_tiles3(self):
organization = Organization.objects.get(name="Helper of the governor of some plains")
controlled_tiles = organization.get_all_controlled_tiles()
self.assertEqual(len(controlled_tiles), 0)
def test_external_capabilities_to_this(self):
organization = Organization.objects.get(name="Small King")
external_capabilities = organization.external_capabilities_to_this()
self.assertEqual(len(external_capabilities), 0)
def test_external_capabilities_to_this2(self):
organization = Organization.objects.get(name="Small Kingdom")
external_capabilities = organization.external_capabilities_to_this()
self.assertEqual(len(external_capabilities), 11)
self.assertTrue(external_capabilities.filter(type=Capability.BAN, organization__name="Small King").exists())
def test_get_position_occupier(self):
organization = Organization.objects.get(name="Small King")
self.assertEqual(organization.get_position_occupier().id, 1)
def test_get_position_occupier2(self):
organization = Organization.objects.get(name="Small Kingdom")
self.assertEqual(organization.get_position_occupier(), None)
def test_get_relationship_to(self):
organization1 = Organization.objects.get(name="Small Kingdom")
organization2 = Organization.objects.get(name="Small Commonwealth")
self.assertEqual(organization1.get_relationship_to(organization2).relationship, OrganizationRelationship.PEACE)
def test_get_relationship_from(self):
organization1 = Organization.objects.get(name="Small Kingdom")
organization2 = Organization.objects.get(name="Small King")
self.assertEqual(organization1.get_relationship_from(organization2).relationship, OrganizationRelationship.PEACE)
def test_get_war_relationship_to(self):
organization1 = Organization.objects.get(name="Horde")
organization2 = Organization.objects.get(name="Small Commonwealth")
self.assertEqual(organization1.get_relationship_to(organization2).relationship, OrganizationRelationship.WAR)
def test_get_war_relationship_from(self):
organization1 = Organization.objects.get(name="Small Commonwealth")
organization2 = Organization.objects.get(name="Horde")
self.assertEqual(organization1.get_relationship_from(organization2).relationship, OrganizationRelationship.WAR)
def test_convoke_elections(self):
democracy = Organization.objects.get(name="Small Democracy")
president = democracy.leader
president.convoke_elections()
self.assertEqual(PositionElection.objects.count(), 1)
election = PositionElection.objects.get(id=1)
self.assertEqual(election.position, president)
self.assertEqual(election.turn, 6)
self.assertEqual(election.closed, False)
self.assertEqual(election.winner, None)
self.assertEqual(election.open_candidacies().count(), 1)
self.assertEqual(election.last_turn_to_present_candidacy(), 3)
self.assertEqual(election.can_present_candidacy(), True)
self.assertEqual(election.get_results().count(), 1)
candidacy = PositionCandidacy.objects.get(id=1)
self.assertEqual(candidacy.election, election)
self.assertEqual(candidacy.candidate, president.get_position_occupier())
self.assertEqual(candidacy.retired, False)
def test_get_html_name(self):
for organization in Organization.objects.all():
html_name = organization.get_html_name()
self.assertIn(organization.name, html_name)
self.assertIn(organization.color, html_name)
if organization.get_position_occupier():
self.assertIn(organization.get_position_occupier().name, html_name)
def test_get_default_stances(self):
organization0 = Organization.objects.get(name="Small Democracy")
organization1 = Organization.objects.get(name="Small Kingdom")
organization2 = Organization.objects.get(name="Small Commonwealth")
self.assertEqual(organization0.get_default_stance_to(organization1).get_stance(), MilitaryStance.DEFENSIVE)
self.assertEqual(organization0.get_default_stance_to(organization2).get_stance(), MilitaryStance.DEFENSIVE)
self.assertEqual(organization2.get_default_stance_to(organization0).get_stance(), MilitaryStance.DEFENSIVE)
def test_get_diplomatically_based_default_stances(self):
organization0 = Organization.objects.get(name="Small Democracy")
organization1 = Organization.objects.get(name="Small Kingdom")
relationship = organization0.get_relationship_to(organization1)
relationship.set_relationship(OrganizationRelationship.WAR)
self.assertEqual(organization0.get_default_stance_to(organization1).get_stance(), MilitaryStance.AGGRESSIVE)
relationship.set_relationship(OrganizationRelationship.ALLIANCE)
self.assertEqual(organization0.get_default_stance_to(organization1).get_stance(), MilitaryStance.AVOID_BATTLE)
def test_get_diplomatically_based_default_stances2(self):
organization0 = Organization.objects.get(name="Horde")
organization1 = Organization.objects.get(name="Small Commonwealth")
self.assertEqual(organization0.get_default_stance_to(organization1).get_stance(), MilitaryStance.AGGRESSIVE)
self.assertEqual(organization1.get_default_stance_to(organization0).get_stance(), MilitaryStance.AGGRESSIVE)
def test_specially_set_stances(self):
organization0 = Organization.objects.get(name="Small Democracy")
organization1 = Organization.objects.get(name="Small Kingdom")
relationship = organization0.get_relationship_to(organization1)
relationship.set_relationship(OrganizationRelationship.WAR)
stance = organization0.get_default_stance_to(organization1)
stance.stance_type = MilitaryStance.AVOID_BATTLE
stance.save()
self.assertEqual(organization0.get_default_stance_to(organization1).get_stance(), MilitaryStance.AVOID_BATTLE)
def test_region_stance(self):
organization0 = Organization.objects.get(name="Small Democracy")
organization1 = Organization.objects.get(name="Small Kingdom")
tile = Tile.objects.get(name="Some plains")
stance = MilitaryStance.objects.create(
from_organization=organization0,
to_organization=organization1,
region=tile,
stance_type=MilitaryStance.AGGRESSIVE
)
result = organization0.get_region_stances_to(organization1)
self.assertEqual(result.count(), 1)
self.assertEqual(result[0], stance)
result = organization1.get_region_stances_to(organization0)
self.assertEqual(result.count(), 0)
result = organization0.get_region_stance_to(organization1, tile)
self.assertEqual(result.get_stance(), MilitaryStance.AGGRESSIVE)
result = organization0.get_region_stance_to(organization1, Tile.objects.get(name="Some forest"))
self.assertEqual(result.get_stance(), MilitaryStance.DEFENSIVE)
| jardiacaj/finem_imperii | organization/test/test_organization_model.py | Python | agpl-3.0 | 13,191 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# This module copyright (C) 2010 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import logging
from urllib import quote_plus
import urllib
import unicodedata
import requests
try:
from gitlab3 import GitLab
from gitlab3.exceptions import ResourceNotFound
except ImportError as exc:
# don't fail at load if gitlab module is not available
pass
from openerp import models, fields, api, exceptions
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
# Module-level logger for this addon.
logger = logging.getLogger(__name__)

# GitLab API v3 endpoint template for a project's gitlab-ci service
# settings: interpolated with (host, url-encoded project path or id).
GITLAB_CI_SETTINGS_URL = '%s/api/v3/projects/%s/services/gitlab-ci'

# Character substitutions applied to branch names before url-quoting;
# removes/replaces characters that are awkward in refs and urls.
branch_name_subs = [
    (' ', '-'),
    (',', '-'),
    ('.', '-'),
    ('[', ''),
    (']', ''),
    ('#', ''),
]
def strip_accents(unicode_string):
    """Return *unicode_string* with all accents (combining marks) removed.

    The input is decomposed to NFD form, so every accented character
    becomes a base character followed by combining code points; those
    combining code points are then dropped.

    :param unicode_string: String with possible accents
    :type unicode_string: unicode
    :return: String of unicode_string without accents
    :rtype: unicode
    """
    decomposed = unicodedata.normalize('NFD', unicode_string)
    kept = [char for char in decomposed if not unicodedata.combining(char)]
    return ''.join(kept)
def escape_branch_name(branch_name):
    """Return a url-safe version of *branch_name*.

    Applies the module-level ``branch_name_subs`` substitutions, strips
    accents and finally percent-encodes the result.
    """
    for old, new in branch_name_subs:
        branch_name = branch_name.replace(old, new)
    return urllib.quote_plus(strip_accents(branch_name))
def gitlab_api(func):
    """Decorator for functions which should be overwritten only if
    uses_gitlab is enabled in repo.

    When the repo does not use GitLab, the call is forwarded to the
    parent class implementation of the same name instead.
    """
    def gitlab_func(self, *args, **kwargs):
        if not self.uses_gitlab:
            # Fall back to the regular (non-GitLab) implementation.
            fallback = getattr(super(RunbotRepo, self), func.func_name)
            return fallback(*args, **kwargs)
        return func(self, *args, **kwargs)
    return gitlab_func
def get_gitlab_params(base):
    """Split a slash-form repo locator into (domain url, project path).

    *base* is expected in ``host[/port]/namespace/project[.git]`` form
    (colons already replaced by slashes). Returns ``None`` when the
    pattern does not match.
    """
    match = re.search(r'([^/]+)(/(\d+))?/([^/]+)/([^/.]+)(\.git)?', base)
    if match is None:
        return None
    host = match.group(1)
    port = match.group(3)
    namespace = match.group(4)
    project = match.group(5)
    # A locator that originally started with "http:" keeps plain http.
    scheme = 'http' if base.startswith('http/') else 'https'
    if port:
        host = "%s:%d" % (host, int(port))
    return "%s://%s" % (scheme, host), '%s/%s' % (namespace, project)
def get_gitlab_project(base, token, project_id=None):
    """Retrieve a gitlab project using either its id or its name.

    :param str base: url base of project containing domain and project name
    :param str token: gitlab user's token
    :param int or None project_id: optional id of project to get
    :returns gitlab3.Project: Gitlab Project
    :raises exceptions.ValidationError: Repo couldn't be found by name or id
    """
    domain, name = get_gitlab_params(base)
    api = GitLab(domain, token)
    if project_id:
        project = api.project(project_id)
    else:
        project = api.find_project(path_with_namespace=name)
    if project:
        return project
    # Build a lookup description matching how the project was searched.
    if project_id:
        detail = _("id=%d") % project_id
    else:
        detail = _("name=%s") % name
    raise exceptions.ValidationError(_('Could not find repo with ') + detail)
def set_gitlab_ci_conf(token, gitlab_url, runbot_domain, repo_id):
    """Register this runbot instance as the gitlab-ci service of a repo.

    Pushes the runbot callback url and the API token to the project's
    gitlab-ci service settings endpoint.
    """
    if not token:
        raise models.except_orm(
            _('Error!'),
            _('Gitlab repo requires an API token from a user with '
              'admin access to repo.')
        )
    # The repo url uses ':' separators; normalize before parsing.
    domain, name = get_gitlab_params(gitlab_url.replace(':', '/'))
    service_url = GITLAB_CI_SETTINGS_URL % (domain, quote_plus(name))
    payload = {
        "token": token,
        "project_url": "http://%s/gitlab-ci/%s" % (runbot_domain, repo_id),
    }
    requests.put(service_url, data=payload, headers={"PRIVATE-TOKEN": token})
class RunbotRepo(models.Model):
    """runbot.repo extension that builds GitLab merge requests."""
    _inherit = "runbot.repo"

    # When enabled, the GitLab code paths below replace the GitHub ones.
    uses_gitlab = fields.Boolean(string='Use Gitlab')
    mr_only = fields.Boolean(
        string="MR Only",
        default=True,
        help="Build only merge requests and skip regular branches")
    sticky_protected = fields.Boolean(
        string="Sticky for Protected Branches",
        default=True,
        help="Set all protected branches on the repository as sticky")

    @api.model
    def create(self, vals):
        """Create the repo and push the gitlab-ci service settings for it."""
        repo_id = super(RunbotRepo, self).create(vals)
        set_gitlab_ci_conf(
            vals.get('token'),
            vals.get('name'),
            self.domain(),
            repo_id.id,
        )
        return repo_id

    @api.multi
    def write(self, vals):
        """Update the repo and re-sync the gitlab-ci service settings.

        NOTE(review): the return value of ``super().write()`` is not
        propagated, so callers get ``None`` instead of the conventional
        ``True`` -- confirm whether this is intentional.
        """
        super(RunbotRepo, self).write(vals)
        set_gitlab_ci_conf(
            vals.get('token', self.token),
            vals.get('name', self.name),
            self.domain(),
            self.id,
        )

    @api.one
    @gitlab_api
    def github(self, url, payload=None, ignore_errors=False, delete=False):
        """Neutralized GitHub API hook for GitLab-backed repos.

        Only logs what would have been requested and returns an empty
        dict, so the base class's GitHub calls become no-ops.
        """
        if payload:
            logger.info(
                "Wanted to post payload %s at %s" % (url, payload)
            )
            r = {}
        elif delete:
            logger.info("Wanted to delete %s" % url)
            r = {}
        else:
            logger.info("Wanted to get %s" % url)
            r = {}
        return r

    @api.one
    @gitlab_api
    def update(self):
        """Poll GitLab for merge requests and queue builds for them.

        Steps: create branches/builds for open MRs, remove branches of
        closed MRs, then run the regular update and apply the
        ``sticky_protected`` / ``mr_only`` policies.
        """
        branch_obj = self.env['runbot.branch']
        project = get_gitlab_project(self.base, self.token)
        # Fetched once and passed as ``cached`` to later queries.
        merge_requests = project.find_merge_request(
            find_all=True
        )
        # Find new MRs and new builds
        for mr in project.find_merge_request(
                find_all=True,
                cached=merge_requests,
                state='opened'):
            try:
                source_project = get_gitlab_project(
                    self.base, self.token, mr.source_project_id
                )
            except ResourceNotFound:
                # Origin project no longer exists, ignore
                continue
            source_branch = source_project.branch(mr.source_branch)
            commit = source_branch.commit
            sha = commit['id']
            date = commit['committed_date']
            # TODO: TMP workaround for tzinfo bug
            # https://github.com/alexvh/python-gitlab3/issues/15
            date.tzinfo.dst = lambda _: None
            # In earlier versions of gitlab3, author and committer were a keys
            # newer versions have author_name and committer_name
            try:
                author = commit['author']['name']
            except KeyError:
                author = commit['author_name']
            try:
                committer = commit['committer']['name']
            except KeyError:
                committer = commit['committer_name']
            subject = commit['message']
            title = mr.title
            # Create or get branch
            branch_ids = branch_obj.search([
                ('repo_id', '=', self.id),
                ('project_id', '=', project.id),
                ('merge_request_id', '=', mr.iid),
            ])
            if branch_ids:
                branch_id = branch_ids[0]
            else:
                logger.debug('repo %s found new Merge Proposal %s',
                             self.name, title)
                branch_id = branch_obj.create({
                    'repo_id': self.id,
                    'name': title,
                    'project_id': project.id,
                    'merge_request_id': mr.iid,
                })
            # Create build (and mark previous builds as skipped) if not found
            build_ids = self.env['runbot.build'].search([
                ('branch_id', '=', branch_id.id),
                ('name', '=', sha),
            ])
            if not build_ids:
                logger.debug(
                    'repo %s merge request %s new build found commit %s',
                    branch_id.repo_id.name,
                    branch_id.name,
                    sha,
                )
                self.env['runbot.build'].create({
                    'branch_id': branch_id.id,
                    'name': sha,
                    'author': author,
                    'committer': committer,
                    'subject': subject,
                    'date': date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                    'modules': branch_id.repo_id.modules,
                })
        # Clean-up old MRs
        # NOTE(review): branches are created with ``mr.iid`` above but
        # matched against ``i.id`` here -- confirm gitlab3 id vs iid
        # semantics; a mismatch would leave closed-MR branches behind.
        closed_mrs = list(i.id for i in project.find_merge_request(
            find_all=True,
            cached=merge_requests,
            state='closed'
        ))
        closed_mrs = branch_obj.search([
            ('merge_request_id', 'in', closed_mrs),
        ])
        for mr in closed_mrs:
            mr.unlink()
        super(RunbotRepo, self).update()
        # Avoid TransactionRollbackError due to serialization issues
        self._cr.commit()
        self._cr.autocommit(True)
        if self.sticky_protected:
            # Put all protected branches as sticky
            protected_branches = set(b.name for b in project.find_branch(
                find_all=True, protected=True)
            )
            protected_branches.add(project.default_branch)
            sticky_protected_branches = branch_obj.search([
                ('branch_name', 'in', list(protected_branches)),
                ('sticky', '=', False),
            ])
            sticky_protected_branches.write({'sticky': True})
        if self.mr_only:
            # Skip non-sticky non-merge proposal builds
            branches = branch_obj.search([
                ('sticky', '=', False),
                ('repo_id', 'in', self.ids),
                ('project_id', '=', False),
                ('merge_request_id', '=', False),
            ])
            for build in self.env['runbot.build'].search([
                    ('branch_id', 'in', branches.ids)]):
                build.skip()
| depfac/runbot-addons | runbot_gitlab/runbot_repo.py | Python | agpl-3.0 | 10,666 |
from datetime import datetime
import warnings
from subprocess import Popen, PIPE
import re
import sys
import GRID_LRT.auth.grid_credentials as grid_creds
from GRID_LRT.auth.get_picas_credentials import get_picas_cred
from GRID_LRT import token
from GRID_LRT.Staging.srmlist import srmlist
from GRID_LRT.storage.gsifile import GSIFile
def get_srmdir_from_token_task(token_type, view, key='RESULTS_DIR'):
    """Locate the results directory for a set of PiCaS tokens.

    Reads the first token in *view* to discover the pipeline step, the
    OBSID and the results location, and wraps that location in a
    :class:`GSIFile`.

    :param str token_type: PiCaS token type (database design document)
    :param str view: name of the view to list tokens from
    :param str key: token field holding the base results path
    :returns: GSIFile for ``<key>/<pipeline_step>/<OBSID>``, or None if
        the view holds no usable token
    """
    pc = get_picas_cred()
    # Fix: the original referenced the undefined name ``Token``; this
    # module imports ``token`` (lowercase) from GRID_LRT, so use that.
    th = token.Token_Handler(t_type=token_type, uname=pc.user,
                             pwd=pc.password, dbn=pc.database)
    tokens = th.list_tokens_from_view(view)
    srmdir, OBSID, pipeline_step = None, None, None
    for t in tokens:  # TODO: Do this with a proper view
        if not pipeline_step:
            pipeline_step = th.database[t['id']]['pipeline_step']
            OBSID = th.database[t['id']]['OBSID']
        if srmdir is not None:
            # assumes the results dir is identical for every token in
            # the view, so the first one wins -- TODO confirm
            break
        if OBSID:
            srmdir_location = (str(th.database[t['id']][key]) + "/" +
                               pipeline_step + "/" + str(OBSID))
            srmdir = GSIFile(srmdir_location)
    return srmdir
def make_srmlist_from_srmdir(srmdir):
    """Build an srmlist from the locations of all entries in *srmdir*.

    :param srmdir: GSIFile directory whose contents are listed
    :returns: srmlist of the child locations
    """
    locations = srmlist()
    for entry in srmdir.list_dir():
        locations.append(entry.location)
    return locations
| apmechev/GRID_LRT | GRID_LRT/storage/utils.py | Python | gpl-3.0 | 1,259 |
# -*- coding: utf-8 -*-
"""
All unit tests for the newspaper library should be contained in this file.
"""
import sys
import os
import unittest
import time
import codecs
# Directory containing this test file, and the project root above it.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.join(TEST_DIR, '..')
# tests is a separate module, insert parent dir manually
sys.path.insert(0, PARENT_DIR)
# Data fixtures: sample url list, extracted body texts, raw html pages.
URLS_FN = os.path.join(TEST_DIR, 'data/100K_urls.txt')
TEXT_FN = os.path.join(TEST_DIR, 'data/body_text')
HTML_FN = os.path.join(TEST_DIR, 'data/html')
import newspaper
from newspaper import Article, Source, ArticleException, news_pool
from newspaper import Config
from newspaper.network import multithread_request
from newspaper.configuration import Configuration
from newspaper.utils.encoding import smart_str, smart_unicode
from newspaper.utils import encodeValue
def print_test(method):
    """utility method for print verbalizing test suite, prints out
    time taken for test and functions name, and status"""
    def run(*args, **kw):
        # Wall-clock the wrapped test method and report its name.
        # NOTE(review): the wrapped method's return value is discarded
        # and functools.wraps is not applied, so name/docstring are lost.
        ts = time.time()
        print '\ttesting function %r' % method.__name__
        method(*args, **kw)
        te = time.time()
        print '\t[OK] in %r %2.2f sec' % (method.__name__, te-ts)
    return run
def read_urls(base_fn=URLS_FN, amount=100):
    """utility funct which extracts out a listing of sample urls

    Returns at most *amount* whitespace-stripped lines from *base_fn*
    (utf8). Fix: the file handle is now closed deterministically via a
    context manager; the original leaked it.
    """
    with codecs.open(base_fn, 'r', 'utf8') as f:
        lines = [l.strip() for l in f.readlines()]
    return lines[:amount]
class ArticleTestCase(unittest.TestCase):
    """Exercises the full Article pipeline (download -> parse -> nlp)
    against a live CNN article; requires network access and is brittle
    to changes on the live page."""
    def runTest(self):
        # Order matters: download must precede parse, parse must precede nlp.
        print 'testing article unit'
        self.test_url()
        self.test_download_html()
        self.test_pre_download_parse()
        self.test_parse_html()
        self.test_pre_parse_nlp()
        self.test_nlp_body()

    def setUp(self):
        """called before the first test case of this unit begins"""
        self.article = Article(
            url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html?iref=allsearch')

    def tearDown(self):
        """called after all test cases finish of this unit"""
        pass

    @print_test
    def test_url(self):
        # Query params (?iref=...) are expected to be stripped on construction.
        assert self.article.url == u'http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html'

    @print_test
    def test_download_html(self):
        self.article.download()
        # can't compare html because it changes on every page as time goes on
        assert len(self.article.html) > 5000

    @print_test
    def test_pre_download_parse(self):
        """before we download an article you should not be parsing!"""
        a2 = Article(url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html')
        def failfunc():
            a2.parse()
        self.assertRaises(ArticleException, failfunc)

    @print_test
    def test_parse_html(self):
        # Golden values captured from the live page.
        # NOTE(review): DOMAIN and SCHEME are assigned but never asserted.
        TOP_IMG = 'http://i2.cdn.turner.com/cnn/dam/assets/131129200805-01-weather-1128-story-top.jpg'
        DOMAIN = 'www.cnn.com'
        SCHEME = 'http'
        AUTHORS = ['Dana Ford', 'Tom Watkins']
        TITLE = 'After storm, forecasters see smooth sailing for Thanksgiving'
        LEN_IMGS = 47 # list is too big, we just check size of images arr
        self.article.parse()
        with open(os.path.join(TEST_DIR, 'data/body_example.txt'), 'r') as f:
            assert self.article.text == f.read()
        assert self.article.top_img == TOP_IMG
        assert self.article.authors == AUTHORS
        assert self.article.title == TITLE
        print 'we now have ', len(self.article.imgs), 'images'
        assert len(self.article.imgs) == LEN_IMGS

    @print_test
    def test_pre_parse_nlp(self):
        # nlp() must fail both before download and before parse.
        a2 = Article(url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html')
        a2.download()
        a3 = Article(url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html')
        def failfunc():
            a2.nlp()
        def failfunc2():
            a3.nlp()
        self.assertRaises(ArticleException, failfunc)
        self.assertRaises(ArticleException, failfunc2)

    @print_test
    def test_nlp_body(self):
        # Golden summary and keyword list for the fixture article.
        SUMMARY = """Wish the forecasters were wrong all the time :)"Though the worst of the storm has passed, winds could still pose a problem.\r\nForecasters see mostly smooth sailing into Thanksgiving.\r\nThe forecast has left up in the air the fate of the balloons in Macy's Thanksgiving Day Parade.\r\nThe storm caused some complications and inconveniences, but no major delays or breakdowns.\r\n"That's good news for people like Latasha Abney, who joined the more than 43 million Americans expected by AAA to travel over the Thanksgiving holiday weekend."""
        KEYWORDS = [u'great', u'good', u'flight', u'sailing', u'delays', u'smooth', u'thanksgiving',
                    u'snow', u'weather', u'york', u'storm', u'winds', u'balloons', u'forecasters']
        self.article.nlp()
        # print self.article.summary
        # print self.article.keywords
        assert self.article.summary == SUMMARY
        assert self.article.keywords == KEYWORDS
class SourceTestCase(unittest.TestCase):
    """Builds live Source objects (cnn.com, yahoo.com); requires network."""
    def runTest(self):
        print 'testing source unit'
        self.source_url_input_none()
        self.test_cache_categories()
        self.test_source_build()

    @print_test
    def source_url_input_none(self):
        # Constructing a Source without a url must raise.
        def failfunc():
            __ = Source(url=None)
        self.assertRaises(Exception, failfunc)

    @print_test
    def test_source_build(self):
        """
        builds a source object, validates it has no errors, prints out
        all valid categories and feed urls
        """
        DESC = """CNN.com delivers the latest breaking news and information on the latest top stories, weather, business, entertainment, politics, and more. For in-depth coverage, CNN.com provides special reports, video, audio, photo galleries, and interactive guides."""
        BRAND = 'cnn'
        config = Configuration()
        config.verbose = False
        s = Source('http://cnn.com', config=config)
        # Clear the memo cache so the build is done from scratch.
        s.clean_memo_cache()
        s.build()
        assert s.brand == BRAND
        assert s.description == DESC
        # For this test case and a few more, I don't believe you can actually
        # assert two values to equal eachother because some values are ever changing.
        # Insead, i'm just going to print some stuff out so it is just as easy to take
        # a glance and see if it looks OK.
        print '\t\tWe have %d articles currently!' % s.size()
        print
        print '\t\t%s categories are: %s' % (s.url, str(s.category_urls()))
        # We are printing the contents of a source instead of
        # assert checking because the results are always varying
        # s.print_summary()

    @print_test
    def test_cache_categories(self):
        """
        builds two same source objects in a row examines speeds of both
        """
        s = Source('http://yahoo.com')
        s.download()
        s.parse()
        s.set_categories()
        saved_urls = s.category_urls()
        # Second run must hit the category cache and return the same urls.
        s.categories = [] # reset and try again with caching
        s.set_categories()
        assert sorted(s.category_urls()) == sorted(saved_urls)
class UrlTestCase(unittest.TestCase):
    """Checks the url heuristics against the labelled fixture list."""
    def runTest(self):
        print 'testing url unit'
        self.test_valid_urls()

    @print_test
    def test_valid_urls(self):
        """
        prints out a list of urls with our heuristic guess if it is a
        valid news url purely based on the url
        """
        from newspaper.urls import valid_url
        with open(os.path.join(TEST_DIR, 'data/test_urls.txt'), 'r') as f:
            lines = f.readlines()
            test_tuples = [tuple(l.strip().split(' ')) for l in lines]
            # tuples are ('1', 'url_goes_here') form, '1' means valid, '0' otherwise
        for tup in test_tuples:
            lst = int(tup[0])
            url = tup[1]
            assert len(tup) == 2
            truth_val = True if lst == 1 else False
            try:
                assert truth_val == valid_url(url, test=True)
            except AssertionError, e:
                # Report which url disagreed with its label, then re-raise.
                print '\t\turl: %s is supposed to be %s' % (url, truth_val)
                raise

    @print_test
    def test_prepare_url(self):
        """
        normalizes a url, removes arguments, hashtags. If a relative url, it
        merges it with the source domain to make an abs url, etc
        """
        pass
class APITestCase(unittest.TestCase):
    """Smoke-tests the top-level newspaper API (build, hot, popular_urls);
    requires network access."""
    def runTest(self):
        print 'testing API unit'
        # self.test_source_build()
        self.test_article_build()
        self.test_hot_trending()
        self.test_popular_urls()

    @print_test
    def test_source_build(self):
        # dry=True skips downloading, only constructs the Source.
        huff_paper = newspaper.build('http://www.huffingtonpost.com/', dry=True)
        assert isinstance(huff_paper, Source) == True

    @print_test
    def test_article_build(self):
        url = 'http://abcnews.go.com/blogs/politics/2013/12/states-cite-surge-in-obamacare-sign-ups-ahead-of-first-deadline/'
        article = newspaper.build_article(url)
        assert isinstance(article, Article) == True
        # Full pipeline is exercised only for side effects here.
        article.download()
        article.parse()
        article.nlp()
        # print article.title
        # print article.summary
        # print article.keywords

    @print_test
    def test_hot_trending(self):
        """
        grab google trending, just make sure this runs
        """
        newspaper.hot()

    @print_test
    def test_popular_urls(self):
        """
        just make sure this runs
        """
        newspaper.popular_urls()
class EncodingTestCase(unittest.TestCase):
    """Checks the str/unicode conversion helpers on non-ascii input.

    NOTE(review): fixture strings are assigned in runTest rather than
    setUp, so the test_* methods only work when driven via runTest."""
    def runTest(self):
        self.uni_string = u"∆ˆˆø∆ßåßlucas yang˜"
        self.normal_string = "∆ƒˆƒ´´lucas yang"
        self.test_encode_val()
        self.test_smart_unicode()
        self.test_smart_str()

    @print_test
    def test_encode_val(self):
        assert encodeValue(self.uni_string) == self.uni_string
        assert encodeValue(self.normal_string) == u'∆ƒˆƒ´´lucas yang'

    @print_test
    def test_smart_unicode(self):
        assert smart_unicode(self.uni_string) == self.uni_string
        assert smart_unicode(self.normal_string) == u'∆ƒˆƒ´´lucas yang'

    @print_test
    def test_smart_str(self):
        # smart_str round-trips back to a byte string (Python 2).
        assert smart_str(self.uni_string) == "∆ˆˆø∆ßåßlucas yang˜"
        assert smart_str(self.normal_string) == "∆ƒˆƒ´´lucas yang"
class MThreadingTestCase(unittest.TestCase):
    """Smoke-tests the multithreaded news_pool downloader across three
    live sources; requires network and only prints sizes (no asserts)."""
    def runTest(self):
        self.test_download_works()

    @print_test
    def test_download_works(self):
        """
        """
        config = Configuration()
        # Disable memoization so all articles are re-fetched each run.
        config.memoize_articles = False
        slate_paper = newspaper.build('http://slate.com', config)
        tc_paper = newspaper.build('http://techcrunch.com', config)
        espn_paper = newspaper.build('http://espn.com', config)
        print 'slate has %d articles tc has %d articles espn has %d articles' \
            % (slate_paper.size(), tc_paper.size(), espn_paper.size())
        papers = [slate_paper, tc_paper, espn_paper]
        news_pool.set(papers, threads_per_source=2)
        # join() blocks until every queued download finishes.
        news_pool.join()
        print 'Downloaded slate mthread len', len(slate_paper.articles[0].html)
        print 'Downloaded espn mthread len', len(espn_paper.articles[-1].html)
        print 'Downloaded tc mthread len', len(tc_paper.articles[1].html)
class ConfigBuildTestCase(unittest.TestCase):
    """Verifies that **kwargs passed to Article/Source/build end up on
    the generated Config, including the use_meta_language side effect
    of explicitly setting a language."""
    def runTest(self):
        self.test_config_build()

    @print_test
    def test_config_build(self):
        """
        Test if our **kwargs to config building setup actually works.
        """
        a = Article(url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html')
        assert a.config.language == 'en'
        assert a.config.memoize_articles == True
        assert a.config.use_meta_language == True
        # Explicit language disables meta-language detection.
        a = Article(url='http://www.cnn.com/2013/11/27/travel/weather-thanksgiving/index.html',
                    language='zh', memoize_articles=False)
        assert a.config.language == 'zh'
        assert a.config.memoize_articles == False
        assert a.config.use_meta_language == False
        s = Source(url='http://cnn.com')
        assert s.config.language == 'en'
        assert s.config.MAX_FILE_MEMO == 20000
        assert s.config.memoize_articles == True
        assert s.config.use_meta_language == True
        # Even an explicit 'en' counts as user-provided -> no meta language.
        s = Source(url="http://cnn.com", memoize_articles=False,
                   MAX_FILE_MEMO=10000, language='en')
        assert s.config.memoize_articles == False
        assert s.config.MAX_FILE_MEMO == 10000
        assert s.config.language == 'en'
        assert s.config.use_meta_language == False
        s = newspaper.build('http://cnn.com', dry=True)
        assert s.config.language == 'en'
        assert s.config.MAX_FILE_MEMO == 20000
        assert s.config.memoize_articles == True
        assert s.config.use_meta_language == True
        s = newspaper.build('http://cnn.com', dry=True, memoize_articles=False,
                            MAX_FILE_MEMO=10000, language='zh')
        assert s.config.language == 'zh'
        assert s.config.MAX_FILE_MEMO == 10000
        assert s.config.memoize_articles == False
        assert s.config.use_meta_language == False
class MultiLanguageTestCase(unittest.TestCase):
    """Full-text extraction for zh/ar/es pages against golden text
    fixtures; requires network access to the live urls."""
    def runTest(self):
        self.test_chinese_fulltext_extract()
        self.test_arabic_fulltext_extract()
        self.test_spanish_fulltext_extract()

    @print_test
    def test_chinese_fulltext_extract(self):
        url = 'http://www.bbc.co.uk/zhongwen/simp/chinese_news/2012/12/121210_hongkong_politics.shtml'
        article = Article(url=url, language='zh')
        article.download()
        article.parse()
        # Compare against the stored golden extraction.
        with codecs.open(os.path.join(TEXT_FN, 'chinese_text_1.txt'), 'r', 'utf8') as f:
            assert article.text == f.read()
        # with codecs.open(os.path.join(HTML_FN, 'chinese_html_1.html'), 'w', 'utf8') as f:
        #     f.write(article.html)

    @print_test
    def test_arabic_fulltext_extract(self):
        url = 'http://arabic.cnn.com/2013/middle_east/8/3/syria.clashes/index.html'
        article = Article(url=url, language='ar')
        article.download()
        article.parse()
        with codecs.open(os.path.join(TEXT_FN, 'arabic_text_1.txt'), 'r', 'utf8') as f:
            assert article.text == f.read()
        # with codecs.open(os.path.join(HTML_FN, 'arabic_html_1.html'), 'w', 'utf8') as f:
        #     f.write(article.html)

    @print_test
    def test_spanish_fulltext_extract(self):
        url = 'http://ultimahora.es/mallorca/noticia/noticias/local/fiscalia-anticorrupcion-estudia-recurre-imputacion-infanta.html'
        article = Article(url=url, language='es')
        article.download()
        article.parse()
        with codecs.open(os.path.join(TEXT_FN, 'spanish_text_1.txt'), 'r', 'utf8') as f:
            assert article.text == f.read()
        # with codecs.open(os.path.join(HTML_FN, 'spanish_html_1.html'), 'w', 'utf8') as f:
        #     f.write(article.html)
if __name__ == '__main__':
    # unittest.main() # run all units and their cases
    # Hand-picked suite; each TestCase's runTest drives its sub-checks
    # in a fixed order. MThreadingTestCase is currently disabled.
    suite = unittest.TestSuite()
    suite.addTest(ConfigBuildTestCase())
    # suite.addTest(MThreadingTestCase())
    suite.addTest(MultiLanguageTestCase())
    suite.addTest(SourceTestCase())
    suite.addTest(EncodingTestCase())
    suite.addTest(UrlTestCase())
    suite.addTest(ArticleTestCase())
    suite.addTest(APITestCase())
    unittest.TextTestRunner().run(suite) # run custom subset
| cantino/newspaper | tests/unit_tests.py | Python | mit | 15,574 |
"""The tests for the Unifi WAP device tracker platform."""
from collections import deque
from copy import copy
from unittest.mock import Mock
from datetime import timedelta
import pytest
from aiounifi.clients import Clients, ClientsAll
from aiounifi.devices import Devices
from homeassistant import config_entries
from homeassistant.components import unifi
from homeassistant.components.unifi.const import (
CONF_CONTROLLER,
CONF_SITE_ID,
UNIFI_CONFIG,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry
from homeassistant.setup import async_setup_component
import homeassistant.components.device_tracker as device_tracker
import homeassistant.components.unifi.device_tracker as unifi_dt
import homeassistant.util.dt as dt_util
# Window within which a client/device counts as "home" after being seen.
DEFAULT_DETECTION_TIME = timedelta(seconds=300)
# Wireless client on SSID "ssid"; hostname becomes the entity object id.
CLIENT_1 = {
    "essid": "ssid",
    "hostname": "client_1",
    "ip": "10.0.0.1",
    "is_wired": False,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:01",
}
# Wired client with an explicit "name" ("Wired Client"), which takes
# precedence over the hostname when the entity id is derived.
CLIENT_2 = {
    "hostname": "client_2",
    "ip": "10.0.0.2",
    "is_wired": True,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:02",
    "name": "Wired Client",
}
# Wireless client on a different SSID; used to exercise the SSID filter.
CLIENT_3 = {
    "essid": "ssid2",
    "hostname": "client_3",
    "ip": "10.0.0.3",
    "is_wired": False,
    "last_seen": 1562600145,
    "mac": "00:00:00:00:00:03",
}
# UniFi switch with a full state payload (including "last_seen").
DEVICE_1 = {
    "board_rev": 3,
    "device_id": "mock-id",
    "has_fan": True,
    "fan_level": 0,
    "ip": "10.0.1.1",
    "last_seen": 1562600145,
    "mac": "00:00:00:00:01:01",
    "model": "US16P150",
    "name": "device_1",
    "overheating": False,
    "type": "usw",
    "upgradable": False,
    "version": "4.0.42.10433",
}
# Same switch, but without "last_seen" and the optional state fields.
DEVICE_2 = {
    "board_rev": 3,
    "device_id": "mock-id",
    "has_fan": True,
    "ip": "10.0.1.1",
    "mac": "00:00:00:00:01:01",
    "model": "US16P150",
    "name": "device_1",
    "type": "usw",
    "version": "4.0.42.10433",
}
# Connection parameters used to build the mocked config entry below.
CONTROLLER_DATA = {
    CONF_HOST: "mock-host",
    CONF_USERNAME: "mock-user",
    CONF_PASSWORD: "mock-pswd",
    CONF_PORT: 1234,
    CONF_SITE_ID: "mock-site",
    CONF_VERIFY_SSL: True,
}
ENTRY_CONFIG = {CONF_CONTROLLER: CONTROLLER_DATA}
CONTROLLER_ID = unifi.CONTROLLER_ID.format(host="mock-host", site="mock-site")
@pytest.fixture
def mock_controller(hass):
    """Mock a UniFi Controller.

    The controller's aiounifi API object is replaced by a Mock whose
    request function serves canned responses from per-endpoint FIFO
    queues, so each test controls exactly what every update sees.
    """
    hass.data[UNIFI_CONFIG] = {}
    controller = unifi.UniFiController(hass, None)
    controller.api = Mock()
    # Every request made through mock_request is recorded here.
    controller.mock_requests = []
    # One queued item is consumed per endpoint per update cycle, so the
    # order tests append responses in must match the update order.
    controller.mock_client_responses = deque()
    controller.mock_device_responses = deque()
    controller.mock_client_all_responses = deque()
    async def mock_request(method, path, **kwargs):
        # Record the call so tests can assert on request counts.
        kwargs["method"] = method
        kwargs["path"] = path
        controller.mock_requests.append(kwargs)
        if path == "s/{site}/stat/sta":
            return controller.mock_client_responses.popleft()
        if path == "s/{site}/stat/device":
            return controller.mock_device_responses.popleft()
        if path == "s/{site}/rest/user":
            return controller.mock_client_all_responses.popleft()
        return None
    controller.api.clients = Clients({}, mock_request)
    controller.api.devices = Devices({}, mock_request)
    controller.api.clients_all = ClientsAll({}, mock_request)
    return controller
async def setup_controller(hass, mock_controller):
    """Load the UniFi switch platform with the provided controller.

    Builds a minimal ConfigEntry, triggers one controller update (which
    consumes the queued mock responses) and forwards the entry to the
    device_tracker platform.
    """
    hass.config.components.add(unifi.DOMAIN)
    hass.data[unifi.DOMAIN] = {CONTROLLER_ID: mock_controller}
    config_entry = config_entries.ConfigEntry(
        1,
        unifi.DOMAIN,
        "Mock Title",
        ENTRY_CONFIG,
        "test",
        config_entries.CONN_CLASS_LOCAL_POLL,
        entry_id=1,
    )
    mock_controller.config_entry = config_entry
    # First update consumes one response per endpoint queue.
    await mock_controller.async_update()
    await hass.config_entries.async_forward_entry_setup(
        config_entry, device_tracker.DOMAIN
    )
    await hass.async_block_till_done()
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a bridge."""
    config = {device_tracker.DOMAIN: {"platform": "unifi"}}
    assert await async_setup_component(hass, device_tracker.DOMAIN, config) is True
    # Manual platform configuration must not create any controller data.
    assert unifi.DOMAIN not in hass.data
async def test_no_clients(hass, mock_controller):
    """Test the update_clients function when no clients are found."""
    mock_controller.mock_client_responses.append({})
    mock_controller.mock_device_responses.append({})
    await setup_controller(hass, mock_controller)
    # One request per polled endpoint (clients + devices).
    assert len(mock_controller.mock_requests) == 2
    # NOTE(review): two states exist even with no clients/devices —
    # presumably baseline entities from the integration; verify if changed.
    assert len(hass.states.async_all()) == 2
async def test_tracked_devices(hass, mock_controller):
    """Test the update_items function with some clients."""
    mock_controller.mock_client_responses.append([CLIENT_1, CLIENT_2, CLIENT_3])
    mock_controller.mock_device_responses.append([DEVICE_1, DEVICE_2])
    # SSID filter only allows "ssid", so CLIENT_3 (on "ssid2") is excluded.
    mock_controller.unifi_config = {unifi_dt.CONF_SSID_FILTER: ["ssid"]}
    await setup_controller(hass, mock_controller)
    assert len(mock_controller.mock_requests) == 2
    assert len(hass.states.async_all()) == 5
    # Stale "last_seen" values mean everything starts as not_home.
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    assert client_1.state == "not_home"
    # CLIENT_2 has a "name" ("Wired Client"), hence the wired_client id.
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is not None
    assert client_2.state == "not_home"
    # Filtered out by the SSID filter above.
    client_3 = hass.states.get("device_tracker.client_3")
    assert client_3 is None
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    assert device_1.state == "not_home"
    # Second update: refresh last_seen to "now" so entities flip to home.
    client_1_copy = copy(CLIENT_1)
    client_1_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    device_1_copy = copy(DEVICE_1)
    device_1_copy["last_seen"] = dt_util.as_timestamp(dt_util.utcnow())
    mock_controller.mock_client_responses.append([client_1_copy])
    mock_controller.mock_device_responses.append([device_1_copy])
    await mock_controller.async_update()
    await hass.async_block_till_done()
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1.state == "home"
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == "home"
    # Third update: a disabled device must become unavailable.
    device_1_copy = copy(DEVICE_1)
    device_1_copy["disabled"] = True
    mock_controller.mock_client_responses.append({})
    mock_controller.mock_device_responses.append([device_1_copy])
    await mock_controller.async_update()
    await hass.async_block_till_done()
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1.state == STATE_UNAVAILABLE
async def test_restoring_client(hass, mock_controller):
    """Test that a registry-known client absent from the active client
    list is restored from the all-clients endpoint."""
    # CLIENT_1 is NOT in the active clients response, only in clients_all.
    mock_controller.mock_client_responses.append([CLIENT_2])
    mock_controller.mock_device_responses.append({})
    mock_controller.mock_client_all_responses.append([CLIENT_1])
    mock_controller.unifi_config = {unifi.CONF_BLOCK_CLIENT: True}
    # Pre-populate the entity registry so the integration has entries to
    # restore for both clients.
    registry = await entity_registry.async_get_registry(hass)
    registry.async_get_or_create(
        device_tracker.DOMAIN,
        unifi_dt.UNIFI_DOMAIN,
        "{}-mock-site".format(CLIENT_1["mac"]),
        suggested_object_id=CLIENT_1["hostname"],
        config_entry_id=1,
    )
    registry.async_get_or_create(
        device_tracker.DOMAIN,
        unifi_dt.UNIFI_DOMAIN,
        "{}-mock-site".format(CLIENT_2["mac"]),
        suggested_object_id=CLIENT_2["hostname"],
        config_entry_id=1,
    )
    await setup_controller(hass, mock_controller)
    # Three requests: clients, devices and the all-clients endpoint.
    assert len(mock_controller.mock_requests) == 3
    assert len(hass.states.async_all()) == 4
    # CLIENT_1 was restored from the registry + clients_all data.
    device_1 = hass.states.get("device_tracker.client_1")
    assert device_1 is not None
async def test_dont_track_clients(hass, mock_controller):
    """Test dont track clients config works."""
    mock_controller.mock_client_responses.append([CLIENT_1])
    mock_controller.mock_device_responses.append([DEVICE_1])
    mock_controller.unifi_config = {unifi.CONF_DONT_TRACK_CLIENTS: True}
    await setup_controller(hass, mock_controller)
    assert len(mock_controller.mock_requests) == 2
    assert len(hass.states.async_all()) == 3
    # Client tracking disabled: no client entity is created...
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is None
    # ...but the device is still tracked.
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is not None
    assert device_1.state == "not_home"
async def test_dont_track_devices(hass, mock_controller):
    """Test dont track devices config works."""
    mock_controller.mock_client_responses.append([CLIENT_1])
    mock_controller.mock_device_responses.append([DEVICE_1])
    mock_controller.unifi_config = {unifi.CONF_DONT_TRACK_DEVICES: True}
    await setup_controller(hass, mock_controller)
    assert len(mock_controller.mock_requests) == 2
    assert len(hass.states.async_all()) == 3
    # Device tracking disabled: the client is still tracked...
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    assert client_1.state == "not_home"
    # ...but no device entity is created.
    device_1 = hass.states.get("device_tracker.device_1")
    assert device_1 is None
async def test_dont_track_wired_clients(hass, mock_controller):
    """Test dont track wired clients config works."""
    mock_controller.mock_client_responses.append([CLIENT_1, CLIENT_2])
    mock_controller.mock_device_responses.append({})
    mock_controller.unifi_config = {unifi.CONF_DONT_TRACK_WIRED_CLIENTS: True}
    await setup_controller(hass, mock_controller)
    assert len(mock_controller.mock_requests) == 2
    assert len(hass.states.async_all()) == 3
    # The wireless client is still tracked.
    client_1 = hass.states.get("device_tracker.client_1")
    assert client_1 is not None
    assert client_1.state == "not_home"
    # BUGFIX: CLIENT_2 carries a "name" ("Wired Client"), so a tracked
    # entity would be "device_tracker.wired_client", never "client_2".
    # The original asserted the wrong id and passed vacuously; check the
    # id that could actually exist.
    client_2 = hass.states.get("device_tracker.wired_client")
    assert client_2 is None
| fbradyirl/home-assistant | tests/components/unifi/test_device_tracker.py | Python | apache-2.0 | 9,934 |
from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import Command as RunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(RunserverCommand):
    """``runserver`` variant that can additionally serve static files."""

    option_list = RunserverCommand.option_list + (
        make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
            help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
        make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
            help='Allows serving static files even if DEBUG is False.'),
    )
    help = "Starts a lightweight Web server for development and also serves static files."

    def get_handler(self, *args, **options):
        """Return the default handler, wrapped in a StaticFilesHandler when
        static serving is enabled and either DEBUG is on or --insecure was
        given; otherwise return the default handler unchanged."""
        handler = super(Command, self).get_handler(*args, **options)
        serve_static = options.get('use_static_handler', True)
        allow_insecure = options.get('insecure_serving', False)
        if serve_static and (settings.DEBUG or allow_insecure):
            handler = StaticFilesHandler(handler)
        return handler
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/runserver.py | Python | gpl-2.0 | 1,344 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIgraph(RPackage):
    """Network Analysis and Visualization

    Routines for simple graphs and network analysis. It can handle large
    graphs very well and provides functions for generating random and regular
    graphs, graph visualization, centrality methods and much more."""

    homepage = "https://igraph.org/"
    url      = "https://cloud.r-project.org/src/contrib/igraph_1.0.1.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/igraph"

    version('1.2.6', sha256='640da72166fda84bea2c0e5eee374f1ed80cd9439c1171d056b1b1737ae6c76d')
    version('1.2.4.1', sha256='891acc763b5a4a4a245358a95dee69280f4013c342f14dd6a438e7bb2bf2e480')
    version('1.2.4', sha256='1048eb26ab6b592815bc269c1d91e974c86c9ab827ccb80ae0a40042019592cb')
    version('1.1.2', sha256='89b16b41bc77949ea208419e52a18b78b5d418c7fedc52cd47d06a51a6e746ec')
    version('1.0.1', sha256='dc64ed09b8b5f8d66ed4936cde3491974d6bc5178dd259b6eab7ef3936aa5602')

    depends_on('r-magrittr', type=('build', 'run'))
    depends_on('r-matrix', type=('build', 'run'))
    depends_on('r-pkgconfig@2.0.0:', type=('build', 'run'))
    # irlba was dropped as a dependency after the 1.1.x series.
    depends_on('r-irlba', when='@:1.1.9', type=('build', 'run'))
    depends_on('gmp')
    depends_on('libxml2')
    # GLPK is only required from igraph 1.2.0 onward.
    depends_on('glpk', when='@1.2.0:')
| LLNL/spack | var/spack/repos/builtin/packages/r-igraph/package.py | Python | lgpl-2.1 | 1,494 |
"""`featurization.py` - a collection of methods for featurizing."""
import logging
from typing import List
__author__ = ['Todd Cook <todd.g.cook@gmail.com>']
__license__ = 'MIT License'
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
def word_to_features(word: str, max_word_length: int = 20) -> List[int]:
    """
    :param word: a single word
    :param max_word_length: the maximum word length for the feature array
    :return: A list of ordinal integers mapped to each character and padded to the max word length.

    >>> word_to_features('far')
    [114, 97, 102, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32]

    >>> word_to_features('far', 5)
    [114, 97, 102, 32, 32]

    """
    if len(word) > max_word_length:
        # Lazy %-style logging: the message is only rendered if emitted.
        LOG.warning('Excessive word length %s for %s, truncating to %s',
                    len(word), word, max_word_length)
        word = word[:max_word_length]
    # Reverse with a slice and right-pad with spaces so every word maps to a
    # fixed-length feature vector.
    return [ord(char) for char in word[::-1].ljust(max_word_length, ' ')]
| LBenzahia/cltk | cltk/utils/featurization.py | Python | mit | 1,131 |
import logging
import urlparse
import sys
try:
from io import BytesIO # python 3
except ImportError:
from cStringIO import StringIO as BytesIO # python 2
from tornado import escape
from xudd.actor import Actor, super_init
_log = logging.getLogger(__name__)
class WSGI(Actor):
    """Actor that adapts xudd HTTP request messages to a WSGI application."""

    def __init__(self, hive, id, app=None):
        super(WSGI, self).__init__(hive, id)
        self.message_routing.update({
            'handle_request': self.handle_request,
            'set_app': self.set_app
        })
        self.wsgi_app = app

    def set_app(self, message):
        '''
        Set the WSGI backend app.

        Expects:
            body: {
                app: <WSGI app>
            }
        '''
        self.wsgi_app = message.body['app']

    def handle_request(self, message):
        '''
        Build a WSGI environ from the request in ``message.body``, call the
        backend app, and reply with a serialized HTTP/1.1 response.
        '''
        _log.info('Got request')
        _log.debug('message body: {0}'.format(message.body))
        options = message.body.get('options')

        # Parse the request URI against a dummy authority so urlparse
        # splits path and query for us.
        uri_parts = urlparse.urlparse(''.join([
            'http://fake.example',
            options.get('uri')
        ]))

        environ = {
            'REQUEST_METHOD': options.get('method'),
            'SCRIPT_NAME': '',
            'PATH_INFO': escape.url_unescape(uri_parts.path),
            'QUERY_STRING': uri_parts.query,
            "REMOTE_ADDR": options.get('remote_ip'),
            "SERVER_NAME": options.get('server_name'),
            "SERVER_PORT": str(options.get('port')),
            "SERVER_PROTOCOL": options.get('version'),
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": 'http',
            "wsgi.input": BytesIO(escape.utf8(message.body.get('body'))),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": False,
            "wsgi.run_once": False,
        }
        # Content-Type / Content-Length have dedicated environ keys and
        # must not appear as HTTP_* headers.
        if "Content-Type" in options.get('headers'):
            environ["CONTENT_TYPE"] = options.get('headers').pop("Content-Type")
        if "Content-Length" in options.get('headers'):
            environ["CONTENT_LENGTH"] = options.get('headers').pop("Content-Length")
        for key, value in options.get('headers').items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value

        response = []
        data = {
            'status': 200,
            'headers': {}}

        def start_response(status, response_headers, exc_info=None):
            data['status'] = status
            data['headers'] = response_headers
            return response.append

        app_return_value = self.wsgi_app(environ, start_response)

        if app_return_value is not None:
            # BUGFIX: the original handler referenced the un-imported
            # ``traceback`` module and a possibly-unbound
            # ``response_iterator`` here, raising NameError instead of
            # logging the failure. Log the caught exception directly.
            try:
                for chunk in iter(app_return_value):
                    response.append(chunk)
            except TypeError as exc:
                _log.error('Could not iterate WSGI return value: {0}'.format(exc))

        _log.info('response: {0}'.format(response))

        message.reply(
            directive='respond',
            body={
                'response': 'HTTP/1.1 {status}\r\n{headers}\r\n\r\n{body}'.format(
                    status=data.get('status'),
                    headers='\r\n'.join(
                        [': '.join(i) for i in data.get('headers')]),
                    body=''.join(response))
            })
| xudd/xudd | xudd/lib/wsgi.py | Python | apache-2.0 | 3,382 |
#!/usr/bin/python
#----------------------------------------------------------------
# pyRoute, a routing program for OpenStreetMap-style data
#
#------------------------------------------------------
# Usage:
# pyroute.py [input OSM file] [start node] [end node]
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
# Changelog:
# 2007-11-03 OJW Created
#------------------------------------------------------
import sys
import cairo
import math
from xml.sax import make_parser, handler
class GetRoutes(handler.ContentHandler):
    """Parse an OSM file looking for routing information, and do routing with it"""

    def __init__(self):
        """Initialise an OSM-file parser"""
        self.routing = {}  # node id -> list of directly reachable node ids
        self.nodes = {}    # node id -> (lat, lon)
        # Bounding box of all nodes seen, used to scale the output image.
        self.minLon = 180
        self.minLat = 90
        self.maxLon = -180
        self.maxLat = -90

    def startElement(self, name, attrs):
        """Handle XML elements"""
        if name in ('node', 'way', 'relation'):
            if name == 'node':
                # Nodes need to be stored
                id = int(attrs.get('id'))
                lat = float(attrs.get('lat'))
                lon = float(attrs.get('lon'))
                self.nodes[id] = (lat, lon)
                # Grow the bounding box to include this node.
                if lon < self.minLon:
                    self.minLon = lon
                if lat < self.minLat:
                    self.minLat = lat
                if lon > self.maxLon:
                    self.maxLon = lon
                if lat > self.maxLat:
                    self.maxLat = lat
            self.tags = {}
            self.waynodes = []
        elif name == 'nd':
            # Nodes within a way -- add them to a list
            self.waynodes.append(int(attrs.get('ref')))
        elif name == 'tag':
            # Tags - store them in a hash
            k, v = (attrs.get('k'), attrs.get('v'))
            # BUGFIX: the original tested "k in ('created_by')", which is a
            # substring search in a *string* (parens without a comma are not
            # a tuple), silently dropping keys like 'created'. Use a tuple.
            if k not in ('created_by',):
                self.tags[k] = v

    def endElement(self, name):
        """Handle ways in the OSM data"""
        if name == 'way':
            last = -1
            highway = self.tags.get('highway', '')
            oneway = self.tags.get('oneway', '')
            reversible = not oneway in ('yes', 'true', '1')
            cyclable = highway in ('primary', 'secondary', 'tertiary', 'unclassified', 'minor', 'cycleway', 'residential', 'service')
            if cyclable:
                # Add an edge between each consecutive pair of way nodes,
                # in both directions unless the way is one-way.
                for i in self.waynodes:
                    if last != -1:
                        self.addLink(last, i)
                        if reversible:
                            self.addLink(i, last)
                    last = i

    def addLink(self, fr, to):
        """Add a routeable edge to the scenario"""
        # Look for existing
        try:
            if to in self.routing[fr]:
                return  # duplicate edge, ignore
            # Try to add to list. If list doesn't exist, create it
            self.routing[fr].append(to)
        except KeyError:
            self.routing[fr] = [to]

    def initProj(self, w, h, lat, lon, scale=1):
        """Setup an image coordinate system"""
        self.w = w
        self.h = h
        self.clat = lat
        self.clon = lon
        self.dlat = (self.maxLat - self.minLat) / scale
        self.dlon = (self.maxLon - self.minLon) / scale

    def project(self, lat, lon):
        """Convert from lat/long to image coordinates"""
        x = self.w * (0.5 + 0.5 * (lon - self.clon) / (0.5 * self.dlon))
        y = self.h * (0.5 - 0.5 * (lat - self.clat) / (0.5 * self.dlat))
        return (x, y)

    def markNode(self, node, r, g, b):
        """Mark a node on the map"""
        self.ctx.set_source_rgb(r, g, b)
        lat = self.nodes[node][0]
        lon = self.nodes[node][1]
        x, y = self.project(lat, lon)
        self.ctx.arc(x, y, 2, 0, 2 * 3.14)
        self.ctx.fill()

    def markLine(self, n1, n2, r, g, b):
        """Draw a line on the map between two nodes"""
        self.ctx.set_source_rgba(r, g, b, 0.3)
        lat = self.nodes[n1][0]
        lon = self.nodes[n1][1]
        x, y = self.project(lat, lon)
        self.ctx.move_to(x, y)
        lat = self.nodes[n2][0]
        lon = self.nodes[n2][1]
        x, y = self.project(lat, lon)
        self.ctx.line_to(x, y)
        self.ctx.stroke()

    def distance(self, n1, n2):
        """Calculate distance between two nodes"""
        lat1 = self.nodes[n1][0]
        lon1 = self.nodes[n1][1]
        lat2 = self.nodes[n2][0]
        lon2 = self.nodes[n2][1]
        # TODO: projection issues - this is a flat-earth approximation
        dlat = lat2 - lat1
        dlon = lon2 - lon1
        dist2 = dlat * dlat + dlon * dlon
        return math.sqrt(dist2)

    def doRouting(self, routeFrom, routeTo):
        """Wrapper around the routing function, which creates the output image, etc"""
        size = 800
        scalemap = 5  # the bigger this is, the more the map zooms-in
        # Centre the map halfway between start and finish
        ctrLat = (self.nodes[routeFrom][0] + self.nodes[routeTo][0]) / 2
        ctrLon = (self.nodes[routeFrom][1] + self.nodes[routeTo][1]) / 2
        self.initProj(size, size, ctrLat, ctrLon, scalemap)
        surface = cairo.ImageSurface(cairo.FORMAT_RGB24, self.w, self.h)
        self.ctx = cairo.Context(surface)
        # Dump all the nodes onto the map, to give the routes some context
        self.ctx.set_source_rgb(1.0, 0.0, 0.0)
        self.ctx.set_line_cap(cairo.LINE_CAP_ROUND)
        for id, n in self.nodes.items():
            x, y = self.project(n[0], n[1])
            self.ctx.move_to(x, y)
            self.ctx.line_to(x, y)
            self.ctx.stroke()
        # Do the routing itself
        self.doRoute(routeFrom, routeTo)
        # Highlight which nodes were the start and end
        self.markNode(routeFrom, 1, 1, 1)
        self.markNode(routeTo, 1, 1, 0)
        # Image is complete
        surface.write_to_png("output.png")

    def doRoute(self, start, end):
        """Do the routing (A*-style best-first search)"""
        self.searchEnd = end
        closed = [start]
        self.queue = []
        # Start by queueing all outbound links from the start node
        blankQueueItem = {'end': -1, 'distance': 0, 'nodes': str(start)}
        for i in self.routing[start]:
            self.addToQueue(start, i, blankQueueItem)
        # Limit for how long it will search (also useful for debugging step-by-step)
        maxSteps = 10000
        while maxSteps > 0:
            maxSteps = maxSteps - 1
            try:
                nextItem = self.queue.pop(0)
            except IndexError:
                print("Failed to find any route")
                return
            x = nextItem['end']
            if x in closed:
                continue
            self.markNode(x, 0, 0, 1)
            if x == end:
                print("Success!")
                self.printRoute(nextItem)
                return
            closed.append(x)
            try:
                for i in self.routing[x]:
                    if not i in closed:
                        self.addToQueue(x, i, nextItem)
            except KeyError:
                pass
        else:
            # Step budget exhausted without reaching the destination.
            self.debugQueue()

    def debugQueue(self):
        """Display some information about the state of our queue"""
        print("Queue now %d items long" % len(self.queue))
        # Display on map
        for i in self.queue:
            self.markNode(i['end'], 0, 0.5, 0)

    def printRoute(self, item):
        """Output stage, for printing the route once found"""
        # Route is stored as text initially. Split into a list
        print("Route: %s" % item['nodes'])
        listNodes = [int(i) for i in item['nodes'].split(",")]
        # Display the route on the map
        last = -1
        for i in listNodes:
            if last != -1:
                self.markLine(last, i, 0.5, 1.0, 0.5)
            self.markNode(i, 0.5, 1.0, 0.5)
            last = i
        # Send the route to an OSM file
        fout = open("route.osm", "w")
        fout.write("<?xml version='1.0' encoding='UTF-8'?>")
        fout.write("<osm version='0.5' generator='route.py'>")
        for i in listNodes:
            fout.write("<node id='%d' lat='%f' lon='%f'>\n</node>\n" % ( \
                i,
                self.nodes[i][0],
                self.nodes[i][1]))
        fout.write("<way id='1'>\n")
        for i in listNodes:
            # BUGFIX: the original wrote self.nodes[i][0] (latitude) for
            # *both* lat and lon here; lon must come from index 1.
            fout.write("<nd ref='%d' lat='%f' lon='%f' />\n" % ( \
                i,
                self.nodes[i][0],
                self.nodes[i][1]))
        fout.write("</way>\n")
        fout.write("</osm>")
        fout.close()

    def addToQueue(self, start, end, queueSoFar):
        """Add another potential route to the queue"""
        # If already in queue
        for test in self.queue:
            if test['end'] == end:
                return
        distance = self.distance(start, end)
        # Create a hash for all the route's attributes
        queueItem = {}
        queueItem['distance'] = queueSoFar['distance'] + distance
        # maxdistance = cost so far + straight-line estimate to the goal
        queueItem['maxdistance'] = queueItem['distance'] + self.distance(end, self.searchEnd)
        queueItem['nodes'] = queueSoFar['nodes'] + "," + str(end)
        queueItem['end'] = end
        # Try to insert, keeping the queue ordered by increasing worst-case distance
        count = 0
        for test in self.queue:
            if test['maxdistance'] > queueItem['maxdistance']:
                self.queue.insert(count, queueItem)
                break
            count = count + 1
        else:
            self.queue.append(queueItem)
        # Show on the map
        self.markLine(start, end, 0.5, 0.5, 0.5)
# Parse the supplied OSM file given on the command line.
# Usage: pyroute.py [input OSM file] [start node] [end node]
# print() call form works under both Python 2 and Python 3 for single args.
print("Loading data...")
obj = GetRoutes()
parser = make_parser()
parser.setContentHandler(obj)
parser.parse(sys.argv[1])

print("Routing...")
# Do routing between the two specified nodes
obj.doRouting(int(sys.argv[2]), int(sys.argv[3]))
| ftrimble/route-grower | pyroute/pyroute.py | Python | apache-2.0 | 9,473 |
''' Create empty schemas in Postgres database that will subsequently be needed to store tables. '''
from util import cred # load SQL credentials
from util.SQL_helpers import connect_to_db
def main():
    """Create the empty Postgres schemas that later uploads populate."""
    # All four schemas are created in one multi-statement command.
    schema_ddl = '''
    CREATE SCHEMA lookup;
    CREATE SCHEMA noble;
    CREATE SCHEMA kipp_nj;
    CREATE SCHEMA common;'''
    # Context-managed connection and cursor ensure cleanup on error.
    with connect_to_db(cred.host, cred.user, cred.pw, cred.dbname) as conn:
        with conn.cursor() as cur:
            cur.execute(schema_ddl)
            cur.connection.commit()

if __name__ == '__main__':
    main()
##
#
# Copyright 2012-2013 Ghent University
#
# This file is part of vsc-base,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-base
#
# vsc-base is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2 of
# the License, or (at your option) any later version.
#
# vsc-base is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with vsc-base. If not, see <http://www.gnu.org/licenses/>.
##
"""
@author: Toon Willems (Ghent University)
"""
from pkgutil import extend_path
# we're not the only ones in this namespace
__path__ = extend_path(__path__, __name__) #@ReservedAssignment
| stdweird/vsc-manage | test/__init__.py | Python | gpl-2.0 | 1,339 |
import unittest
import mock
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
if cuda.available:
cuda.init()
class TestInceptionBackward(unittest.TestCase):
    # Inception module channel configuration shared by all tests.
    in_channels = 3
    out1, proj3, out3, proj5, out5, proj_pool = 3, 2, 3, 2, 3, 3
    def setUp(self):
        """Create a random input, a matching output gradient and the
        Inception function under test."""
        self.x = numpy.random.uniform(
            -1, 1, (10, self.in_channels, 5, 5)
        ).astype(numpy.float32)
        # Output channels = concatenation of all four Inception branches.
        out = self.out1 + self.out3 + self.out5 + self.proj_pool
        self.gy = numpy.random.uniform(
            -1, 1, (10, out, 5, 5)).astype(numpy.float32)
        self.f = functions.Inception(
            self.in_channels, self.out1, self.proj3, self.out3,
            self.proj5, self.out5, self.proj_pool)
    def check_backward(self, x_data, y_grad):
        """Run forward then backward; only checks that backward executes."""
        x = chainer.Variable(x_data)
        y = self.f(x)
        y.grad = y_grad
        y.backward()
    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)
    @attr.gpu
    def test_backward_gpu(self):
        self.f.to_gpu()
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def _ones(gpu, *shape):
    """Return a float32 Variable of ones with ``shape``, on GPU if ``gpu``."""
    backend = cuda if gpu else numpy
    return chainer.Variable(backend.ones(shape).astype(numpy.float32))
class TestInceptionForward(unittest.TestCase):
    # Inception module channel configuration shared by all tests.
    in_channels = 3
    out1, proj3, out3, proj5, out5, proj_pool = 3, 2, 3, 2, 3, 3
    batchsize = 10
    insize = 10
    def setUp(self):
        """Create a random input and the Inception function under test."""
        self.x = numpy.random.uniform(
            -1, 1, (10, self.in_channels, 5, 5)
        ).astype(numpy.float32)
        self.f = functions.Inception(
            self.in_channels, self.out1,
            self.proj3, self.out3,
            self.proj5, self.out5, self.proj_pool)
    def setup_mock(self, gpu):
        """Replace the internal sub-links with mocks that return constant
        ones of the expected per-branch output shapes."""
        self.f.f = mock.MagicMock()
        self.f.f.conv1.return_value = _ones(gpu,
                                            self.batchsize, self.out1,
                                            self.insize, self.insize)
        self.f.f.proj3.return_value = _ones(gpu,
                                            self.batchsize, self.proj3,
                                            self.insize, self.insize)
        self.f.f.conv3.return_value = _ones(gpu,
                                            self.batchsize, self.out3,
                                            self.insize, self.insize)
        self.f.f.proj5.return_value = _ones(gpu,
                                            self.batchsize, self.proj5,
                                            self.insize, self.insize)
        self.f.f.conv5.return_value = _ones(gpu, self.batchsize, self.out5,
                                            self.insize, self.insize)
        self.f.f.projp.return_value = _ones(gpu, self.batchsize,
                                            self.proj_pool, self.insize,
                                            self.insize)
    def check_call(self, x, f, gpu):
        """Verify the forward pass invokes every branch exactly once, in order."""
        self.setup_mock(gpu)
        f(chainer.Variable(x))
        # Variable.__eq__ raises NotImplementedError,
        # so we cannot check arguments
        expected = [mock.call.conv1(mock.ANY), mock.call.proj3(mock.ANY),
                    mock.call.conv3(mock.ANY), mock.call.proj5(mock.ANY),
                    mock.call.conv5(mock.ANY), mock.call.projp(mock.ANY)]
        self.assertListEqual(self.f.f.mock_calls, expected)
    def test_call_cpu(self):
        self.check_call(self.x, self.f, False)
    @attr.gpu
    def test_call_gpu(self):
        x = cuda.to_gpu(self.x)
        self.f.to_gpu()
        self.check_call(x, self.f, True)
# Collect and run this module's test cases through chainer's test helper.
testing.run_module(__name__, __file__)
| woodshop/chainer | tests/functions_tests/test_inception.py | Python | mit | 3,737 |
class Trio(object):
    """Mendelian-consistency check for an offspring/father/mother genotype trio."""

    # Shared genotype encoding: -1 missing, 0 hom-ref, 1 het, 2 hom-alt.
    # Accepts either VCF-style strings or already-encoded integers.
    genomap = {
        './.' : -1, '0/0' : 0, '0/1' : 1, '1/0' : 1, '1/1' : 2,
        -1 : -1, 0 : 0, 1 : 1, 2 : 2
    }

    def __init__(self, offspring=None, father=None, mother=None):
        """Store the encoded genotypes for each trio member."""
        self.off = self.genomap[offspring]
        self.fat = self.genomap[father]
        self.mot = self.genomap[mother]

    def consistent(self):
        """Return True if the trio's genotypes obey Mendelian inheritance.

        A missing offspring genotype is given the benefit of the doubt;
        missing parents relax the check as noted per branch.
        """
        if self.off == -1:  # give benefit of doubt
            consistent = True
        elif self.off == 0 and (self.mot == 0 or self.mot == 1) and (self.fat == 0 or self.fat == 1):
            # both father and mother must have a 0 allele
            consistent = True
        elif self.off == 1 and not (self.mot == 0 and self.fat == 0) and not (self.mot == 2 and self.fat == 2):
            # BUGFIX: parents must not BOTH be hom-ref nor BOTH be hom-alt.
            # The original wrote "not A or not B", which is always true
            # (A and B cannot hold simultaneously), so every het child
            # passed — e.g. 0/1 from 0/0 x 0/0 was accepted.
            consistent = True
        elif self.off == 2 and (self.mot == 1 or self.mot == 2) and (self.fat == 1 or self.fat == 2):
            # both father and mother must have a 1 allele
            consistent = True
        elif self.off == 0 and ((self.mot == -1 and self.fat != 2) or (self.mot != 2 and self.fat == -1)):
            # if one parent is missing, the other must not be opposite homozygous
            consistent = True
        elif self.off == 2 and ((self.mot == -1 and self.fat != 0) or (self.mot != 0 and self.fat == -1)):
            # if one parent is missing, the other must not be opposite homozygous
            consistent = True
        elif self.off == 1 and (self.mot == -1 or self.fat == -1):
            # a het offspring with a missing parent cannot be refuted
            consistent = True
        else:
            consistent = False
        return consistent
| schae234/PonyTools | ponytools/Trio.py | Python | mit | 1,742 |
"""
Let's assume Alice and Bob hold an escrow account and now they want to
send 100 XLM from the escrow account to Eve, the following code shows how to achieve it.
I recommend that you check the `./set_up_multisig_account.py` before reading this example.
"""
from stellar_sdk import Asset, Network, Server, TransactionBuilder, TransactionEnvelope
escrow_public = "GD7ZZHKFKFPV2KR6JPE5L6QOZ43LV6HBJWLITCC73V6R7YFERSAITE4S"
alice_secret = "SDKE26TSKMJDWPTWMA5YJYSIA6VQ5QNBUS5VEUR7P6NY4F7ITL7ZILQG"
bob_secret = "SBVFXGIXA22LSNZQKXCTNBRBFHBPRWBGZ7KNWAEINCYCPMNFGJDFPWA2"
eve_public = "GAPE2V77237AQJGTFNYNI3RBMERSFLTUYPVXDMANXUGUN6IEWCVY3VXN"
network_passphrase = Network.TESTNET_NETWORK_PASSPHRASE
server = Server(horizon_url="https://horizon-testnet.stellar.org")
escrow_account = server.load_account(eve_public)
base_fee = 100
transaction = (
TransactionBuilder(
source_account=escrow_account,
network_passphrase=network_passphrase,
base_fee=base_fee,
)
.add_text_memo("Hello, Stellar!")
.append_payment_op(eve_public, Asset.native(), "100")
.set_timeout(30)
.build()
)
# Now Alice signs this transaction and sends the generated XDR to Bob
transaction.sign(alice_secret)
xdr = transaction.to_xdr()
print(f"xdr: {xdr}")
# Bob receives this XDR and signs it.
transaction = TransactionEnvelope.from_xdr(xdr, network_passphrase)
transaction.sign(bob_secret)
print(f"xdr: {transaction.to_xdr()}")
# Last, you can submit it to the network
resp = server.submit_transaction(transaction)
| StellarCN/py-stellar-base | examples/multisig_xdr.py | Python | apache-2.0 | 1,535 |
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc

from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
# Extension alias: used both in the URL path and in policy rule names.
ALIAS = "os-instance-actions"
# Hard policy check guarding access to the action records themselves.
authorize_actions = extensions.extension_authorizer('compute',
                                                    'v3:' + ALIAS)
# Soft policy check: when denied, per-action event details are simply
# omitted from the response rather than raising an error.
authorize_events = extensions.soft_extension_authorizer('compute',
                                                'v3:' + ALIAS + ':events')
# Whitelists of fields copied from the raw DB records into API responses.
ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id',
               'project_id', 'start_time', 'message']
EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback']
def make_actions(elem):
    """Declare every whitelisted action field on the given XML template
    element so the serializer emits it as an attribute.
    """
    for field in ACTION_KEYS:
        elem.set(field)
def make_action(elem):
    """Declare the action fields on *elem* and attach a nested 'events'
    sub-template carrying the whitelisted event fields.
    """
    for field in ACTION_KEYS:
        elem.set(field)
    events_elem = xmlutil.TemplateElement('events', selector='events')
    for field in EVENT_KEYS:
        events_elem.set(field)
    elem.append(events_elem)
class InstanceActionsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for index(): a list of <instance_action>
    elements under a root <instance_actions> element.
    """
    def construct(self):
        root = xmlutil.TemplateElement('instance_actions')
        elem = xmlutil.SubTemplateElement(root, 'instance_action',
                                          selector='instance_actions')
        make_actions(elem)
        return xmlutil.MasterTemplate(root, 1)
class InstanceActionTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for show(): a single <instance_action>
    element, including its nested event details (see make_action()).
    """
    def construct(self):
        root = xmlutil.TemplateElement('instance_action',
                                       selector='instance_action')
        make_action(root)
        return xmlutil.MasterTemplate(root, 1)
class InstanceActionsController(wsgi.Controller):
    """Controller exposing the actions (and their events) logged for a
    server instance.
    """
    def __init__(self):
        super(InstanceActionsController, self).__init__()
        self.compute_api = compute.API()
        self.action_api = compute.InstanceActionAPI()

    def _format_action(self, action_raw):
        """Project a raw action record onto the whitelisted ACTION_KEYS."""
        action = {}
        for key in ACTION_KEYS:
            action[key] = action_raw.get(key)
        return action

    def _format_event(self, event_raw):
        """Project a raw event record onto the whitelisted EVENT_KEYS."""
        event = {}
        for key in EVENT_KEYS:
            event[key] = event_raw.get(key)
        return event

    @wsgi.serializers(xml=InstanceActionsTemplate)
    def index(self, req, server_id):
        """Returns the list of actions recorded for a given instance.

        Raises HTTPNotFound if the instance does not exist.
        """
        context = req.environ["nova.context"]
        try:
            instance = self.compute_api.get(context, server_id)
        except exception.InstanceNotFound as err:
            raise exc.HTTPNotFound(explanation=err.format_message())
        authorize_actions(context, target=instance)
        actions_raw = self.action_api.actions_get(context, instance)
        actions = [self._format_action(action) for action in actions_raw]
        return {'instance_actions': actions}

    @wsgi.serializers(xml=InstanceActionTemplate)
    def show(self, req, server_id, id):
        """Return data about the given instance action.

        Raises HTTPNotFound if the instance or the action does not exist.
        """
        context = req.environ['nova.context']
        try:
            instance = self.compute_api.get(context, server_id)
        except exception.InstanceNotFound as err:
            raise exc.HTTPNotFound(explanation=err.format_message())
        authorize_actions(context, target=instance)
        action = self.action_api.action_get_by_request_id(context, instance,
                                                          id)
        if action is None:
            msg = _("Action %s not found") % id
            # BUG FIX: pass the message via 'explanation=' so it is rendered
            # in the error body, consistent with the other raises above
            # (previously it was passed positionally).
            raise exc.HTTPNotFound(explanation=msg)
        action_id = action['id']
        action = self._format_action(action)
        # Event details are policy-gated separately (soft check): when the
        # caller is not authorized they are silently omitted.
        if authorize_events(context):
            events_raw = self.action_api.action_events_get(context, instance,
                                                           action_id)
            action['events'] = [self._format_event(evt) for evt in events_raw]
        return {'instance_action': action}
class InstanceActions(extensions.V3APIExtensionBase):
    """View a log of actions and events taken on an instance."""
    name = "InstanceActions"
    alias = ALIAS
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "instance-actions/api/v3")
    version = 1
    def get_resources(self):
        # Mount the controller as a sub-resource of servers, i.e. at
        # /servers/{server_id}/os-instance-actions.
        ext = extensions.ResourceExtension('os-instance-actions',
                                           InstanceActionsController(),
                                           parent=dict(
                                               member_name='server',
                                               collection_name='servers'))
        return [ext]
    def get_controller_extensions(self):
        """It's an abstract function V3APIExtensionBase and the extension
        will not be loaded without it.
        """
        return []
| Brocade-OpenSource/OpenStack-DNRM-Nova | nova/api/openstack/compute/plugins/v3/instance_actions.py | Python | apache-2.0 | 5,391 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import tempfile
import warnings
import numpy
from numpy import testing as npt
import tables
from tables import Atom, ClosedNodeError, NoSuchNodeError
from tables.utils import byteorders
from tables.tests import common
from tables.tests.common import allequal
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
warnings.resetwarnings()
class BasicTestCase(TestCase):
"""Basic test for all the supported typecodes present in numpy.
All of them are included on pytables.
"""
endiancheck = False
def write_read(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_out_arg(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = numpy.empty_like(a, dtype=a.dtype)
root.somearray.read(out=b)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(abo, bbo)
if self.endiancheck:
self.assertNotEqual(bbo, sys.byteorder)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_atom_shape_args(self, testarray):
a = testarray
atom = Atom.from_dtype(a.dtype)
shape = a.shape
byteorder = None
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
if b.dtype.byteorder in ('>', '<'):
byteorder = byteorders[b.dtype.byteorder]
a = b
ptarr = fileh.create_array(root, 'somearray',
atom=atom, shape=shape,
title="Some array",
# specify the byteorder explicitly
# since there is no way to deduce
# it in this case
byteorder=byteorder)
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def setup00_char(self):
"""Data integrity during recovery (character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
return a
def test00_char(self):
a = self.setup00_char()
self.write_read(a)
def test00_char_out_arg(self):
a = self.setup00_char()
self.write_read_out_arg(a)
def test00_char_atom_shape_args(self):
a = self.setup00_char()
self.write_read_atom_shape_args(a)
def test00b_char(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = fileh.root.somearray.read()
if isinstance(a, bytes):
self.assertEqual(type(b), bytes)
self.assertEqual(a, b)
else:
# If a is not a python string, then it should be a list
# or ndarray
self.assertTrue(type(b) in [list, numpy.ndarray])
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_out_arg(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
self.assertTrue(type(b), numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_atom_shape_args(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
nparr = numpy.asarray(a)
atom = Atom.from_dtype(nparr.dtype)
shape = nparr.shape
if nparr.dtype.byteorder in ('>', '<'):
byteorder = byteorders[nparr.dtype.byteorder]
else:
byteorder = None
ptarr = fileh.create_array(fileh.root, 'somearray',
atom=atom, shape=shape,
byteorder=byteorder,
title="Some array")
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
self.assertTrue(type(b), numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def setup01_char_nc(self):
"""Data integrity during recovery (non-contiguous character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
if a.ndim == 0:
b = a.copy()
else:
b = a[::2]
# Ensure that this numpy string is non-contiguous
if len(b) > 1:
self.assertEqual(b.flags.contiguous, False)
return b
def test01_char_nc(self):
b = self.setup01_char_nc()
self.write_read(b)
def test01_char_nc_out_arg(self):
b = self.setup01_char_nc()
self.write_read_out_arg(b)
def test01_char_nc_atom_shape_args(self):
b = self.setup01_char_nc()
self.write_read_atom_shape_args(b)
def test02_types(self):
"""Data integrity during recovery (numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128']
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
self.write_read(a)
b = numpy.array(self.tupleInt, typecode)
self.write_read_out_arg(b)
c = numpy.array(self.tupleInt, typecode)
self.write_read_atom_shape_args(c)
def test03_types_nc(self):
"""Data integrity during recovery (non-contiguous numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128', ]
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
if a.ndim == 0:
b1 = a.copy()
b2 = a.copy()
b3 = a.copy()
else:
b1 = a[::2]
b2 = a[::2]
b3 = a[::2]
# Ensure that this array is non-contiguous
if len(b1) > 1:
self.assertEqual(b1.flags.contiguous, False)
if len(b2) > 1:
self.assertEqual(b2.flags.contiguous, False)
if len(b3) > 1:
self.assertEqual(b3.flags.contiguous, False)
self.write_read(b1)
self.write_read_out_arg(b2)
self.write_read_atom_shape_args(b3)
class Basic0DOneTestCase(BasicTestCase):
    # Scalar case
    # Single-digit scalar values; byteorder round-trip is also verified.
    title = "Rank-0 case 1"
    tupleInt = 3
    tupleChar = b"3"
    endiancheck = True
class Basic0DTwoTestCase(BasicTestCase):
    # Scalar case
    # Two-character scalar values; byteorder round-trip is also verified.
    title = "Rank-0 case 2"
    tupleInt = 33
    tupleChar = b"33"
    endiancheck = True
class Basic1DZeroTestCase(BasicTestCase):
    # This test case is not supported by PyTables (HDF5 limitations)
    # 1D case
    # Empty (length-0) sequences; endianness check disabled since there is
    # no data to byte-swap.
    title = "Rank-1 case 0"
    tupleInt = ()
    tupleChar = ()
    endiancheck = False
class Basic1DOneTestCase(BasicTestCase):
    # 1D case
    # Single-element sequences.
    title = "Rank-1 case 1"
    tupleInt = (3,)
    tupleChar = (b"a",)
    endiancheck = True
class Basic1DTwoTestCase(BasicTestCase):
    # 1D case
    # NOTE(review): tupleInt has two elements but tupleChar only one —
    # presumably intentional (exercises a longer string item); confirm.
    title = "Rank-1 case 2"
    tupleInt = (3, 4)
    tupleChar = (b"aaa",)
    endiancheck = True
class Basic1DThreeTestCase(BasicTestCase):
    # 1D case
    # Multi-element sequences of ints and fixed-width strings.
    title = "Rank-1 case 3"
    tupleInt = (3, 4, 5)
    tupleChar = (b"aaa", b"bbb",)
    endiancheck = True
class Basic2DOneTestCase(BasicTestCase):
    # 2D case
    # 4x4 integer array and 3x3 array of 3-byte strings.
    title = "Rank-2 case 1"
    tupleInt = numpy.array(numpy.arange((4)**2))
    tupleInt.shape = (4,)*2
    tupleChar = numpy.array(["abc"]*3**2, dtype="S3")
    tupleChar.shape = (3,)*2
    endiancheck = True
class Basic2DTwoTestCase(BasicTestCase):
    # 2D case, with a multidimensional dtype
    # Rank-1 arrays whose *items* are themselves multidimensional
    # (shape-(4,) int items and shape-(3,) string items).
    title = "Rank-2 case 2"
    tupleInt = numpy.array(numpy.arange((4)), dtype=(numpy.int_, (4,)))
    tupleChar = numpy.array(["abc"]*3, dtype=("S3", (3,)))
    endiancheck = True
class Basic10DTestCase(BasicTestCase):
    # 10D case
    # 2x2x...x2 (ten axes) arrays of ints and 3-byte strings.
    title = "Rank-10 test"
    tupleInt = numpy.array(numpy.arange((2)**10))
    tupleInt.shape = (2,)*10
    tupleChar = numpy.array(
        ["abc"]*2**10, dtype="S3")
    tupleChar.shape = (2,)*10
    endiancheck = True
class Basic32DTestCase(BasicTestCase):
    # 32D case (maximum)
    # Single-element arrays reshaped to 32 axes — the HDF5 rank limit.
    # Note endiancheck is left at the BasicTestCase default here.
    title = "Rank-32 test"
    tupleInt = numpy.array((32,))
    tupleInt.shape = (1,)*32
    tupleChar = numpy.array(["121"], dtype="S3")
    tupleChar.shape = (1,)*32
class ReadOutArgumentTests(common.TempFileMixin, TestCase):
def setUp(self):
super(ReadOutArgumentTests, self).setUp()
self.size = 1000
def create_array(self):
array = numpy.arange(self.size, dtype='f8')
disk_array = self.h5file.create_array('/', 'array', array)
return array, disk_array
def test_read_entire_array(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
disk_array.read(out=out_buffer)
numpy.testing.assert_equal(out_buffer, array)
def test_read_contiguous_slice1(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 2
disk_array.read(start=start, stop=self.size, out=out_buffer[start:])
numpy.testing.assert_equal(out_buffer[start:], array[start:])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
def test_read_contiguous_slice2(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 4
stop = self.size - start
disk_array.read(start=start, stop=stop, out=out_buffer[start:stop])
numpy.testing.assert_equal(out_buffer[start:stop], array[start:stop])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
numpy.testing.assert_equal(out_buffer[stop:], out_buffer_orig[stop:])
def test_read_non_contiguous_slice_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), dtype='f8')
disk_array.read(start=0, stop=self.size, step=2, out=out_buffer)
numpy.testing.assert_equal(out_buffer, array[0:self.size:2])
def test_read_non_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
out_buffer_slice = out_buffer[0:self.size:2]
# once Python 2.6 support is dropped, this could change
# to assertRaisesRegexp to check exception type and message at once
self.assertRaises(ValueError, disk_array.read, 0, self.size, 2,
out_buffer_slice)
try:
disk_array.read(0, self.size, 2, out_buffer_slice)
except ValueError as exc:
self.assertEqual('output array not C contiguous', str(exc))
def test_buffer_too_small(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
def test_buffer_too_large(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size + 1, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
class SizeOnDiskInMemoryPropertyTestCase(common.TempFileMixin, TestCase):
    """Check the size_on_disk / size_in_memory properties of Array."""
    def setUp(self):
        # Create a 10x10 array of 4-byte ints (all zeros) to measure.
        super(SizeOnDiskInMemoryPropertyTestCase, self).setUp()
        self.array_size = (10, 10)
        self.array = self.h5file.create_array(
            '/', 'somearray', numpy.zeros(self.array_size, 'i4'))
    def test_all_zeros(self):
        # A plain (contiguous, uncompressed) Array stores every element,
        # so both sizes equal rows * cols * itemsize = 10 * 10 * 4 bytes.
        self.assertEqual(self.array.size_on_disk, 10 * 10 * 4)
        self.assertEqual(self.array.size_in_memory, 10 * 10 * 4)
class UnalignedAndComplexTestCase(common.TempFileMixin, TestCase):
"""Basic test for all the supported typecodes present in numpy.
Most of them are included on PyTables.
"""
def setUp(self):
super(UnalignedAndComplexTestCase, self).setUp()
self.root = self.h5file.root
def write_read(self, testArray):
if common.verbose:
print('\n', '-=' * 30)
print("\nRunning test for array with type '%s'" %
testArray.dtype.type)
# Create the array under root and name 'somearray'
a = testArray
if self.endiancheck:
byteorder = {"little": "big", "big": "little"}[sys.byteorder]
else:
byteorder = sys.byteorder
self.h5file.create_array(self.root, 'somearray', a, "Some array",
byteorder=byteorder)
if self.reopen:
self._reopen()
self.root = self.h5file.root
# Read the saved array
b = self.root.somearray.read()
# Get an array to be compared in the correct byteorder
c = a.newbyteorder(byteorder)
# Compare them. They should be equal.
if not allequal(c, b) and common.verbose:
print("Write and read arrays differ!")
print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, self.root.somearray.shape)
if a.dtype.byteorder != "|":
self.assertEqual(a.dtype, b.dtype)
self.assertEqual(a.dtype, self.root.somearray.atom.dtype)
self.assertEqual(byteorders[b.dtype.byteorder], sys.byteorder)
self.assertEqual(self.root.somearray.byteorder, byteorder)
self.assertTrue(allequal(c, b))
def test01_signedShort_unaligned(self):
"""Checking an unaligned signed short integer array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f2"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.int16)
self.write_read(a)
def test02_float_unaligned(self):
"""Checking an unaligned single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f1"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, 0)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test03_byte_offset(self):
"""Checking an offsetted byte array"""
r = numpy.arange(100, dtype=numpy.int8)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test04_short_offset(self):
"""Checking an offsetted unsigned short int precision array"""
r = numpy.arange(100, dtype=numpy.uint32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test05_int_offset(self):
"""Checking an offsetted integer array"""
r = numpy.arange(100, dtype=numpy.int32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test06_longlongint_offset(self):
"""Checking an offsetted long long integer array"""
r = numpy.arange(100, dtype=numpy.int64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test07_float_offset(self):
"""Checking an offsetted single precision array"""
r = numpy.arange(100, dtype=numpy.float32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test08_double_offset(self):
"""Checking an offsetted double precision array"""
r = numpy.arange(100, dtype=numpy.float64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test09_float_offset_unaligned(self):
"""Checking an unaligned and offsetted single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,3f4,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test10_double_offset_unaligned(self):
"""Checking an unaligned and offsetted double precision array"""
r = numpy.rec.array(b'a'*400, formats='i1,3f8,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float64)
self.write_read(a)
def test11_int_byteorder(self):
"""Checking setting data with different byteorder in a range
(integer)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.int32).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (int)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
def test12_float_byteorder(self):
"""Checking setting data with different byteorder in a range (float)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (float)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
class ComplexNotReopenNotEndianTestCase(UnalignedAndComplexTestCase):
    # Native byteorder; file kept open between write and read.
    endiancheck = False
    reopen = False
class ComplexReopenNotEndianTestCase(UnalignedAndComplexTestCase):
    # Native byteorder; file closed and reopened between write and read.
    endiancheck = False
    reopen = True
class ComplexNotReopenEndianTestCase(UnalignedAndComplexTestCase):
    # Reversed (non-native) byteorder; file kept open.
    endiancheck = True
    reopen = False
class ComplexReopenEndianTestCase(UnalignedAndComplexTestCase):
    # Reversed (non-native) byteorder; file closed and reopened.
    endiancheck = True
    reopen = True
class GroupsArrayTestCase(common.TempFileMixin, TestCase):
"""This test class checks combinations of arrays with groups."""
def test00_iterativeGroups(self):
"""Checking combinations of arrays with groups."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_iterativeGroups..." %
self.__class__.__name__)
# Get the root group
group = self.h5file.root
# Set the type codes to test
# The typecodes below does expose an ambiguity that is reported in:
# http://projects.scipy.org/scipy/numpy/ticket/283 and
# http://projects.scipy.org/scipy/numpy/ticket/290
typecodes = ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'f', 'd',
'F', 'D']
if hasattr(tables, 'Float16Atom'):
typecodes.append('e')
if hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):
typecodes.append('g')
if (hasattr(tables, 'Complex192Atom') or
hasattr(tables, 'Complex256Atom')):
typecodes.append('G')
for i, typecode in enumerate(typecodes):
a = numpy.ones((3,), typecode)
dsetname = 'array_' + typecode
if common.verbose:
print("Creating dataset:", group._g_join(dsetname))
self.h5file.create_array(group, dsetname, a, "Large array")
group = self.h5file.create_group(group, 'group' + str(i))
# Reopen the file
self._reopen()
# Get the root group
group = self.h5file.root
# Get the metadata on the previosly saved arrays
for i in range(len(typecodes)):
# Create an array for later comparison
a = numpy.ones((3,), typecodes[i])
# Get the dset object hanging from group
dset = getattr(group, 'array_' + typecodes[i])
# Get the actual array
b = dset.read()
if common.verbose:
print("Info from dataset:", dset._v_pathname)
print(" shape ==>", dset.shape, end=' ')
print(" type ==> %s" % dset.atom.dtype)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %s" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# Iterate over the next group
group = getattr(group, 'group' + str(i))
def test01_largeRankArrays(self):
"""Checking creation of large rank arrays (0 < rank <= 32)
It also uses arrays ranks which ranges until maxrank.
"""
# maximum level of recursivity (deepest group level) achieved:
# maxrank = 32 (for a effective maximum rank of 32)
# This limit is due to HDF5 library limitations.
minrank = 1
maxrank = 32
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_largeRankArrays..." %
self.__class__.__name__)
print("Maximum rank for tested arrays:", maxrank)
group = self.h5file.root
if common.verbose:
print("Rank array writing progress: ", end=' ')
for rank in range(minrank, maxrank + 1):
# Create an array of integers, with incrementally bigger ranges
a = numpy.ones((1,) * rank, numpy.int32)
if common.verbose:
print("%3d," % (rank), end=' ')
self.h5file.create_array(group, "array", a, "Rank: %s" % rank)
group = self.h5file.create_group(group, 'group' + str(rank))
# Reopen the file
self._reopen()
group = self.h5file.root
if common.verbose:
print()
print("Rank array reading progress: ")
# Get the metadata on the previosly saved arrays
for rank in range(minrank, maxrank + 1):
# Create an array for later comparison
a = numpy.ones((1,) * rank, numpy.int32)
# Get the actual array
b = group.array.read()
if common.verbose:
print("%3d," % (rank), end=' ')
if common.verbose and not allequal(a, b):
print("Info from dataset:", group.array._v_pathname)
print(" Shape: ==>", group.array.shape, end=' ')
print(" typecode ==> %c" % group.array.typecode)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %c" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# print(self.h5file)
# Iterate over the next group
group = self.h5file.get_node(group, 'group' + str(rank))
if common.verbose:
print() # This flush the stdout buffer
class CopyTestCase(common.TempFileMixin, TestCase):
    """Tests for the Array.copy() method.

    Checks that data, shape-related properties, titles and (optionally)
    user attributes survive a copy.  Subclasses set ``close`` to decide
    whether the file is closed and reopened between the copy and the
    checks.
    """

    def test01_copy(self):
        """Checking Array.copy() method."""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")

        # Copy to another Array
        array2 = array1.copy('/', 'array2')

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        self.assertTrue(allequal(array1.read(), array2.read()))

        # Assert other properties in array
        self.assertEqual(array1.nrows, array2.nrows)
        self.assertEqual(array1.flavor, array2.flavor)
        self.assertEqual(array1.atom.dtype, array2.atom.dtype)
        self.assertEqual(array1.title, array2.title)

    def test02_copy(self):
        """Checking Array.copy() method (where specified)"""

        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")

        # Copy to another Array placed in a sub-group
        group1 = self.h5file.create_group("/", "group1")
        array2 = array1.copy(group1, 'array2')

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.group1.array2

        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Check that all the elements are equal
        self.assertTrue(allequal(array1.read(), array2.read()))

        # Assert other properties in array
        self.assertEqual(array1.nrows, array2.nrows)
        self.assertEqual(array1.flavor, array2.flavor)
        self.assertEqual(array1.atom.dtype, array2.atom.dtype)
        self.assertEqual(array1.title, array2.title)

    def test03_copy(self):
        """Checking Array.copy() method (checking title copying)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE: the message previously said "test04_copy" by mistake.
            print("Running %s.test03_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2

        # Copy it to another Array, overriding the title
        array2 = array1.copy('/', 'array2', title="title array2")

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        # Assert the title was taken from the copy() call, not the source
        if common.verbose:
            print("title of destination array-->", array2.title)
        self.assertEqual(array2.title, "title array2")

    def test04_copy(self):
        """Checking Array.copy() method (user attributes copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE: the message previously said "test05_copy" by mistake.
            print("Running %s.test04_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2

        # Copy it to another Array, asking for user attrs to be copied
        array2 = array1.copy('/', 'array2', copyuserattrs=1)

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Assert user attributes were copied
        self.assertEqual(array2.attrs.attr1, "attr1")
        self.assertEqual(array2.attrs.attr2, 2)

    def test04b_copy(self):
        """Checking Array.copy() method (user attributes not copied)"""

        if common.verbose:
            print('\n', '-=' * 30)
            # NOTE: the message previously said "test05b_copy" by mistake.
            print("Running %s.test04b_copy..." % self.__class__.__name__)

        # Create an Array
        arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', arr, "title array1")
        # Append some user attrs
        array1.attrs.attr1 = "attr1"
        array1.attrs.attr2 = 2

        # Copy it to another Array, asking for user attrs NOT to be copied
        array2 = array1.copy('/', 'array2', copyuserattrs=0)

        if self.close:
            if common.verbose:
                print("(closing file version)")
            self._reopen()
            array1 = self.h5file.root.array1
            array2 = self.h5file.root.array2

        if common.verbose:
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))

        # Assert user attributes were NOT copied
        self.assertFalse(hasattr(array2.attrs, "attr1"))
        self.assertFalse(hasattr(array2.attrs, "attr2"))
class CloseCopyTestCase(CopyTestCase):
    """Run CopyTestCase with a close/reopen cycle after each copy."""
    close = 1
class OpenCopyTestCase(CopyTestCase):
    """Run CopyTestCase with the file kept open throughout."""
    close = 0
class CopyIndexTestCase(common.TempFileMixin, TestCase):
    """Tests for Array.copy() with start/stop/step row selections.

    The slice bounds (``start``, ``stop``, ``step``) are provided by the
    CopyIndex*TestCase subclasses below.
    """

    def test01_index(self):
        """Checking Array.copy() method with indexes."""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test01_index..." % self.__class__.__name__)
        # Create a numpy
        r = numpy.arange(200, dtype='int32')
        r.shape = (100, 2)
        # Save it in a array:
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', r, "title array1")
        # Copy to another array
        array2 = array1.copy("/", 'array2',
                             start=self.start,
                             stop=self.stop,
                             step=self.step)
        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))
        # Check that all the elements are equal
        # (the copy must match the equivalent NumPy slice)
        r2 = r[self.start:self.stop:self.step]
        self.assertTrue(allequal(r2, array2.read()))
        # Assert the number of rows in array
        if common.verbose:
            print("nrows in array2-->", array2.nrows)
            print("and it should be-->", r2.shape[0])
        self.assertEqual(r2.shape[0], array2.nrows)

    def test02_indexclosef(self):
        """Checking Array.copy() method with indexes (close file version)"""
        if common.verbose:
            print('\n', '-=' * 30)
            print("Running %s.test02_indexclosef..." % self.__class__.__name__)
        # Create a numpy
        r = numpy.arange(200, dtype='int32')
        r.shape = (100, 2)
        # Save it in a array:
        array1 = self.h5file.create_array(
            self.h5file.root, 'array1', r, "title array1")
        # Copy to another array
        array2 = array1.copy("/", 'array2',
                             start=self.start,
                             stop=self.stop,
                             step=self.step)
        # Close and reopen the file
        self._reopen()
        array1 = self.h5file.root.array1
        array2 = self.h5file.root.array2
        if common.verbose:
            print("array1-->", array1.read())
            print("array2-->", array2.read())
            print("attrs array1-->", repr(array1.attrs))
            print("attrs array2-->", repr(array2.attrs))
        # Check that all the elements are equal
        r2 = r[self.start:self.stop:self.step]
        self.assertTrue(allequal(r2, array2.read()))
        # Assert the number of rows in array
        if common.verbose:
            print("nrows in array2-->", array2.nrows)
            print("and it should be-->", r2.shape[0])
        self.assertEqual(r2.shape[0], array2.nrows)
# Slice-bound combinations for CopyIndexTestCase: plain ranges, negative
# bounds, strides larger than the range, and None/negative sentinels.
class CopyIndex1TestCase(CopyIndexTestCase):
    start = 0
    stop = 7
    step = 1
class CopyIndex2TestCase(CopyIndexTestCase):
    start = 0
    stop = -1
    step = 1
class CopyIndex3TestCase(CopyIndexTestCase):
    start = 1
    stop = 7
    step = 1
class CopyIndex4TestCase(CopyIndexTestCase):
    start = 0
    stop = 6
    step = 1
class CopyIndex5TestCase(CopyIndexTestCase):
    start = 3
    stop = 7
    step = 1
class CopyIndex6TestCase(CopyIndexTestCase):
    start = 3
    stop = 6
    step = 2
class CopyIndex7TestCase(CopyIndexTestCase):
    # step larger than the selected range -> single row
    start = 0
    stop = 7
    step = 10
class CopyIndex8TestCase(CopyIndexTestCase):
    start = 6
    stop = -1  # Negative values means starting from the end
    step = 1
class CopyIndex9TestCase(CopyIndexTestCase):
    start = 3
    stop = 4
    step = 1
class CopyIndex10TestCase(CopyIndexTestCase):
    start = 3
    stop = 4
    step = 2
class CopyIndex11TestCase(CopyIndexTestCase):
    start = -3
    stop = -1
    step = 2
class CopyIndex12TestCase(CopyIndexTestCase):
    start = -1  # Should point to the last element
    stop = None  # None should mean the last element (including it)
    step = 1
class GetItemTestCase(common.TempFileMixin, TestCase):
    """__getitem__ tests for Array nodes.

    Compares single-element, range, strided and negative-index access on a
    PyTables Array against the same access on the original NumPy array.
    Sample data (``charList``, ``charListME``, ``numericalList``,
    ``numericalListME``) and the ``close`` flag come from subclasses.
    """

    def test00_single(self):
        """Single element access (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0], type(a[0]))
            print("Read first element:", arr[0], type(arr[0]))
        self.assertTrue(allequal(a[0], arr[0]))
        self.assertEqual(type(a[0]), type(arr[0]))

    def test01_single(self):
        """Single element access (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0], type(a[0]))
            print("Read first element:", arr[0], type(arr[0]))
        self.assertEqual(a[0], arr[0])
        self.assertEqual(type(a[0]), type(arr[0]))

    def test02_range(self):
        """Range element access (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a slice
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test03_range(self):
        """Range element access (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a slice
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test04_range(self):
        """Range element access, strided (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a strided slice
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test05_range(self):
        """Range element access, strided (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a strided slice
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test06_negativeIndex(self):
        """Negative Index element access (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare the last element
        if common.verbose:
            print("Original last element:", a[-1])
            print("Read last element:", arr[-1])
        self.assertTrue(allequal(a[-1], arr[-1]))

    def test07_negativeIndex(self):
        """Negative Index element access (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare the next-to-last element
        if common.verbose:
            print("Original before last element:", a[-2])
            print("Read before last element:", arr[-2])
        # Rank-1 samples yield scalars; higher ranks yield sub-arrays.
        if isinstance(a[-2], numpy.ndarray):
            self.assertTrue(allequal(a[-2], arr[-2]))
        else:
            self.assertEqual(a[-2], arr[-2])

    def test08_negativeRange(self):
        """Negative range element access (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a negatively-bounded slice
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test09_negativeRange(self):
        """Negative range element access (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        # Get and compare a negatively-bounded slice
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class GI1NATestCase(GetItemTestCase, TestCase):
    """Rank-1 sample data for the __getitem__ tests."""
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')
class GI1NAOpenTestCase(GI1NATestCase):
    """GI1NA data with the file kept open."""
    close = 0
class GI1NACloseTestCase(GI1NATestCase):
    """GI1NA data with a close/reopen cycle before reading."""
    close = 1
class GI2NATestCase(GetItemTestCase):
    # A more complex example
    """Rank-1/rank-2 sample data for the __getitem__ tests."""
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021",
          "421", "521", "621"],
         ["3241", "2321", "13216",
          "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]], 'S')
class GI2NAOpenTestCase(GI2NATestCase):
    """GI2NA data with the file kept open."""
    close = 0
class GI2NACloseTestCase(GI2NATestCase):
    """GI2NA data with a close/reopen cycle before reading."""
    close = 1
class SetItemTestCase(common.TempFileMixin, TestCase):
    """__setitem__ tests for Array nodes.

    Applies the same update to a NumPy array and to a PyTables Array and
    checks that both end up equal: single elements, ranges, strides,
    negative indices/ranges and out-of-range slices.  Sample data and the
    ``close`` flag come from subclasses.
    """

    def test00_single(self):
        """Single element update (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify a single element of a and arr:
        a[0] = b"b"
        arr[0] = b"b"
        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0])
            print("Read first element:", arr[0])
        self.assertTrue(allequal(a[0], arr[0]))

    def test01_single(self):
        """Single element update (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalList
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        a[0] = 333
        arr[0] = 333
        # Get and compare an element
        if common.verbose:
            print("Original first element:", a[0])
            print("Read first element:", arr[0])
        self.assertEqual(a[0], arr[0])

    def test02_range(self):
        """Range element update (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr (broadcast over the slice):
        a[1:3] = b"xXx"
        arr[1:3] = b"xXx"
        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test03_range(self):
        """Range element update (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr with a matching-shape array:
        s = slice(1, 3, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng
        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4])
            print("Read elements:", arr[1:4])
        self.assertTrue(allequal(a[1:4], arr[1:4]))

    def test04_range(self):
        """Range element update, strided (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = slice(1, 4, 2)
        a[s] = b"xXx"
        arr[s] = b"xXx"
        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test05_range(self):
        """Range element update, strided (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = slice(1, 4, 2)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng
        # Get and compare an element
        if common.verbose:
            print("Original elements:", a[1:4:2])
            print("Read elements:", arr[1:4:2])
        self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))

    def test06_negativeIndex(self):
        """Negative Index element update (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = -1
        a[s] = b"xXx"
        arr[s] = b"xXx"
        # Get and compare an element
        if common.verbose:
            print("Original last element:", a[-1])
            print("Read last element:", arr[-1])
        self.assertTrue(allequal(a[-1], arr[-1]))

    def test07_negativeIndex(self):
        """Negative Index element update (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = -2
        a[s] = a[s]*2 + 3
        arr[s] = arr[s]*2 + 3
        # Get and compare an element
        if common.verbose:
            print("Original before last element:", a[-2])
            print("Read before last element:", arr[-2])
        # Rank-1 samples yield scalars; higher ranks yield sub-arrays.
        if isinstance(a[-2], numpy.ndarray):
            self.assertTrue(allequal(a[-2], arr[-2]))
        else:
            self.assertEqual(a[-2], arr[-2])

    def test08_negativeRange(self):
        """Negative range element update (character types)"""
        # Create the array under root and name 'somearray'
        a = self.charListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = slice(-4, -1, None)
        a[s] = b"xXx"
        arr[s] = b"xXx"
        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test09_negativeRange(self):
        """Negative range element update (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of a and arr:
        s = slice(-3, -1, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        arr[s] = rng
        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))

    def test10_outOfRange(self):
        """Out of range update (numerical types)"""
        # Create the array under root and name 'somearray'
        a = self.numericalListME
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen('a')
            arr = self.h5file.root.somearray
        # Modify elements of arr that are out of range:
        # (slices past the end must clip, like NumPy's)
        s = slice(1, a.shape[0]+1, None)
        s2 = slice(1, 1000, None)
        rng = numpy.arange(a[s].size)*2 + 3
        rng.shape = a[s].shape
        a[s] = rng
        rng2 = numpy.arange(a[s2].size)*2 + 3
        rng2.shape = a[s2].shape
        arr[s2] = rng2
        # Get and compare an element
        if common.verbose:
            print("Original last elements:", a[-4:-1])
            print("Read last elements:", arr[-4:-1])
        self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class SI1NATestCase(SetItemTestCase, TestCase):
    """Rank-1 sample data for the __setitem__ tests."""
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')
class SI1NAOpenTestCase(SI1NATestCase):
    """SI1NA data with the file kept open."""
    close = 0
class SI1NACloseTestCase(SI1NATestCase):
    """SI1NA data with a close/reopen cycle before writing."""
    close = 1
class SI2NATestCase(SetItemTestCase):
    # A more complex example
    """Rank-1/rank-2 sample data for the __setitem__ tests."""
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021",
          "421", "521", "621"],
         ["3241", "2321", "13216",
          "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]], 'S')
class SI2NAOpenTestCase(SI2NATestCase):
    """SI2NA data with the file kept open."""
    close = 0
class SI2NACloseTestCase(SI2NATestCase):
    """SI2NA data with a close/reopen cycle before writing."""
    close = 1
class GeneratorTestCase(common.TempFileMixin, TestCase):
    """Check that iterating a PyTables Array yields the same sequence of
    elements as iterating the NumPy array it was created from.

    Sample data (``charList``, ``charListME``, ``numericalList``,
    ``numericalListME``) and the ``close`` flag come from subclasses.
    """

    def _make_array(self, a):
        # Store the sample under /somearray and, if requested, close and
        # reopen the file so iteration runs on a freshly loaded node.
        arr = self.h5file.create_array(
            self.h5file.root, 'somearray', a, "Some array")
        if self.close:
            self._reopen()
            arr = self.h5file.root.somearray
        return arr

    def _assert_rows_equal(self, ga, garr):
        # Rows are arrays themselves, so compare pairwise with allequal().
        # Check the lengths first so zip() cannot hide a short sequence.
        self.assertEqual(len(ga), len(garr))
        for row_a, row_b in zip(ga, garr):
            self.assertTrue(allequal(row_a, row_b))

    def test00a_single(self):
        """Testing generator access to Arrays, single elements (char)"""
        a = self.charList
        arr = self._make_array(a)
        # Exhaust both iterators and compare the results
        ga = list(a)
        garr = list(arr)
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self.assertEqual(ga, garr)

    def test00b_me(self):
        """Testing generator access to Arrays, multiple elements (char)"""
        a = self.charListME
        arr = self._make_array(a)
        # Exhaust both iterators and compare the results
        ga = list(a)
        garr = list(arr)
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self._assert_rows_equal(ga, garr)

    def test01a_single(self):
        """Testing generator access to Arrays, single elements (numeric)"""
        a = self.numericalList
        arr = self._make_array(a)
        # Exhaust both iterators and compare the results
        ga = list(a)
        garr = list(arr)
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self.assertEqual(ga, garr)

    def test01b_me(self):
        """Testing generator access to Arrays, multiple elements (numeric)"""
        a = self.numericalListME
        arr = self._make_array(a)
        # Exhaust both iterators and compare the results
        ga = list(a)
        garr = list(arr)
        if common.verbose:
            print("Result of original iterator:", ga)
            print("Result of read generator:", garr)
        self._assert_rows_equal(ga, garr)
class GE1NATestCase(GeneratorTestCase):
    """Rank-1 sample data for the iterator tests."""
    title = "Rank-1 case 1"
    numericalList = numpy.array([3])
    numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
    charList = numpy.array(["3"], 'S')
    charListME = numpy.array(
        ["321", "221", "121", "021", "421", "521", "621"], 'S')
class GE1NAOpenTestCase(GE1NATestCase):
    """GE1NA data with the file kept open."""
    close = 0
class GE1NACloseTestCase(GE1NATestCase):
    """GE1NA data with a close/reopen cycle before iterating."""
    close = 1
class GE2NATestCase(GeneratorTestCase):
    # A more complex example
    """Rank-1/rank-2 sample data for the iterator tests."""
    title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
    numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
                                   [2, 1, 0, 4, 5, 6, 7],
                                   [4, 3, 2, 1, 0, 4, 5],
                                   [3, 2, 1, 0, 4, 5, 6],
                                   [3, 2, 1, 0, 4, 5, 6]])
    charList = numpy.array(["a", "b"], 'S')
    charListME = numpy.array(
        [["321", "221", "121", "021", "421", "521", "621"],
         ["21", "21", "11", "02", "42", "21", "61"],
         ["31", "21", "12", "21", "41", "51", "621"],
         ["321", "221", "121", "021",
          "421", "521", "621"],
         ["3241", "2321", "13216",
          "0621", "4421", "5421", "a621"],
         ["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]], 'S')
class GE2NAOpenTestCase(GE2NATestCase):
    """GE2NA data with the file kept open."""
    close = 0
class GE2NACloseTestCase(GE2NATestCase):
    """GE2NA data with a close/reopen cycle before iterating."""
    close = 1
class NonHomogeneousTestCase(common.TempFileMixin, TestCase):
    def test(self):
        """Test for creation of non-homogeneous arrays."""
        # This checks ticket #12.
        # A ragged nested sequence must be rejected outright...
        self.assertRaises(ValueError,
                          self.h5file.create_array, '/', 'test', [1, [2, 3]])
        # ...and no partial node may be left behind by the failed creation.
        self.assertRaises(NoSuchNodeError, self.h5file.remove_node, '/test')
class TruncateTestCase(common.TempFileMixin, TestCase):
    def test(self):
        """Test the inability to truncate Array objects.

        Array nodes are fixed-size, so truncate() must raise TypeError.
        """
        array1 = self.h5file.create_array('/', 'array1', [0, 2])
        self.assertRaises(TypeError, array1.truncate, 0)
class PointSelectionTestCase(common.TempFileMixin, TestCase):
    """Point-selection (coordinate-based) read/write tests.

    Builds an int32 sample of ``self.shape`` (set by subclasses) and
    checks that boolean and integer coordinate selections behave the
    same on the PyTables Array as on the NumPy array, for reads and
    in-place writes.
    """

    def setUp(self):
        super(PointSelectionTestCase, self).setUp()
        # Limits for selections
        self.limits = [
            (0, 1),  # just one element
            (20, -10),  # no elements
            (-10, 4),  # several elements
            (0, 10),  # several elements (again)
        ]
        # Create a sample array
        size = numpy.prod(self.shape)
        nparr = numpy.arange(size, dtype=numpy.int32).reshape(self.shape)
        self.nparr = nparr
        self.tbarr = self.h5file.create_array(self.h5file.root, 'array', nparr)

    def test01a_read(self):
        """Test for point-selections (read, boolean keys)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = (nparr >= value1) & (nparr < value2)
            if common.verbose:
                print("Selection to test:", key)
            a = nparr[key]
            b = tbarr[key]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables selections does not match.")

    def test01b_read(self):
        """Test for point-selections (read, integer keys)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = numpy.where((nparr >= value1) & (nparr < value2))
            if common.verbose:
                print("Selection to test:", key)
            a = nparr[key]
            b = tbarr[key]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables selections does not match.")

    def test01c_read(self):
        """Test for point-selections (read, float keys)."""
        # Float coordinates are invalid and must be rejected.
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = numpy.where((nparr >= value1) & (nparr < value2))
            if common.verbose:
                print("Selection to test:", key)
            # a = nparr[key]
            fkey = numpy.array(key, "f4")
            self.assertRaises((IndexError, TypeError), tbarr.__getitem__, fkey)

    def test01d_read(self):
        # Valid coordinate lists from the subclass must read like NumPy.
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            a = nparr[key]
            b = tbarr[key]
            npt.assert_array_equal(
                a, b, "NumPy array and PyTables selections does not match.")

    def test01e_read(self):
        # Out-of-bounds coordinate lists from the subclass must raise.
        tbarr = self.tbarr
        for key in self.not_working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            self.assertRaises(IndexError, tbarr.__getitem__, key)

    def test02a_write(self):
        """Test for point-selections (write, boolean keys)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = (nparr >= value1) & (nparr < value2)
            if common.verbose:
                print("Selection to test:", key)
            s = nparr[key]
            nparr[key] = s * 2
            tbarr[key] = s * 2
            a = nparr[:]
            b = tbarr[:]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables modifications does not match.")

    def test02b_write(self):
        """Test for point-selections (write, integer keys)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = numpy.where((nparr >= value1) & (nparr < value2))
            if common.verbose:
                print("Selection to test:", key)
            s = nparr[key]
            nparr[key] = s * 2
            tbarr[key] = s * 2
            a = nparr[:]
            b = tbarr[:]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables modifications does not match.")

    def test02c_write(self):
        """Test for point-selections (write, integer values, broadcast)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for value1, value2 in self.limits:
            key = numpy.where((nparr >= value1) & (nparr < value2))
            if common.verbose:
                print("Selection to test:", key)
            # s = nparr[key]
            nparr[key] = 2  # force a broadcast
            tbarr[key] = 2  # force a broadcast
            a = nparr[:]
            b = tbarr[:]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables modifications does not match.")
# Shapes and coordinate keysets for PointSelectionTestCase: each subclass
# supplies valid selections (working_keyset) and out-of-bounds ones
# (not_working_keyset) for arrays of increasing rank.
class PointSelection0(PointSelectionTestCase):
    shape = (3,)
    working_keyset = [
        [0, 1],
        [0, -1],
    ]
    not_working_keyset = [
        [0, 3],
        [0, 4],
        [0, -4],
    ]
class PointSelection1(PointSelectionTestCase):
    shape = (5, 3, 3)
    working_keyset = [
        [(0, 0), (0, 1), (0, 0)],
        [(0, 0), (0, -1), (0, 0)],
    ]
    not_working_keyset = [
        [(0, 0), (0, 3), (0, 0)],
        [(0, 0), (0, 4), (0, 0)],
        [(0, 0), (0, -4), (0, 0)],
        [(0, 0), (0, -5), (0, 0)]
    ]
class PointSelection2(PointSelectionTestCase):
    shape = (7, 3)
    working_keyset = [
        [(0, 0), (0, 1)],
        [(0, 0), (0, -1)],
        [(0, 0), (0, -2)],
    ]
    not_working_keyset = [
        [(0, 0), (0, 3)],
        [(0, 0), (0, 4)],
        [(0, 0), (0, -4)],
        [(0, 0), (0, -5)],
    ]
class PointSelection3(PointSelectionTestCase):
    shape = (4, 3, 2, 1)
    working_keyset = [
        [(0, 0), (0, 1), (0, 0), (0, 0)],
        [(0, 0), (0, -1), (0, 0), (0, 0)],
    ]
    not_working_keyset = [
        [(0, 0), (0, 3), (0, 0), (0, 0)],
        [(0, 0), (0, 4), (0, 0), (0, 0)],
        [(0, 0), (0, -4), (0, 0), (0, 0)],
    ]
class PointSelection4(PointSelectionTestCase):
    shape = (1, 3, 2, 5, 6)
    working_keyset = [
        [(0, 0), (0, 1), (0, 0), (0, 0), (0, 0)],
        [(0, 0), (0, -1), (0, 0), (0, 0), (0, 0)],
    ]
    not_working_keyset = [
        [(0, 0), (0, 3), (0, 0), (0, 0), (0, 0)],
        [(0, 0), (0, 4), (0, 0), (0, 0), (0, 0)],
        [(0, 0), (0, -4), (0, 0), (0, 0), (0, 0)],
    ]
class FancySelectionTestCase(common.TempFileMixin, TestCase):
    """Fancy-selection (mixed list/slice/ellipsis indexing) tests.

    Builds an int32 sample of ``self.shape`` (set by subclasses) and
    checks that selections PyTables supports match NumPy, that
    known-unsupported ones raise IndexError, and that out-of-bounds or
    over-long keys raise in both libraries.
    """

    def setUp(self):
        super(FancySelectionTestCase, self).setUp()
        M, N, O = self.shape
        # The next are valid selections for both NumPy and PyTables
        self.working_keyset = [
            ([1, 3], slice(1, N-1), 2),
            ([M-1, 1, 3, 2], slice(None), 2),  # unordered lists supported
            (slice(M), [N-1, 1, 0], slice(None)),
            (slice(1, M, 3), slice(1, N), [O-1, 1, 0]),
            (M-1, [2, 1], 1),
            (1, 2, 1),  # regular selection
            ([1, 2], -2, -1),  # negative indices
            ([1, -2], 2, -1),  # more negative indices
            ([1, -2], 2, Ellipsis),  # one ellipsis
            (Ellipsis, [1, 2]),  # one ellipsis
            (numpy.array(
                [1, -2], 'i4'), 2, -1),  # array 32-bit instead of list
            (numpy.array(
                [-1, 2], 'i8'), 2, -1),  # array 64-bit instead of list
        ]
        # Using booleans instead of ints is deprecated since numpy 1.8
        # Tests for keys that have to support the __index__ attribute
        #if (sys.version_info[0] >= 2 and sys.version_info[1] >= 5):
        #    self.working_keyset.append(
        #        (False, True),  # equivalent to (0,1) ;-)
        #    )
        # Valid selections for NumPy, but not for PyTables (yet)
        # The next should raise an IndexError
        self.not_working_keyset = [
            numpy.array([False, True], dtype="b1"),  # boolean arrays
            ([1, 2, 1], 2, 1),  # repeated values
            ([1, 2], 2, [1, 2]),  # several lists
            ([], 2, 1),  # empty selections
            (Ellipsis, [1, 2], Ellipsis),  # several ellipsis
            # Using booleans instead of ints is deprecated since numpy 1.8
            ([False, True]),  # boolean values with incompatible shape
        ]
        # The next should raise an IndexError in both NumPy and PyTables
        self.not_working_oob = [
            ([1, 2], 2, 1000),  # out-of-bounds selections
            ([1, 2], 2000, 1),  # out-of-bounds selections
        ]
        # The next should raise a IndexError in both NumPy and PyTables
        self.not_working_too_many = [
            ([1, 2], 2, 1, 1),
        ]
        # Create a sample array
        nparr = numpy.empty(self.shape, dtype=numpy.int32)
        data = numpy.arange(N * O, dtype=numpy.int32).reshape(N, O)
        for i in range(M):
            nparr[i] = data * i
        self.nparr = nparr
        self.tbarr = self.h5file.create_array(self.h5file.root, 'array', nparr)

    def test01a_read(self):
        """Test for fancy-selections (working selections, read)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            a = nparr[key]
            b = tbarr[key]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables selections does not match.")

    def test01b_read(self):
        """Test for fancy-selections (not working selections, read)."""
        # nparr = self.nparr
        tbarr = self.tbarr
        for key in self.not_working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            # a = nparr[key]
            self.assertRaises(IndexError, tbarr.__getitem__, key)

    def test01c_read(self):
        """Test for fancy-selections (out-of-bound indexes, read)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.not_working_oob:
            if common.verbose:
                print("Selection to test:", key)
            self.assertRaises(IndexError, nparr.__getitem__, key)
            self.assertRaises(IndexError, tbarr.__getitem__, key)

    def test01d_read(self):
        """Test for fancy-selections (too many indexes, read)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.not_working_too_many:
            if common.verbose:
                print("Selection to test:", key)
            # ValueError for numpy 1.6.x and earlier
            # IndexError in numpy > 1.8.0
            self.assertRaises((ValueError, IndexError), nparr.__getitem__, key)
            self.assertRaises(IndexError, tbarr.__getitem__, key)

    def test02a_write(self):
        """Test for fancy-selections (working selections, write)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            s = nparr[key]
            nparr[key] = s * 2
            tbarr[key] = s * 2
            a = nparr[:]
            b = tbarr[:]
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables modifications does not match.")

    def test02b_write(self):
        """Test for fancy-selections (working selections, write, broadcast)."""
        nparr = self.nparr
        tbarr = self.tbarr
        for key in self.working_keyset:
            if common.verbose:
                print("Selection to test:", key)
            # s = nparr[key]
            nparr[key] = 2  # broadcast value
            tbarr[key] = 2  # broadcast value
            a = nparr[:]
            b = tbarr[:]
            # if common.verbose:
            #     print("NumPy modified array:", a)
            #     print("PyTables modifyied array:", b)
            self.assertTrue(
                numpy.alltrue(a == b),
                "NumPy array and PyTables modifications does not match.")
class FancySelection1(FancySelectionTestCase):
    # Smallest shape that still exercises every selection in the keysets.
    shape = (5, 3, 3)  # Minimum values
class FancySelection2(FancySelectionTestCase):
    # Same tests with a larger first dimension than the minimum case
    # (minimum is (5, 3, 3)).
    shape = (7, 3, 3)
class FancySelection3(FancySelectionTestCase):
    # Same tests with all dimensions above the minimum (5, 3, 3).
    shape = (7, 4, 5)
class FancySelection4(FancySelectionTestCase):
    # Same tests with a large last dimension (minimum is (5, 3, 3)).
    shape = (5, 3, 10)
class CopyNativeHDF5MDAtom(TestCase):
    """Check that an array with a native HDF5 multidimensional atom is
    copied verbatim (atom and shape preserved)."""

    def setUp(self):
        super(CopyNativeHDF5MDAtom, self).setUp()
        filename = test_filename("array_mdatom.h5")
        self.h5file = tables.open_file(filename, "r")
        self.arr = self.h5file.root.arr
        # NOTE(review): tempfile.mktemp is race-prone (the name can be
        # taken before the file is created); consider mkstemp instead.
        self.copy = tempfile.mktemp(".h5")
        self.copyh = tables.open_file(self.copy, mode="w")
        self.arr2 = self.arr.copy(self.copyh.root, newname="arr2")

    def tearDown(self):
        # Close both files before removing the temporary copy.
        self.h5file.close()
        self.copyh.close()
        os.remove(self.copy)
        super(CopyNativeHDF5MDAtom, self).tearDown()

    def test01_copy(self):
        """Checking that native MD atoms are copied as-is"""
        self.assertEqual(self.arr.atom, self.arr2.atom)
        self.assertEqual(self.arr.shape, self.arr2.shape)

    def test02_reopen(self):
        """Checking that native MD atoms are copied as-is (re-open)"""
        # Re-open the copy read-only to be sure the metadata was
        # actually persisted, not just cached in memory.
        self.copyh.close()
        self.copyh = tables.open_file(self.copy, mode="r")
        self.arr2 = self.copyh.root.arr2
        self.assertEqual(self.arr.atom, self.arr2.atom)
        self.assertEqual(self.arr.shape, self.arr2.shape)
class AccessClosedTestCase(common.TempFileMixin, TestCase):
    """Accessing an Array node of an already-closed file must raise
    ClosedNodeError for read, __getitem__ and __setitem__."""

    def setUp(self):
        super(AccessClosedTestCase, self).setUp()
        a = numpy.zeros((10, 10))
        self.array = self.h5file.create_array(self.h5file.root, 'array', a)

    def test_read(self):
        self.h5file.close()
        self.assertRaises(ClosedNodeError, self.array.read)

    def test_getitem(self):
        self.h5file.close()
        self.assertRaises(ClosedNodeError, self.array.__getitem__, 0)

    def test_setitem(self):
        self.h5file.close()
        self.assertRaises(ClosedNodeError, self.array.__setitem__, 0, 0)
class BroadcastTest(common.TempFileMixin, TestCase):

    def test(self):
        """Test correct broadcasting when the array atom is not scalar."""
        array_shape = (2, 3)
        element_shape = (3,)

        # ``numpy.int`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``int`` maps to the same default integer dtype.
        dtype = numpy.dtype((int, element_shape))
        atom = Atom.from_dtype(dtype)
        h5arr = self.h5file.create_carray(self.h5file.root, 'array',
                                          atom, array_shape)

        size = numpy.prod(element_shape)
        nparr = numpy.arange(size).reshape(element_shape)

        # Assigning one element-shaped row must broadcast it over the atom.
        h5arr[0] = nparr
        self.assertTrue(numpy.all(h5arr[0] == nparr))
class TestCreateArrayArgs(common.TempFileMixin, TestCase):
    """Check every supported calling convention of ``File.create_array``.

    The array may be described by an ``obj`` ndarray, by an
    ``atom``/``shape`` pair, or by a consistent combination of the two;
    inconsistent combinations must raise ``TypeError``.
    """
    where = '/'
    name = 'array'
    obj = numpy.array([[1, 2], [3, 4]])
    title = 'title'
    byteorder = None
    createparents = False
    atom = Atom.from_dtype(obj.dtype)
    shape = obj.shape

    def _reopen_and_check(self, expected):
        """Re-open the file and verify the node's metadata and that its
        data equals *expected*."""
        self.h5file.close()
        self.h5file = tables.open_file(self.h5fname)
        ptarr = self.h5file.get_node(self.where, self.name)
        nparr = ptarr.read()

        self.assertEqual(ptarr.title, self.title)
        self.assertEqual(ptarr.shape, self.shape)
        self.assertEqual(ptarr.atom, self.atom)
        self.assertEqual(ptarr.atom.dtype, self.atom.dtype)
        self.assertTrue(allequal(expected, nparr))

    def _check_create_raises(self, **kwargs):
        """Assert that ``create_array`` rejects the given inconsistent
        keyword combination with TypeError."""
        self.assertRaises(TypeError,
                          self.h5file.create_array,
                          self.where,
                          self.name,
                          title=self.title,
                          **kwargs)

    def test_positional_args(self):
        self.h5file.create_array(self.where, self.name, self.obj, self.title)
        self._reopen_and_check(self.obj)

    def test_positional_args_atom_shape(self):
        self.h5file.create_array(self.where, self.name, None, self.title,
                                 self.byteorder, self.createparents,
                                 self.atom, self.shape)
        # No data was supplied, so the array must read back as zeros.
        self._reopen_and_check(numpy.zeros_like(self.obj))

    def test_kwargs_obj(self):
        self.h5file.create_array(self.where, self.name, title=self.title,
                                 obj=self.obj)
        self._reopen_and_check(self.obj)

    def test_kwargs_atom_shape_01(self):
        ptarr = self.h5file.create_array(self.where, self.name,
                                         title=self.title,
                                         atom=self.atom, shape=self.shape)
        ptarr[...] = self.obj
        self._reopen_and_check(self.obj)

    def test_kwargs_atom_shape_02(self):
        # Like the previous test, but without ever writing data.
        self.h5file.create_array(self.where, self.name,
                                 title=self.title,
                                 atom=self.atom, shape=self.shape)
        self._reopen_and_check(numpy.zeros_like(self.obj))

    def test_kwargs_obj_atom(self):
        self.h5file.create_array(self.where, self.name,
                                 title=self.title,
                                 obj=self.obj,
                                 atom=self.atom)
        self._reopen_and_check(self.obj)

    def test_kwargs_obj_shape(self):
        self.h5file.create_array(self.where, self.name,
                                 title=self.title,
                                 obj=self.obj,
                                 shape=self.shape)
        self._reopen_and_check(self.obj)

    def test_kwargs_obj_atom_shape(self):
        self.h5file.create_array(self.where, self.name,
                                 title=self.title,
                                 obj=self.obj,
                                 atom=self.atom,
                                 shape=self.shape)
        self._reopen_and_check(self.obj)

    def test_kwargs_obj_atom_error(self):
        # An atom whose dtype contradicts ``obj`` must be rejected.
        atom = Atom.from_dtype(numpy.dtype('complex'))
        self._check_create_raises(obj=self.obj, atom=atom)

    def test_kwargs_obj_shape_error(self):
        # A shape that contradicts ``obj`` must be rejected.
        self._check_create_raises(obj=self.obj,
                                  shape=self.shape + self.shape)

    def test_kwargs_obj_atom_shape_error_01(self):
        # Wrong atom, right shape.
        atom = Atom.from_dtype(numpy.dtype('complex'))
        self._check_create_raises(obj=self.obj, atom=atom, shape=self.shape)

    def test_kwargs_obj_atom_shape_error_02(self):
        # Right atom, wrong shape.
        self._check_create_raises(obj=self.obj, atom=self.atom,
                                  shape=self.shape + self.shape)

    def test_kwargs_obj_atom_shape_error_03(self):
        # Wrong atom and wrong shape.
        atom = Atom.from_dtype(numpy.dtype('complex'))
        self._check_create_raises(obj=self.obj, atom=atom,
                                  shape=self.shape + self.shape)
def suite():
    """Build and return the test suite for this module."""
    theSuite = unittest.TestSuite()
    niter = 1

    # Cases to add on each iteration, in the original execution order.
    # The scalar case test should be refined in order to work, and the
    # 32 dimensions case is already tested on GroupsArray.
    cases = (
        Basic0DOneTestCase,
        Basic0DTwoTestCase,
        # Basic1DZeroTestCase,
        Basic1DOneTestCase,
        Basic1DTwoTestCase,
        Basic1DThreeTestCase,
        Basic2DOneTestCase,
        Basic2DTwoTestCase,
        Basic10DTestCase,
        ReadOutArgumentTests,
        SizeOnDiskInMemoryPropertyTestCase,
        GroupsArrayTestCase,
        ComplexNotReopenNotEndianTestCase,
        ComplexReopenNotEndianTestCase,
        ComplexNotReopenEndianTestCase,
        ComplexReopenEndianTestCase,
        CloseCopyTestCase,
        OpenCopyTestCase,
        CopyIndex1TestCase,
        CopyIndex2TestCase,
        CopyIndex3TestCase,
        CopyIndex4TestCase,
        CopyIndex5TestCase,
        CopyIndex6TestCase,
        CopyIndex7TestCase,
        CopyIndex8TestCase,
        CopyIndex9TestCase,
        CopyIndex10TestCase,
        CopyIndex11TestCase,
        CopyIndex12TestCase,
        GI1NAOpenTestCase,
        GI1NACloseTestCase,
        GI2NAOpenTestCase,
        GI2NACloseTestCase,
        SI1NAOpenTestCase,
        SI1NACloseTestCase,
        SI2NAOpenTestCase,
        SI2NACloseTestCase,
        GE1NAOpenTestCase,
        GE1NACloseTestCase,
        GE2NAOpenTestCase,
        GE2NACloseTestCase,
        NonHomogeneousTestCase,
        TruncateTestCase,
        FancySelection1,
        FancySelection2,
        FancySelection3,
        FancySelection4,
        PointSelection0,
        PointSelection1,
        PointSelection2,
        PointSelection3,
        PointSelection4,
        CopyNativeHDF5MDAtom,
        AccessClosedTestCase,
        TestCreateArrayArgs,
        BroadcastTest,
    )
    for _ in range(niter):
        for case in cases:
            theSuite.addTest(unittest.makeSuite(case))
    return theSuite
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    common.parse_argv(sys.argv)
    common.print_versions()
    unittest.main(defaultTest='suite')
| gdementen/PyTables | tables/tests/test_array.py | Python | bsd-3-clause | 95,636 |
import warnings
import pyaf.Bench.TS_datasets as tsds
import pyaf.Bench.MComp as mcomp
# Load the FINANCE subset of the M4 forecasting competition and wrap it
# in the benchmark tester.
tester7 = mcomp.cMComp_Tester(tsds.load_M4_comp("FINANCE"), "M4_COMP")

with warnings.catch_warnings():
    # Escalate every warning to an error so any problem aborts the run.
    warnings.simplefilter("error")
    tester7.testSignals('FIN28')
    # tester7.testSignal('ECON0299')
    # tester7.testAllSignals()
    # tester7.run_multiprocessed(20);
| antoinecarme/pyaf | tests/bench/test_M4_FIN28_failure.py | Python | bsd-3-clause | 369 |
########################################################################
# amara/xpath/locationpaths/predicates.py
"""
A parsed token that represents a predicate list.
"""
from __future__ import absolute_import
from itertools import count, izip
from amara.xpath import datatypes
from amara.xpath.expressions.basics import literal, variable_reference
from amara.xpath.expressions.booleans import equality_expr, relational_expr
from amara.xpath.functions import position_function
from ._nodetests import positionfilter
from ._paths import pathiter
__all__ = ['predicates', 'predicate']
class predicates(tuple):
    """An immutable sequence of `predicate` objects forming the
    predicate list of one location step."""

    def __init__(self, *args):
        # Chain the individual predicate filters into a single node-set
        # iterator, usable directly as a path step.
        self.select = pathiter(pred.select for pred in self).select
        return

    def filter(self, nodes, context, reverse):
        """Apply each predicate in turn to *nodes* and return the
        survivors as a nodeset; the evaluation context is saved and
        restored around the filtering."""
        if self:
            state = context.node, context.position, context.size
            for predicate in self:
                nodes = datatypes.nodeset(predicate.select(context, nodes))
            context.node, context.position, context.size = state
        else:
            nodes = datatypes.nodeset(nodes)
        if reverse:
            # Reverse-axis steps yield nodes in document order here, so
            # flip them back for positional predicates already applied.
            nodes.reverse()
        return nodes

    def pprint(self, indent='', stream=None):
        # Debugging helper: dump this node and its children as a tree.
        print >> stream, indent + repr(self)
        for pred in self:
            pred.pprint(indent + ' ', stream)

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    def __repr__(self):
        ptr = id(self)
        # CPython 2 id() may be negative on 32-bit builds; normalize.
        if ptr < 0: ptr += 0x100000000L
        return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)

    def __unicode__(self):
        return u''.join(map(unicode, self))
#FIXME: should this derive from boolean_expression?
class predicate:
def __init__(self, expression):
self._expr = expression
self._provide_context_size = False #See http://trac.xml3k.org/ticket/62
#FIXME: There are probably many code paths which need self._provide_context_size set
# Check for just "Number"
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
# FIXME: add warning that expression will not select anything
self.select = izip()
return
# Check for "position() = Expr"
elif isinstance(expression, equality_expr) and expression._op == '=':
if isinstance(expression._left, position_function):
expression = expression._right
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = izip()
else:
#FIXME: This will kick in the non-lazy behavior too broadly, e.g. in the case of [position = 1+1]
#See: http://trac.xml3k.org/ticket/62
self._provide_context_size = True
self._expr = expression
self.select = self._number
return
elif isinstance(expression._right, position_function):
expression = expression._left
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = izip()
else:
self._expr = expression
self.select = self._number
return
# Check for "position() [>,>=] Expr" or "Expr [<,<=] position()"
# FIXME - do full slice-type notation
elif isinstance(expression, relational_expr):
op = expression._op
if (isinstance(expression._left, position_function) and
isinstance(expression._right, (literal, variable_reference))
and op in ('>', '>=')):
self._start = expression._right
self._position = (op == '>')
self.select = self._slice
return
elif (isinstance(expression._left, (literal, variable_reference))
and isinstance(expression._right, Position)
and op in ('<', '<=')):
self._start = expression._left
self._position = (op == '<')
self.select = self._slice
return
if issubclass(expression.return_type, datatypes.number):
self.select = self._number
elif expression.return_type is not datatypes.xpathobject:
assert issubclass(expression.return_type, datatypes.xpathobject)
self.select = self._boolean
return
def _slice(self, context, nodes):
start = self._start.evaluate_as_number(context)
position = self._position
if position > start:
return nodes
position += 1
nodes = iter(nodes)
for node in nodes:
if position > start:
break
position += 1
return nodes
def _number(self, context, nodes):
expr = self._expr
position = 1
if self._provide_context_size:
nodes = list(nodes)
context.size = len(nodes)
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_number(context) == position:
yield node
position += 1
return
def _boolean(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_boolean(context):
yield node
position += 1
return
def select(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
result = expr.evaluate(context)
if isinstance(result, datatypes.number):
# This must be separate to prevent falling into
# the boolean check.
if result == position:
yield node
elif result:
yield node
position += 1
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expr.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return u'[%s]' % self._expr
@property
def children(self):
'Child of the parse tree of a predicate is its expression'
return (self._expr,)
| zepheira/amara | lib/xpath/locationpaths/predicates.py | Python | apache-2.0 | 7,392 |
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for non-namespaced extra specs."""
from nova.api.validation.extra_specs import base
# Validators for flavor extra specs that do not live under any namespace.
# Each entry declares one legal key, its accepted value type, and docs.
EXTRA_SPEC_VALIDATORS = [
    base.ExtraSpecValidator(
        name='hide_hypervisor_id',
        description=(
            'Determine whether the hypervisor ID should be hidden from the '
            'guest. Only supported by the libvirt virt driver. '
            'This extra spec is not compatible with the '
            'AggregateInstanceExtraSpecsFilter scheduler filter. '
            'The ``hw:hide_hypervisor_id`` extra spec should be used instead.'
        ),
        value={
            'type': bool,
            'description': 'Whether to hide the hypervisor ID.',
        },
        # Kept for backwards compatibility; superseded by the namespaced
        # ``hw:hide_hypervisor_id`` spec (see description above).
        deprecated=True,
    ),
    # TODO(stephenfin): This should be moved to a namespace
    base.ExtraSpecValidator(
        name='group_policy',
        description=(
            'The group policy to apply when using the granular resource '
            'request syntax.'
        ),
        value={
            'type': str,
            'enum': [
                'isolate',
                'none',
            ],
        },
    ),
]
def register():
    """Return the validators for the non-namespaced extra specs."""
    return EXTRA_SPEC_VALIDATORS
| openstack/nova | nova/api/validation/extra_specs/null.py | Python | apache-2.0 | 1,775 |
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
import scrapy
import os
# IN the special case of the wto disputes, we need to get the list of all disputes, so we run this script preliminarily:
# NOTE(review): execfile is Python 2 only, and the relative path assumes
# the spider is launched from this directory -- confirm before reuse.
execfile("./fetchjson.py")
# Now we build the scraper:
# First we define a box to put information into (an object to receive scraped info)
class WTO_Dispute(scrapy.Item):
    """Scrapy item holding the data collected for WTO disputes."""
    # Populated in WTO_Dispute_Link_Spider.parse from the page's anchors.
    url = scrapy.Field()
    # name = scrapy.Field()
    # description = scrapy.Field()
# Then we define a class which will be used to direct scrapy as to what to gather from the web.
class WTO_Dispute_Link_Spider(CrawlSpider):
    """Spider that fetches the WTO dispute-status page and stores the
    text of every anchor on it into a WTO_Dispute item."""
    name = 'wtodisputes'
    # allowed_domains=['wto.org']
    start_urls = ['http://wto.org/english/tratop_e/dispu_e/dispu_status_e.htm']

    def parse(self,response):
        dispute=WTO_Dispute()
        # NOTE(review): this extracts the anchors' *text*, not their href
        # attributes, although the field is named ``url`` -- confirm
        # whether "//a/@href" was intended.
        dispute['url']= response.xpath("//a/text()").extract()
        # dispute['name']= # fill in here
        # dispute['description']= # fill in here
        return dispute
# based on http://doc.scrapy.org/en/0.24/intro/overview.html
# To run: $ scrapy runspider wtoscraper.py -o wto_disputes.json
| trcook/wto_python_scrape | wto_scraper.py | Python | mit | 1,127 |
# -*- coding: utf-8 -*-
# © 2016 Eficent Business and IT Consulting Services S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from openerp import api, exceptions, fields, models, _
class AccountMove(models.Model):
    """Journal entry, extended with a back-link to the stock valuation
    manual adjustment that generated it."""
    _inherit = 'account.move'

    # 'restrict' forbids deleting the adjustment while this entry exists.
    stock_valuation_account_manual_adjustment_id = fields.Many2one(
        comodel_name='stock.valuation.account.manual.adjustment',
        string='Stock Valuation Account Manual Adjustment',
        ondelete='restrict')

    @api.multi
    def unlink(self):
        """Forbid deleting journal entries that were produced by a stock
        valuation account manual adjustment."""
        for rec in self:
            if rec.stock_valuation_account_manual_adjustment_id:
                raise exceptions.Warning(
                    _("You cannot remove the journal entry that is related "
                      "to a Stock valuation account manual adjustment "))
        return super(AccountMove, self).unlink()
class AccountMoveLine(models.Model):
    """Journal item, mirroring the parent move's link to the stock
    valuation manual adjustment."""
    _inherit = 'account.move.line'

    # Related (and stored) copy of the move-level link, so the journal
    # items can be searched/guarded on it directly.
    stock_valuation_account_manual_adjustment_id = fields.Many2one(
        comodel_name='stock.valuation.account.manual.adjustment',
        related='move_id.stock_valuation_account_manual_adjustment_id',
        string='Stock Valuation Account Manual Adjustment',
        store=True, ondelete='restrict')

    @api.multi
    def unlink(self):
        """Forbid deleting journal items that were produced by a stock
        valuation account manual adjustment."""
        for rec in self:
            if rec.stock_valuation_account_manual_adjustment_id:
                raise exceptions.Warning(
                    _("You cannot remove the journal item that is related "
                      "to a Stock valuation account manual adjustment "))
        return super(AccountMoveLine, self).unlink()
| factorlibre/stock-logistics-warehouse | stock_valuation_account_manual_adjustment/models/account_move.py | Python | agpl-3.0 | 1,624 |
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
Builds a file that would import all used modules.
This way we trick py2exe to include all standard library files and some more
packages and modules.
"""
import os
import sys
import warnings
from os.path import join
# Local imports
import builder
# Modules (and whole sub-trees) that the scan must never import or emit,
# e.g. because importing them has side effects or is known to fail.
# Extended at runtime with the build's excludeModules and with modules
# that the global module index marks as unavailable on Windows.
MODULES_TO_IGNORE = [
    "__phello__.foo",
    "antigravity",
    "unittest",
    "win32com.propsys.propsys",
    "wx.lib.graphics",
    "wx.lib.rpcMixin",
    "wx.lib.wxcairo",
    "wx.build.config",
]
HEADER = """\
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
# This file was automatically created by the BuildImports.py script.
# Don't try to edit this file yourself.
#-----------------------------------------------------------------------------
#pylint: disable-msg=W0611,W0622,W0402,E0611,F0401
"""
# Turn DeprecationWarnings into errors so TestImport() below can detect
# deprecated modules through its DeprecationWarning handler.
warnings.simplefilter('error', DeprecationWarning)
class BuildImports(builder.Task):
    """Build task that generates Imports.py: a module importing every
    standard-library module plus the configured extra packages, tricking
    py2exe into bundling all of them."""
    description = "Build Imports.py"

    def Setup(self):
        self.outFileName = join(self.buildSetup.pyVersionDir, "Imports.py")
        # Only regenerate when the file is missing (GUI mode) or when a
        # command-line build explicitly requested it.
        if self.buildSetup.showGui:
            if os.path.exists(self.outFileName):
                self.activated = False
        else:
            self.activated = bool(self.buildSetup.args.build)

    def DoTask(self):
        """
        Starts the actual work.
        """
        buildSetup = self.buildSetup
        MODULES_TO_IGNORE.extend(buildSetup.excludeModules)
        # Modules the docs mark as non-Windows go onto the ignore list.
        globalModuleIndex, badModules = ReadGlobalModuleIndex(
            join(buildSetup.pyVersionDir, "Global Module Index.txt")
        )
        MODULES_TO_IGNORE.extend(badModules)
        # Scan the real interpreter's DLLs and Lib directories (not the
        # virtualenv) for importable standard-library modules.
        pyDir = sys.real_prefix if hasattr(sys, "real_prefix") else sys.prefix
        stdLibModules = (
            FindModulesInPath(join(pyDir, "DLLs"), "", True) +
            FindModulesInPath(join(pyDir, "Lib"), "", True)
        )
        # Report documented modules that the filesystem scan didn't find.
        notFoundModules = []
        for module in globalModuleIndex:
            if module in stdLibModules:
                continue
            if module in sys.builtin_module_names:
                continue
            if ShouldBeIgnored(module):
                continue
            notFoundModules.append(module)
        if notFoundModules:
            print "  Modules found in global module index but not in scan:"
            for module in notFoundModules:
                print "  ", module
        #print "Modules found in scan but not in global module index:"
        #for module in stdLibModules:
        #    if module not in globalModuleIndex:
        #        print "  ", module
        # Emit the header, the stdlib imports and the per-package imports.
        outfile = open(self.outFileName, "wt")
        outfile.write(HEADER)
        for module in stdLibModules:
            outfile.write("import %s\n" % module)
        # add every .pyd of the current directory
        for package in buildSetup.includeModules:
            outfile.write("\n# modules found for package '%s'\n" % package)
            for module in GetPackageModules(package):
                outfile.write("import %s\n" % module)
        outfile.write("\n")
        outfile.close()
class DummyStdOut:
    """A do-nothing stand-in for ``sys.stdout`` that swallows all output
    produced while modules are import-tested."""

    def write(self, data):
        """Silently discard *data*."""
def FindModulesInPath(path, prefix="", includeDeprecated=False):
    """
    Find modules and packages for a given filesystem path.

    Walks *path* recursively, import-testing each candidate, and returns
    the importable dotted module names (prefixed by *prefix*).  Entries
    matched by MODULES_TO_IGNORE, non-packages (no __init__.py) and
    ".test" sub-packages are skipped.
    """
    if prefix:
        prefix += "."
    print "  Scanning:", path
    modules = []
    for root, dirs, files in os.walk(path):
        package = root[len(path) + 1:].replace("\\", ".")
        package = prefix + package
        # Prune non-package / ignored directories in place so os.walk
        # doesn't descend into them.
        for directory in dirs[:]:
            if (
                not os.path.exists(join(root, directory, "__init__.py")) or
                ShouldBeIgnored(package + "." + directory)
            ):
                dirs.remove(directory)
        if ShouldBeIgnored(package) or package.rfind(".test") > 0:
            continue
        if package != prefix:
            # Record the package itself when it imports cleanly.
            isOk, eType, eMesg = TestImport(package)
            if isOk:
                modules.append(package)
            package += "."
        for filename in files:
            name, extension = os.path.splitext(filename)
            if extension.lower() not in (".py", ".pyd"):
                continue
            moduleName = package + name
            if ShouldBeIgnored(moduleName) or moduleName.endswith(".__init__"):
                continue
            if moduleName == "MimeWrite":
                # NOTE(review): leftover debug trace -- consider removing.
                print "found"
            isOk, eType, eMesg = TestImport(moduleName, includeDeprecated)
            if not isOk:
                # Deprecation failures are expected and stay silent.
                if not eType == "DeprecationWarning":
                    print "  ", moduleName, eType, eMesg
                continue
            modules.append(moduleName)
    return modules
def GetPackageModules(package):
    """
    Returns a list with all modules of the package.

    When a site-packages .pth file exists for the package, the paths it
    lists are scanned; otherwise the package is imported and its
    __path__ (or its containing directory) is scanned.
    """
    moduleList = []
    tail = join("Lib", "site-packages", package) + ".pth"
    pthPaths = [join(sys.prefix, tail)]
    if hasattr(sys, "real_prefix"):
        # Inside a virtualenv also look in the real interpreter prefix.
        pthPaths.append(join(sys.real_prefix, tail))
    for pthPath in pthPaths:
        if os.path.exists(pthPath):
            for path in ReadPth(pthPath):
                moduleList.extend(FindModulesInPath(path))
            break
    else:
        # No .pth found: fall back to importing the package directly.
        mod = __import__(package)
        moduleList.append(package)
        if hasattr(mod, "__path__"):
            paths = mod.__path__
        else:
            # A plain .pyd extension module has no sub-modules to scan.
            if mod.__file__.endswith(".pyd"):
                return moduleList
            paths = [os.path.dirname(mod.__file__)]
        for path in paths:
            moduleList.extend(FindModulesInPath(path, package))
    return moduleList
def GetPydFiles(path):
    """
    Returns a list of all .pyd modules in the supplied path.

    Only the directory itself is inspected (no recursion); the extension
    is stripped from the returned names.
    """
    split = os.path.splitext
    return [
        split(os.path.basename(entry))[0]
        for entry in os.listdir(path)
        if split(entry)[1].lower() == ".pyd"
    ]
def ReadGlobalModuleIndex(infile):
    """
    Read the global module index file (created by copy&paste from the Python
    documentation) and sort out all modules that are not available on
    Windows.

    Args:
        infile: path of the "Global Module Index.txt" file.

    Returns:
        Tuple ``(modules, badModules)``: *modules* are all indexed module
        names, *badModules* those whose parenthesised platform note does
        not mention Windows.
    """
    modules = []
    badModules = []
    # Use a context manager so the file is closed even on errors
    # (the previous version leaked the handle on exceptions).
    with open(infile, "r") as inFile:
        for line in inFile:
            line = line.strip()
            # Skip comments and blank lines; blank lines previously
            # produced bogus empty module names.
            if not line or line.startswith("#"):
                continue
            parts = line.split(" ", 1)
            if len(parts) > 1:
                # A note like "(Unix)" that doesn't mention Windows marks
                # the module as unavailable on Windows.
                if parts[1].startswith("(") and parts[1].find("Windows") < 0:
                    badModules.append(parts[0])
                    continue
            modules.append(parts[0])
    return modules, badModules
def ReadPth(path):
    """
    Read a .PTH file and return the paths inside as a list.

    Comment lines (starting with ``#``) and blank lines are skipped;
    every other line is resolved relative to the .pth file's directory.

    Args:
        path: filesystem path of the .pth file.

    Returns:
        List of resolved paths, in file order.
    """
    result = []
    # Use a context manager so the file handle is always released
    # (the previous version never closed the file).
    with open(path, "rt") as pthFile:
        for line in pthFile:
            entry = line.strip()
            # Skip comments and (fix) blank lines, which previously
            # produced a bogus entry pointing at the directory itself.
            if not entry or entry.startswith("#"):
                continue
            result.append(join(os.path.dirname(path), entry))
    return result
def ShouldBeIgnored(moduleName):
    """
    Return True if the supplied module should be ignored, i.e. if it is
    one of the MODULES_TO_IGNORE entries or a submodule of one.
    """
    parts = moduleName.split(".")

    def matches(ignoreName):
        # An entry matches when it is a dotted prefix of moduleName.
        prefix = ignoreName.split(".")
        return len(prefix) <= len(parts) and parts[:len(prefix)] == prefix

    return any(matches(name) for name in MODULES_TO_IGNORE)
def TestImport(moduleName, includeDeprecated=False):
    """
    Test if the given module can be imported without error.

    Returns a 3-tuple ``(success, errorType, errorMessage)``.  With
    ``includeDeprecated`` set, a module that raises DeprecationWarning
    on import (escalated to an error at module level) still counts as
    importable.
    """
    #print "Testing", moduleName
    # Silence any output the module produces at import time.
    # NOTE(review): oldStdErr is saved and restored but sys.stderr is
    # never actually redirected -- possibly an oversight.
    oldStdOut = sys.stdout
    oldStdErr = sys.stderr
    sys.stdout = DummyStdOut()
    try:
        __import__(moduleName)
        return (True, "", "")
    except DeprecationWarning, exc:
        return includeDeprecated, "DeprecationWarning", str(exc)
    except ImportError, exc:
        return False, "ImportError", str(exc)
    except SyntaxError, exc:
        return False, "SyntaxError", str(exc)
    except Exception, exc:
        return False, "Exception", str(exc)
    finally:
        sys.stdout = oldStdOut
        sys.stderr = oldStdErr
| topic2k/EventGhost | _build/builder/BuildImports.py | Python | gpl-2.0 | 9,896 |
# -*- coding: utf-8 -*-
"""2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest positive number
that is evenly divisible by all of the numbers from 1 to 20?"""
doorgaan = True
getal = 2520
while doorgaan == True:
doorgaan = False
for x in xrange(11, 21):
if getal % x != 0:
if getal % 1000 == 0:
print getal, x
getal += 2520
doorgaan = True
break
print getal | hendrikjeb/Euler | 05.py | Python | mit | 462 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for Twisted.names' root resolver.
"""
from twisted.trial import unittest
class RootResolverTestCase(unittest.TestCase):
    # Empty placeholder suite: no root-resolver tests exist yet, but the
    # class keeps the module discoverable by trial.
    pass
| kenorb-contrib/BitTorrent | twisted/names/test/test_rootresolve.py | Python | gpl-3.0 | 231 |
from django.contrib import admin
from .models import Website, Language, Domain, Search
# Register your models here.
# Expose the aggregator models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Website)
admin.site.register(Language)
admin.site.register(Domain)
admin.site.register(Search)
#!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Generate asciidoc source for qutebrowser based on docstrings."""
import os
import sys
import html
import shutil
import os.path
import inspect
import subprocess
import collections
import tempfile
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
# We import qutebrowser.app so all @cmdutils-register decorators are run.
import qutebrowser.app
from scripts import asciidoc2html, utils
from qutebrowser import qutebrowser
from qutebrowser.commands import cmdutils
from qutebrowser.config import configdata
from qutebrowser.utils import docutils
class UsageFormatter(argparse.HelpFormatter):

    """Patched HelpFormatter to include some asciidoc markup in the usage.

    This does some horrible things, but the alternative would be to reimplement
    argparse.HelpFormatter while copying 99% of the code :-/
    """

    def _format_usage(self, usage, actions, groups, _prefix):
        """Override _format_usage to not add the 'usage:' prefix."""
        return super()._format_usage(usage, actions, groups, '')

    def _metavar_formatter(self, action, default_metavar):
        """Override _metavar_formatter to add asciidoc markup to metavars.

        Most code here is copied from Python 3.4's argparse.py.
        """
        # Metavars get asciidoc 'literal' quotes; choice values get bold
        # markup inside braces.
        if action.metavar is not None:
            result = "'{}'".format(action.metavar)
        elif action.choices is not None:
            choice_strs = [str(choice) for choice in action.choices]
            result = '{%s}' % ','.join('*{}*'.format(e) for e in choice_strs)
        else:
            result = "'{}'".format(default_metavar)

        def fmt(tuple_size):
            """Format the result according to the tuple size."""
            if isinstance(result, tuple):
                return result
            else:
                return (result, ) * tuple_size
        return fmt

    def _format_actions_usage(self, actions, groups):
        """Override _format_actions_usage to add asciidoc markup to flags.

        Because argparse.py's _format_actions_usage is very complex, we first
        monkey-patch the option strings to include the asciidoc markup, then
        run the original method, then undo the patching.
        """
        # Remember the unmarked option strings so they can be restored.
        old_option_strings = {}
        for action in actions:
            old_option_strings[action] = action.option_strings[:]
            action.option_strings = ['*{}*'.format(s)
                                     for s in action.option_strings]
        ret = super()._format_actions_usage(actions, groups)
        # Undo the patching so other help output is unaffected.
        for action in actions:
            action.option_strings = old_option_strings[action]
        return ret
def _open_file(name, mode='w'):
    """Open *name* with UTF-8 encoding and '\\n' newlines enforced.

    Used for all generated documentation files so output is identical
    across platforms.
    """
    options = {'newline': '\n', 'encoding': 'utf-8'}
    return open(name, mode, **options)
def _get_cmd_syntax(_name, cmd):
    """Return the asciidoc-marked syntax string for *cmd*.

    The parser's formatter_class is temporarily swapped for our
    UsageFormatter so the rendered usage carries asciidoc markup, then
    restored.
    """
    parser = cmd.parser
    saved_formatter = parser.formatter_class
    parser.formatter_class = UsageFormatter
    usage_text = parser.format_usage().rstrip()
    parser.formatter_class = saved_formatter
    return usage_text
def _get_command_quickref(cmds):
    """Build the asciidoc quick-reference table for commands.

    Args:
        cmds: Iterable of (name, command) pairs.

    Return:
        The table as a single asciidoc string.
    """
    rows = ['[options="header",width="75%",cols="25%,75%"]',
            '|==============',
            '|Command|Description']
    for name, cmd in cmds:
        # First docstring line of the handler is the short description.
        first_doc_line = inspect.getdoc(cmd.handler).splitlines()[0]
        rows.append('|<<{},{}>>|{}'.format(name, name, first_doc_line))
    rows.append('|==============')
    return '\n'.join(rows)
def _get_setting_quickref():
    """Build the asciidoc quick-reference tables for all settings.

    Sections without per-option descriptions are skipped.
    """
    lines = []
    for sectname, sect in configdata.DATA.items():
        if not getattr(sect, 'descriptions'):
            continue
        lines += [
            "",
            ".Quick reference for section ``{}''".format(sectname),
            '[options="header",width="75%",cols="25%,75%"]',
            '|==============',
            '|Setting|Description',
        ]
        for optname, _option in sect.items():
            summary = sect.descriptions[optname].splitlines()[0]
            lines.append('|<<{}-{},{}>>|{}'.format(
                sectname, optname, optname, summary))
        lines.append('|==============')
    return '\n'.join(lines)
def _get_command_doc(name, cmd):
    """Generate the asciidoc documentation for a single command.

    Args:
        name: The name of the command.
        cmd: The command object (provides parser, handler, pos_args,
             opt_args and special_params).

    Return:
        The asciidoc section as a string.

    Raises:
        KeyError: If an argument has no description in the docstring.
    """
    output = ['[[{}]]'.format(name)]
    output += ['=== {}'.format(name)]
    syntax = _get_cmd_syntax(name, cmd)
    if syntax != name:
        output.append('Syntax: +:{}+'.format(syntax))
        output.append("")
    parser = docutils.DocstringParser(cmd.handler)
    output.append(parser.short_desc)
    if parser.long_desc:
        output.append("")
        output.append(parser.long_desc)
    if cmd.pos_args:
        output.append("")
        output.append("==== positional arguments")
        # Loop variable renamed so it no longer shadows the 'name'
        # parameter of this function.
        for arg, arg_name in cmd.pos_args:
            try:
                output.append("* +'{}'+: {}".format(arg_name,
                                                    parser.arg_descs[arg]))
            except KeyError as e:
                raise KeyError("No description for arg {} of command "
                               "'{}'!".format(e, cmd.name))
    if cmd.opt_args:
        output.append("")
        output.append("==== optional arguments")
        for arg, (long_flag, short_flag) in cmd.opt_args.items():
            try:
                output.append('* +*{}*+, +*{}*+: {}'.format(
                    short_flag, long_flag, parser.arg_descs[arg]))
            except KeyError as e:
                # BUG fix: 'e' was previously referenced without being
                # bound ('except KeyError:'), so a missing description
                # raised NameError instead of the intended KeyError.
                raise KeyError("No description for arg {} of command "
                               "'{}'!".format(e, cmd.name))
    if cmd.special_params['count'] is not None:
        output.append("")
        output.append("==== count")
        output.append(parser.arg_descs[cmd.special_params['count']])
    output.append("")
    output.append("")
    return '\n'.join(output)
def _get_action_metavar(action):
    """Return the asciidoc metavar text for an argparse action.

    Preference order: explicit metavar, then the comma-joined choices
    in braces, then the upper-cased dest.
    """
    if action.metavar is not None:
        inner = action.metavar
    elif action.choices is not None:
        inner = '{%s}' % ','.join(str(choice) for choice in action.choices)
    else:
        inner = action.dest.upper()
    return "'{}'".format(inner)
def _format_action_args(action):
    """Build the argument part of an action's invocation string."""
    metavar = _get_action_metavar(action)
    nargs = action.nargs
    if nargs is None:
        return metavar
    if nargs == '?':
        return '[{}]'.format(metavar)
    if nargs == '*':
        return '[{mv} [{mv} ...]]'.format(mv=metavar)
    if nargs == '+':
        return '{mv} [{mv} ...]'.format(mv=metavar)
    if nargs == '...':
        return '...'
    # Numeric nargs: repeat the metavar that many times.
    return ' '.join([metavar] * nargs)
def _format_action(action):
    """Render one argparse action as an asciidoc definition-list entry."""
    if not action.option_strings:
        # Positional argument: just the metavar.
        invocation = '*{}*::'.format(_get_action_metavar(action))
        return '{}\n    {}\n'.format(invocation, action.help)
    if action.nargs == 0:
        # Doesn't take a value, so the syntax is -s, --long.
        flags = ['*{}*'.format(opt) for opt in action.option_strings]
    else:
        # Takes a value, so the syntax is -s ARGS or --long ARGS.
        args_string = _format_action_args(action)
        flags = ['*{}* {}'.format(opt, args_string)
                 for opt in action.option_strings]
    invocation = ', '.join(flags) + '::'
    return '{}\n    {}\n'.format(invocation, action.help)
def generate_commands(filename):
    """Generate the complete commands section.

    Commands from cmdutils.cmd_dict are bucketed into normal, hidden and
    debug groups (aliases and deprecated commands are skipped); each
    group is written with a quick-reference table followed by the full
    per-command docs.

    Args:
        filename: Path of the asciidoc file to (over)write.
    """
    with _open_file(filename) as f:
        f.write("= Commands\n")
        normal_cmds = []
        hidden_cmds = []
        debug_cmds = []
        for name, cmd in cmdutils.cmd_dict.items():
            if name in cmdutils.aliases:
                continue
            if cmd.hide:
                hidden_cmds.append((name, cmd))
            elif cmd.debug:
                debug_cmds.append((name, cmd))
            elif not cmd.deprecated:
                normal_cmds.append((name, cmd))
        # Sort each group alphabetically by command name.
        normal_cmds.sort()
        hidden_cmds.sort()
        debug_cmds.sort()
        f.write("\n")
        f.write("== Normal commands\n")
        f.write(".Quick reference\n")
        f.write(_get_command_quickref(normal_cmds) + '\n')
        for name, cmd in normal_cmds:
            f.write(_get_command_doc(name, cmd))
        f.write("\n")
        f.write("== Hidden commands\n")
        f.write(".Quick reference\n")
        f.write(_get_command_quickref(hidden_cmds) + '\n')
        for name, cmd in hidden_cmds:
            f.write(_get_command_doc(name, cmd))
        f.write("\n")
        f.write("== Debugging commands\n")
        f.write("These commands are mainly intended for debugging. They are "
                "hidden if qutebrowser was started without the "
                "`--debug`-flag.\n")
        f.write("\n")
        f.write(".Quick reference\n")
        f.write(_get_command_quickref(debug_cmds) + '\n')
        for name, cmd in debug_cmds:
            f.write(_get_command_doc(name, cmd))
def generate_settings(filename):
    """Generate the complete settings section.

    Writes a quick-reference table, then one subsection per config
    section/option from configdata.DATA, including valid values and the
    default.

    Args:
        filename: Path of the asciidoc file to (over)write.
    """
    with _open_file(filename) as f:
        f.write("= Settings\n")
        f.write(_get_setting_quickref() + "\n")
        for sectname, sect in configdata.DATA.items():
            f.write("\n")
            f.write("== {}".format(sectname) + "\n")
            f.write(configdata.SECTION_DESC[sectname] + "\n")
            if not getattr(sect, 'descriptions'):
                # Sections without per-option descriptions get no
                # per-option documentation.
                pass
            else:
                for optname, option in sect.items():
                    f.write("\n")
                    f.write('[[{}-{}]]'.format(sectname, optname) + "\n")
                    f.write("=== {}".format(optname) + "\n")
                    f.write(sect.descriptions[optname] + "\n")
                    f.write("\n")
                    valid_values = option.typ.valid_values
                    if valid_values is not None:
                        f.write("Valid values:\n")
                        f.write("\n")
                        for val in valid_values:
                            try:
                                desc = valid_values.descriptions[val]
                                f.write(" * +{}+: {}".format(val, desc) + "\n")
                            except KeyError:
                                # A value may legitimately have no
                                # description attached.
                                f.write(" * +{}+".format(val) + "\n")
                        f.write("\n")
                    if option.default():
                        f.write("Default: +pass:[{}]+\n".format(html.escape(
                            option.default())))
                    else:
                        f.write("Default: empty\n")
def _get_authors():
    """Get a list of authors based on git commit logs.

    Authors are ordered by commit count (most commits first), ties
    broken by name.
    """
    log = subprocess.check_output(['git', 'log', '--format=%aN'])
    counts = collections.Counter(log.decode('utf-8').splitlines())
    return sorted(counts, key=lambda author: (counts[author], author),
                  reverse=True)
def _format_block(filename, what, data):
    """Format a block in a file.

    The block is delimited by markers like these:

        // QUTE_*_START
        ...
        // QUTE_*_END

    The * part is the part which should be given as 'what'.  Lines
    between the markers are replaced by 'data'; everything else is
    copied unchanged.  The result is written to a temp file which is
    then moved over the original (removed on any failure).

    Args:
        filename: The file to change.
        what: What to change (authors, options, etc.)
        data; A list of strings which is the new data.
    """
    what = what.upper()
    oshandle, tmpname = tempfile.mkstemp()
    try:
        with _open_file(filename, mode='r') as infile, \
                _open_file(oshandle, mode='w') as temp:
            found_start = False
            found_end = False
            for line in infile:
                if line.strip() == '// QUTE_{}_START'.format(what):
                    temp.write(line)
                    temp.write(''.join(data))
                    found_start = True
                elif line.strip() == '// QUTE_{}_END'.format(what.upper()):
                    # NOTE(review): 'what' is already upper-cased above,
                    # so this extra .upper() is redundant (but harmless).
                    temp.write(line)
                    found_end = True
                elif (not found_start) or found_end:
                    # Outside the marker block: copy through; between
                    # the markers: drop (replaced by 'data' above).
                    temp.write(line)
            if not found_start:
                raise Exception("Marker '// QUTE_{}_START' not found in "
                                "'{}'!".format(what, filename))
            elif not found_end:
                raise Exception("Marker '// QUTE_{}_END' not found in "
                                "'{}'!".format(what, filename))
    except:  # pylint: disable=bare-except
        os.remove(tmpname)
        raise
    else:
        os.remove(filename)
        shutil.move(tmpname, filename)
def regenerate_authors(filename):
    """Re-generate the authors inside README based on the commits made."""
    author_lines = ['* {}\n'.format(author) for author in _get_authors()]
    _format_block(filename, 'authors', author_lines)
def regenerate_manpage(filename):
    """Update manpage OPTIONS using an argparse parser.

    Renders each argparse action group (positionals, optionals and
    user-defined groups) as an asciidoc subsection, appends the parser
    epilog if set, and splices the result between the QUTE_OPTIONS
    markers in *filename*.
    """
    # pylint: disable=protected-access
    parser = qutebrowser.get_argparser()
    groups = []
    # positionals, optionals and user-defined groups
    for group in parser._action_groups:
        groupdata = []
        groupdata.append('=== {}'.format(group.title))
        if group.description is not None:
            groupdata.append(group.description)
        for action in group._group_actions:
            groupdata.append(_format_action(action))
        groups.append('\n'.join(groupdata))
    # epilog
    # BUG fix: the old code joined 'groups' into a string first and then
    # called .append() on that string, raising AttributeError whenever an
    # epilog was set.  Append to the list before joining instead.
    if parser.epilog is not None:
        groups.append(parser.epilog)
    options = '\n'.join(groups)
    _format_block(filename, 'options', options)
def main():
    """Regenerate all documentation."""
    utils.change_cwd()
    jobs = [
        ("manpage", regenerate_manpage, 'doc/qutebrowser.1.asciidoc'),
        ("settings help", generate_settings, 'doc/help/settings.asciidoc'),
        ("command help", generate_commands, 'doc/help/commands.asciidoc'),
        ("authors in README", regenerate_authors, 'README.asciidoc'),
    ]
    for description, func, path in jobs:
        print("Generating {}...".format(description))
        func(path)
    if '--html' in sys.argv:
        asciidoc2html.main()


if __name__ == '__main__':
    main()
| mnick/qutebrowser | scripts/src2asciidoc.py | Python | gpl-3.0 | 15,193 |
## @file
# This file is used to define comment parsing interface
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
CommentParsing
'''
##
# Import Modules
#
import re
from Library.String import GetSplitValueList
from Library.String import CleanString2
from Library.DataType import HEADER_COMMENT_NOT_STARTED
from Library.DataType import TAB_COMMENT_SPLIT
from Library.DataType import HEADER_COMMENT_LICENSE
from Library.DataType import HEADER_COMMENT_ABSTRACT
from Library.DataType import HEADER_COMMENT_COPYRIGHT
from Library.DataType import HEADER_COMMENT_DESCRIPTION
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_COMMA_SPLIT
from Library.DataType import SUP_MODULE_LIST
from Library.DataType import TAB_VALUE_SPLIT
from Library.DataType import TAB_PCD_VALIDRANGE
from Library.DataType import TAB_PCD_VALIDLIST
from Library.DataType import TAB_PCD_EXPRESSION
from Library.DataType import TAB_PCD_PROMPT
from Library.DataType import TAB_CAPHEX_START
from Library.DataType import TAB_HEX_START
from Library.DataType import PCD_ERR_CODE_MAX_SIZE
from Library.ExpressionValidate import IsValidRangeExpr
from Library.ExpressionValidate import IsValidListExpr
from Library.ExpressionValidate import IsValidLogicalExpr
from Object.POM.CommonObject import TextObject
from Object.POM.CommonObject import PcdErrorObject
import Logger.Log as Logger
from Logger.ToolError import FORMAT_INVALID
from Logger.ToolError import FORMAT_NOT_SUPPORTED
from Logger import StringTable as ST
## ParseHeaderCommentSection
#
# Parse Header comment section lines, extract Abstract, Description, Copyright
# , License lines
#
# @param CommentList: List of (Comment, LineNumber)
# @param FileName: FileName of the comment
#
def ParseHeaderCommentSection(CommentList, FileName = None, IsBinaryHeader = False):
    """Parse a file's header comment section.

    Runs a small state machine over the comment lines: everything before
    the @file/@BinaryHeader marker is license text, then the abstract,
    description, copyright and license blocks follow in that order.

    @param CommentList: list of (Comment, LineNumber) tuples
    @param FileName: filename of the comment (for error reporting)
    @param IsBinaryHeader: True to look for @BinaryHeader instead of @file
    @return: (Abstract, Description, Copyright, License), each stripped
    """
    Abstract = ''
    Description = ''
    Copyright = ''
    License = ''
    EndOfLine = "\n"
    if IsBinaryHeader:
        STR_HEADER_COMMENT_START = "@BinaryHeader"
    else:
        STR_HEADER_COMMENT_START = "@file"
    HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
    #
    # first find the last copyright line
    #
    Last = 0
    for Index in xrange(len(CommentList)-1, 0, -1):
        Line = CommentList[Index][0]
        if _IsCopyrightLine(Line):
            Last = Index
            break
    for Item in CommentList:
        Line = Item[0]
        LineNo = Item[1]
        # Every non-empty line of the header must be a comment line.
        if not Line.startswith(TAB_COMMENT_SPLIT) and Line:
            Logger.Error("\nUPT", FORMAT_INVALID, ST.ERR_INVALID_COMMENT_FORMAT, FileName, Item[1])
        Comment = CleanString2(Line)[1]
        Comment = Comment.strip()
        #
        # if there are blank lines between License or Description, keep them as they would be
        # indication of different block; or in the position that Abstract should be, also keep it
        # as it indicates that no abstract
        #
        if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
                                                      HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
            continue
        if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
            if Comment.startswith(STR_HEADER_COMMENT_START):
                HeaderCommentStage = HEADER_COMMENT_ABSTRACT
            else:
                License += Comment + EndOfLine
        else:
            if HeaderCommentStage == HEADER_COMMENT_ABSTRACT:
                #
                # in case there is no abstract and description
                #
                if not Comment:
                    HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
                elif _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
                else:
                    Abstract += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
            elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
                #
                # in case there is no description
                #
                if _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                    HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
                else:
                    Description += Comment + EndOfLine
            elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
                if _IsCopyrightLine(Comment):
                    Result, ErrMsg = _ValidateCopyright(Comment)
                    ValidateCopyright(Result, ST.WRN_INVALID_COPYRIGHT, FileName, LineNo, ErrMsg)
                    Copyright += Comment + EndOfLine
                else:
                    #
                    # Contents after copyright line are license, those non-copyright lines in between
                    # copyright line will be discarded
                    #
                    if LineNo > Last:
                        if License:
                            License += EndOfLine
                        License += Comment + EndOfLine
                        HeaderCommentStage = HEADER_COMMENT_LICENSE
            else:
                if not Comment and not License:
                    continue
                License += Comment + EndOfLine
    return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
# check whether current line is copyright line, the criteria is whether there is case insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
#
# @param LineContent: the line need to be checked
# @return: True if current line is copyright line, False else
#
def _IsCopyrightLine (LineContent):
    """Check whether a line is a copyright line.

    The criterion is a case-insensitive "Copyright" keyword at the start
    of the line or after whitespace, followed by optional spaces and a
    '(' character.

    @param LineContent: the line to be checked
    @return: True if it is a copyright line, False otherwise
    """
    CopyrightPattern = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
    return CopyrightPattern.search(LineContent.upper()) is not None
## ParseGenericComment
#
# @param GenericComment: Generic comment list, element of
# (CommentLine, LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseGenericComment (GenericComment, ContainerFile=None, SkipTag=None):
    """Parse a generic comment block into a TextObject help text.

    @param GenericComment: list of (CommentLine, LineNum) tuples
    @param ContainerFile: filename of the Dec file (unused, kept for API)
    @param SkipTag: optional tag stripped once from matching lines
    @return: TextObject with the collected help text, or None if empty
    """
    if ContainerFile:
        pass
    CollectedText = ''
    for Item in GenericComment:
        Stripped = CleanString2(Item[0])[1]
        if SkipTag is not None and Stripped.startswith(SkipTag):
            Stripped = Stripped.replace(SkipTag, '', 1)
        CollectedText += Stripped + '\n'
    if not CollectedText:
        return None
    # Drop a single trailing newline unless the text is exactly '\n' or
    # already ends with a blank line.
    if CollectedText.endswith('\n') and not CollectedText.endswith('\n\n') \
       and CollectedText != '\n':
        CollectedText = CollectedText[:-1]
    HelpObject = TextObject()
    HelpObject.SetString(CollectedText)
    return HelpObject
## ParsePcdErrorCode
#
# @param Value: original ErrorCode value
# @param ContainerFile: Input value for filename of Dec file
# @param LineNum: Line Num
#
def ParsePcdErrorCode (Value = None, ContainerFile = None, LineNum = None):
    """Parse a PCD error code into its canonical hex string form.

    Accepts decimal or 0x/0X-prefixed hex input.  The value must be a
    non-negative integer that fits in UINT32, otherwise a fatal parser
    error is raised.

    @param Value: original ErrorCode value (string)
    @param ContainerFile: filename of the Dec file (for error reporting)
    @param LineNum: line number (for error reporting)
    @return: hex string of the error code (trailing 'L' stripped)
    """
    try:
        if Value.strip().startswith((TAB_HEX_START, TAB_CAPHEX_START)):
            Base = 16
        else:
            Base = 10
        ErrorCode = long(Value, Base)
        if ErrorCode > PCD_ERR_CODE_MAX_SIZE or ErrorCode < 0:
            Logger.Error('Parser',
                         FORMAT_NOT_SUPPORTED,
                         "The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
                         File = ContainerFile,
                         Line = LineNum)
        #
        # To delete the tailing 'L'
        #
        return hex(ErrorCode)[:-1]
    except ValueError, XStr:
        if XStr:
            pass
        # Unparseable number: report the same fatal error.
        Logger.Error('Parser',
                     FORMAT_NOT_SUPPORTED,
                     "The format %s of ErrorCode is not valid, should be UNIT32 type or long type" % Value,
                     File = ContainerFile,
                     Line = LineNum)
## ParseDecPcdGenericComment
#
# @param GenericComment: Generic comment list, element of (CommentLine,
# LineNum)
# @param ContainerFile: Input value for filename of Dec file
#
def ParseDecPcdGenericComment (GenericComment, ContainerFile, TokenSpaceGuidCName, CName, MacroReplaceDict):
    """Parse the generic (leading) comment block of a DEC PCD entry.

    Extracts the help text, the @Prompt string and at most one kind of
    value restriction (@ValidRange, @ValidList or @Expression).  Mixing
    restriction kinds, repeating @ValidList, or repeating @Prompt is a
    fatal parser error.

    @param GenericComment: list of (CommentLine, LineNum) tuples
    @param ContainerFile: filename of the Dec file (for error reporting)
    @param TokenSpaceGuidCName: token space GUID C name of the PCD
    @param CName: C name of the PCD
    @param MacroReplaceDict: macro name -> replacement text mapping
    @return: (HelpStr, PcdErrList, PromptStr)
    """
    HelpStr = ''
    PromptStr = ''
    PcdErr = None
    PcdErrList = []
    ValidValueNum = 0
    ValidRangeNum = 0
    ExpressionNum = 0
    for (CommentLine, LineNum) in GenericComment:
        Comment = CleanString2(CommentLine)[1]
        #
        # To replace Macro
        #
        MACRO_PATTERN = '[\t\s]*\$\([A-Z][_A-Z0-9]*\)'
        MatchedStrs = re.findall(MACRO_PATTERN, Comment)
        for MatchedStr in MatchedStrs:
            if MatchedStr:
                Macro = MatchedStr.strip().lstrip('$(').rstrip(')').strip()
                if Macro in MacroReplaceDict:
                    Comment = Comment.replace(MatchedStr, MacroReplaceDict[Macro])
        if Comment.startswith(TAB_PCD_VALIDRANGE):
            # @ValidRange may not be combined with @ValidList/@Expression.
            if ValidValueNum > 0 or ExpressionNum > 0:
                Logger.Error('Parser',
                             FORMAT_NOT_SUPPORTED,
                             ST.WRN_MULTI_PCD_RANGES,
                             File = ContainerFile,
                             Line = LineNum)
            else:
                PcdErr = PcdErrorObject()
                PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
                PcdErr.SetCName(CName)
                PcdErr.SetFileLine(Comment)
                PcdErr.SetLineNum(LineNum)
                ValidRangeNum += 1
                ValidRange = Comment.replace(TAB_PCD_VALIDRANGE, "", 1).strip()
                Valid, Cause = _CheckRangeExpression(ValidRange)
                if Valid:
                    # Optional leading '<ErrorCode>|' part.
                    ValueList = ValidRange.split(TAB_VALUE_SPLIT)
                    if len(ValueList) > 1:
                        PcdErr.SetValidValueRange((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
                        PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
                    else:
                        PcdErr.SetValidValueRange(ValidRange)
                    PcdErrList.append(PcdErr)
                else:
                    Logger.Error("Parser",
                                 FORMAT_NOT_SUPPORTED,
                                 Cause,
                                 ContainerFile,
                                 LineNum)
        elif Comment.startswith(TAB_PCD_VALIDLIST):
            if ValidRangeNum > 0 or ExpressionNum > 0:
                Logger.Error('Parser',
                             FORMAT_NOT_SUPPORTED,
                             ST.WRN_MULTI_PCD_RANGES,
                             File = ContainerFile,
                             Line = LineNum)
            elif ValidValueNum > 0:
                # Only one @ValidList is allowed per PCD.
                Logger.Error('Parser',
                             FORMAT_NOT_SUPPORTED,
                             ST.WRN_MULTI_PCD_VALIDVALUE,
                             File = ContainerFile,
                             Line = LineNum)
            else:
                PcdErr = PcdErrorObject()
                PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
                PcdErr.SetCName(CName)
                PcdErr.SetFileLine(Comment)
                PcdErr.SetLineNum(LineNum)
                ValidValueNum += 1
                ValidValueExpr = Comment.replace(TAB_PCD_VALIDLIST, "", 1).strip()
                Valid, Cause = _CheckListExpression(ValidValueExpr)
                if Valid:
                    # Commas are normalized to spaces in the stored list.
                    ValidValue = Comment.replace(TAB_PCD_VALIDLIST, "", 1).replace(TAB_COMMA_SPLIT, TAB_SPACE_SPLIT)
                    ValueList = ValidValue.split(TAB_VALUE_SPLIT)
                    if len(ValueList) > 1:
                        PcdErr.SetValidValue((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
                        PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
                    else:
                        PcdErr.SetValidValue(ValidValue)
                    PcdErrList.append(PcdErr)
                else:
                    Logger.Error("Parser",
                                 FORMAT_NOT_SUPPORTED,
                                 Cause,
                                 ContainerFile,
                                 LineNum)
        elif Comment.startswith(TAB_PCD_EXPRESSION):
            if ValidRangeNum > 0 or ValidValueNum > 0:
                Logger.Error('Parser',
                             FORMAT_NOT_SUPPORTED,
                             ST.WRN_MULTI_PCD_RANGES,
                             File = ContainerFile,
                             Line = LineNum)
            else:
                PcdErr = PcdErrorObject()
                PcdErr.SetTokenSpaceGuidCName(TokenSpaceGuidCName)
                PcdErr.SetCName(CName)
                PcdErr.SetFileLine(Comment)
                PcdErr.SetLineNum(LineNum)
                ExpressionNum += 1
                Expression = Comment.replace(TAB_PCD_EXPRESSION, "", 1).strip()
                Valid, Cause = _CheckExpression(Expression)
                if Valid:
                    ValueList = Expression.split(TAB_VALUE_SPLIT)
                    if len(ValueList) > 1:
                        PcdErr.SetExpression((TAB_VALUE_SPLIT.join(ValueList[1:])).strip())
                        PcdErr.SetErrorNumber(ParsePcdErrorCode(ValueList[0], ContainerFile, LineNum))
                    else:
                        PcdErr.SetExpression(Expression)
                    PcdErrList.append(PcdErr)
                else:
                    Logger.Error("Parser",
                                 FORMAT_NOT_SUPPORTED,
                                 Cause,
                                 ContainerFile,
                                 LineNum)
        elif Comment.startswith(TAB_PCD_PROMPT):
            if PromptStr:
                Logger.Error('Parser',
                             FORMAT_NOT_SUPPORTED,
                             ST.WRN_MULTI_PCD_PROMPT,
                             File = ContainerFile,
                             Line = LineNum)
            PromptStr = Comment.replace(TAB_PCD_PROMPT, "", 1).strip()
        else:
            # Any other non-empty comment line is accumulated help text.
            if Comment:
                HelpStr += Comment + '\n'
    #
    # remove the last EOL if the comment is of format 'FOO\n'
    #
    if HelpStr.endswith('\n'):
        if HelpStr != '\n' and not HelpStr.endswith('\n\n'):
            HelpStr = HelpStr[:-1]
    return HelpStr, PcdErrList, PromptStr
## ParseDecPcdTailComment
#
# @param TailCommentList: Tail comment list of Pcd, item of format (Comment, LineNum)
# @param ContainerFile: Input value for filename of Dec file
# @retVal SupModuleList: The supported module type list detected
# @retVal HelpStr: The generic help text string detected
#
def ParseDecPcdTailComment (TailCommentList, ContainerFile):
    """Parse the tail comment of a DEC PCD entry.

    If the comment starts with a supported module type, the leading
    module-type list (up to the first '#') is parsed; otherwise the
    whole comment is treated as help text.

    @param TailCommentList: Tail comment list of Pcd, item of format (Comment, LineNum)
    @param ContainerFile: Input value for filename of Dec file
    @retVal SupModuleList: The supported module type list detected
    @retVal HelpStr: The generic help text string detected
    """
    assert(len(TailCommentList) == 1)
    TailComment = TailCommentList[0][0]
    LineNum = TailCommentList[0][1]
    Comment = TailComment.lstrip(" #")
    ReFindFirstWordRe = re.compile(r"""^([^ #]*)""", re.DOTALL)
    #
    # get first word and compare with SUP_MODULE_LIST
    #
    MatchObject = ReFindFirstWordRe.match(Comment)
    if not (MatchObject and MatchObject.group(1) in SUP_MODULE_LIST):
        # No module-type prefix: the whole comment is help text.
        return None, Comment
    #
    # parse line, it must have supported module type specified
    #
    if Comment.find(TAB_COMMENT_SPLIT) == -1:
        Comment += TAB_COMMENT_SPLIT
    SupMode, HelpStr = GetSplitValueList(Comment, TAB_COMMENT_SPLIT, 1)
    SupModuleList = []
    for Mod in GetSplitValueList(SupMode, TAB_SPACE_SPLIT):
        if not Mod:
            continue
        elif Mod not in SUP_MODULE_LIST:
            Logger.Error("UPT",
                         FORMAT_INVALID,
                         ST.WRN_INVALID_MODULE_TYPE%Mod,
                         ContainerFile,
                         LineNum)
        else:
            SupModuleList.append(Mod)
    return SupModuleList, HelpStr
## _CheckListExpression
#
# @param Expression: Pcd value list expression
#
def _CheckListExpression(Expression):
    """Validate a PCD @ValidList expression.

    The optional '<ErrorCode>|' prefix before the first separator is
    stripped before validation.

    @param Expression: Pcd value list expression
    @return: (Valid, Cause) result of IsValidListExpr
    """
    if TAB_VALUE_SPLIT in Expression:
        ListExpr = Expression.partition(TAB_VALUE_SPLIT)[2]
    else:
        ListExpr = Expression
    return IsValidListExpr(ListExpr)
## _CheckExpreesion
#
# @param Expression: Pcd value expression
#
def _CheckExpression(Expression):
    """Validate a PCD @Expression comment.

    The optional '<ErrorCode>|' prefix before the first separator is
    stripped before validation.

    @param Expression: Pcd value expression
    @return: (Valid, Cause) result of IsValidLogicalExpr
    """
    if TAB_VALUE_SPLIT in Expression:
        Expr = Expression.partition(TAB_VALUE_SPLIT)[2]
    else:
        Expr = Expression
    return IsValidLogicalExpr(Expr, True)
## _CheckRangeExpression
#
# @param Expression: Pcd range expression
#
def _CheckRangeExpression(Expression):
    """Validate a PCD @ValidRange expression.

    The optional '<ErrorCode>|' prefix before the first separator is
    stripped before validation.

    @param Expression: Pcd range expression
    @return: (Valid, Cause) result of IsValidRangeExpr
    """
    if TAB_VALUE_SPLIT in Expression:
        RangeExpr = Expression.partition(TAB_VALUE_SPLIT)[2]
    else:
        RangeExpr = Expression
    return IsValidRangeExpr(RangeExpr)
## ValidateCopyright
#
#
#
def ValidateCopyright(Result, ErrType, FileName, LineNo, ErrMsg):
    """Emit a warning when a copyright line failed validation.

    No-op when Result is True.
    """
    if Result:
        return
    Logger.Warn("\nUPT", ErrType, FileName, LineNo, ErrMsg)
## _ValidateCopyright
#
# @param Line: Line that contains copyright information, # stripped
#
# @retval Result: True if line is conformed to Spec format, False else
# @retval ErrMsg: the detailed error description
#
def _ValidateCopyright(Line):
    """Validate a copyright line against the spec format.

    Currently every line is accepted; this is kept as a hook for
    stricter validation later.

    @param Line: Line that contains copyright information, # stripped
    @retval Result: True if line is conformed to Spec format, False else
    @retval ErrMsg: the detailed error description ('' when valid)
    """
    return True, ''
return Result, ErrMsg
def GenerateTokenList (Comment):
    """Split a comment into tokens.

    Runs of '#' and space characters act as a single separator, and
    leading/trailing separator characters are stripped first.  This is
    equivalent to the old normalize-until-fixpoint loop.

    @param Comment: comment string to tokenize
    @return: list of tokens (a single empty string for an empty comment)
    """
    return re.split('[# ]+', Comment.strip('# '))
#
# Comment - Comment to parse
# TypeTokens - A dictionary of type token synonyms
# RemoveTokens - A list of tokens to remove from help text
# ParseVariable - True for parsing [Guids]. Otherwise False
#
def ParseComment (Comment, UsageTokens, TypeTokens, RemoveTokens, ParseVariable):
    """Parse a comment into (Usage, Type, String, HelpText).

    @param Comment: comment tuple to parse; only item 0 (the text) is used
    @param UsageTokens: dictionary of usage token synonyms -> usage
    @param TypeTokens: dictionary of type token synonyms -> type
    @param RemoveTokens: list of tokens to remove from help text
    @param ParseVariable: True for parsing [Guids] (enables the
                          'Variable:L"..."' handling), otherwise False
    @return: (Usage, Type, String, HelpText); Usage/Type default to
             'UNDEFINED', String is only set for Type 'Variable',
             HelpText is None when empty after token removal
    """
    #
    # Initialize return values
    #
    Usage = None
    Type = None
    String = None
    Comment = Comment[0]
    NumTokens = 2
    if ParseVariable:
        #
        # Remove white space around first instance of ':' from Comment if 'Variable'
        # is in front of ':' and Variable is the 1st or 2nd token in Comment.
        #
        List = Comment.split(':', 1)
        if len(List) > 1:
            SubList = GenerateTokenList (List[0].strip())
            if len(SubList) in [1, 2] and SubList[-1] == 'Variable':
                if List[1].strip().find('L"') == 0:
                    Comment = List[0].strip() + ':' + List[1].strip()
        #
        # Remove first instance of L"<VariableName> from Comment and put into String
        # if and only if L"<VariableName>" is the 1st token, the 2nd token. Or
        # L"<VariableName>" is the third token immediately following 'Variable:'.
        #
        End = -1
        Start = Comment.find('Variable:L"')
        if Start >= 0:
            # Keep 'L"' as the start of String (offset +9 skips 'Variable:').
            String = Comment[Start + 9:]
            End = String[2:].find('"')
        else:
            Start = Comment.find('L"')
            if Start >= 0:
                String = Comment[Start:]
                End = String[2:].find('"')
        if End >= 0:
            SubList = GenerateTokenList (Comment[:Start])
            if len(SubList) < 2:
                Comment = Comment[:Start] + String[End + 3:]
                String = String[:End + 3]
                Type = 'Variable'
                NumTokens = 1
    #
    # Initialze HelpText to Comment.
    # Content will be remove from HelpText as matching tokens are found
    #
    HelpText = Comment
    #
    # Tokenize Comment using '#' and ' ' as token seperators
    #
    List = GenerateTokenList (Comment)
    #
    # Search first two tokens for Usage and Type and remove any matching tokens
    # from HelpText
    #
    for Token in List[0:NumTokens]:
        if Usage == None and Token in UsageTokens:
            Usage = UsageTokens[Token]
            HelpText = HelpText.replace(Token, '')
    if Usage != None or not ParseVariable:
        for Token in List[0:NumTokens]:
            if Type == None and Token in TypeTokens:
                Type = TypeTokens[Token]
                HelpText = HelpText.replace(Token, '')
        if Usage != None:
            for Token in List[0:NumTokens]:
                if Token in RemoveTokens:
                    HelpText = HelpText.replace(Token, '')
    #
    # If no Usage token is present and set Usage to UNDEFINED
    #
    if Usage == None:
        Usage = 'UNDEFINED'
    #
    # If no Type token is present and set Type to UNDEFINED
    #
    if Type == None:
        Type = 'UNDEFINED'
    #
    # If Type is not 'Variable:', then set String to None
    #
    if Type != 'Variable':
        String = None
    #
    # Strip ' ' and '#' from the beginning of HelpText
    # If HelpText is an empty string after all parsing is
    # complete then set HelpText to None
    #
    HelpText = HelpText.lstrip('# ')
    if HelpText == '':
        HelpText = None
    #
    # Return parsing results
    #
    return Usage, Type, String, HelpText
| miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Library/CommentParsing.py | Python | gpl-2.0 | 21,809 |
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
solvers: minimal and expanded interfaces for optimization algorithms
Standard Interface
==================
All of mystic's optimizers derive from the solver API, which provides
each optimizer with a standard, but highly-customizable interface.
A description of the solver API is found in `mystic.models.abstract_model`,
and in each derived optimizer. Mystic's optimizers are::
== Global Optimizers ==
DifferentialEvolutionSolver -- Differential Evolution algorithm
DifferentialEvolutionSolver2 -- Price & Storn's Differential Evolution
== Pseudo-Global Optimizers ==
BuckshotSolver -- Uniform Random Distribution of N Solvers
LatticeSolver -- Distribution of N Solvers on a Regular Grid
== Local-Search Optimizers ==
NelderMeadSimplexSolver -- Nelder-Mead Simplex algorithm
PowellDirectionalSolver -- Powell's (modified) Level Set algorithm
Minimal Interface
=================
Most of mystic's optimizers can be called from a minimal (i.e. one-line)
interface. The collection of arguments is often unique to the optimizer,
and if the underlying solver derives from a third-party package, the
original interface is reproduced. Minimal interfaces to these optimizers
are provided::
== Global Optimizers ==
diffev -- DifferentialEvolutionSolver
diffev2 -- DifferentialEvolutionSolver2
== Pseudo-Global Optimizers ==
buckshot -- BuckshotSolver
lattice -- LatticeSolver
== Local-Search Optimizers ==
fmin -- NelderMeadSimplexSolver
fmin_powell -- PowellDirectionalSolver
More Information
================
For more information, please see the solver documentation found here::
- mystic.mystic.differential_evolution [differential evolution solvers]
- mystic.mystic.scipy_optimize [scipy local-search solvers]
- mystic.mystic.ensemble [pseudo-global solvers]
or the API documentation found here::
- mystic.mystic.abstract_solver [the solver API definition]
- mystic.mystic.abstract_map_solver [the parallel solver API]
- mystic.mystic.abstract_ensemble_solver [the ensemble solver API]
"""
# global optimizers
from differential_evolution import DifferentialEvolutionSolver
from differential_evolution import DifferentialEvolutionSolver2
from differential_evolution import diffev, diffev2
# pseudo-global optimizers
from ensemble import BuckshotSolver
from ensemble import LatticeSolver
from ensemble import buckshot, lattice
# local-search optimizers
from scipy_optimize import NelderMeadSimplexSolver
from scipy_optimize import PowellDirectionalSolver
from scipy_optimize import fmin, fmin_powell
# load a solver from a restart file
def LoadSolver(filename=None, **kwds):
    """Restore a solver instance from a dill restart file.

    Args:
        filename: path to the restart file.  When omitted, the path is taken
            from the ``_state`` keyword if present; otherwise nothing is
            loaded and ``None`` is returned.
        **kwds: state overrides forwarded to the solver's ``__load_state``.

    Returns:
        A freshly constructed solver of the pickled solver's type with the
        pickled state (plus any overrides) transferred onto it, or ``None``
        when no filename could be determined.
    """
    if filename is None:
        filename = kwds['_state'] if '_state' in kwds else None
    #XXX: only allow a list override keys (lookup values from self)
    # if filename is None: filename = self._state
    # if filename is None:
    #     solver = self
    # else:
    import dill
    if filename: f = file(filename, 'rb')
    else: return
    try:
        solver = dill.load(f)
        _locals = {}
        _locals['solver'] = solver
        # Build a brand-new instance of the same solver class via exec; the
        # unpickled 'solver' only donates its saved state below.
        code = "from mystic.solvers import %s;" % solver._type
        code += "self = %s(solver.nDim);" % solver._type
        code = compile(code, '<string>', 'exec')
        exec code in _locals
        self = _locals['self']
    finally:
        f.close()
    # transfer state from solver to self, allowing overrides
    self._AbstractSolver__load_state(solver, **kwds)
    self._state = filename
    self._stepmon.info('LOADED("%s")' % filename)
    return self
# end of file
| jcfr/mystic | mystic/solvers.py | Python | bsd-3-clause | 4,042 |
from imp import load_source
from pathlib import Path
from os.path import expanduser
from pprint import pformat
from subprocess import Popen, PIPE
import os
import sys
from psutil import Process, TimeoutExpired
import colorama
import six
from . import logs, conf, types, shells
def setup_user_dir():
    """Ensure ``~/.thefuck`` (with a ``rules`` subdirectory) exists.

    The settings file inside it is (re)initialized on every call.
    Returns the config directory as a ``Path``.
    """
    config_dir = Path(expanduser('~/.thefuck'))
    rules_path = config_dir.joinpath('rules')
    if not rules_path.is_dir():
        rules_path.mkdir(parents=True)
    conf.initialize_settings_file(config_dir)
    return config_dir
def load_rule(rule):
    """Import the rule file at *rule* (a Path) and wrap it as a Rule."""
    name = rule.name[:-3]  # strip the trailing '.py'
    module = load_source(name, str(rule))
    enabled = getattr(module, 'enabled_by_default', True)
    side_effect = getattr(module, 'side_effect', None)
    priority = getattr(module, 'priority', conf.DEFAULT_PRIORITY)
    return types.Rule(name, module.match, module.get_new_command,
                      enabled, side_effect, priority)
def _get_loaded_rules(rules, settings):
    """Yield every loaded rule that *settings* has enabled."""
    for path in rules:
        if path.name == '__init__.py':
            continue  # package marker, not a rule module
        candidate = load_rule(path)
        if candidate in settings.rules:
            yield candidate
def get_rules(user_dir, settings):
    """Return all enabled rules, ordered by their effective priority."""
    bundled_dir = Path(__file__).parent.joinpath('rules')
    candidates = sorted(bundled_dir.glob('*.py')) \
        + sorted(user_dir.joinpath('rules').glob('*.py'))

    def effective_priority(rule):
        # settings.priority may override the priority a rule declares
        return settings.priority.get(rule.name, rule.priority)

    return sorted(_get_loaded_rules(candidates, settings),
                  key=effective_priority)
def wait_output(settings, popen):
    """Return ``True`` if the command finished within
    ``settings.wait_command`` seconds.

    On timeout the process tree is killed (all descendants first, then the
    root process) and ``False`` is returned.
    """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_command)
        return True
    except TimeoutExpired:
        # kill children first, then the parent process itself
        for child in proc.children(recursive=True):
            child.kill()
        proc.kill()
        return False
def get_command(settings, args):
    """Rebuild the previous command line from *args*, run it, capture output.

    Returns a ``types.Command`` with the script plus decoded stdout/stderr,
    or ``None`` when *args* holds no command or the command did not finish
    within the configured timeout (see ``wait_output``).
    """
    if six.PY2:
        # argv items arrive as bytes on Python 2
        script = ' '.join(arg.decode('utf-8') for arg in args[1:])
    else:
        script = ' '.join(args[1:])
    if not script:
        return
    script = shells.from_shell(script)
    logs.debug('Call: {}'.format(script), settings)
    # LANG=C -- presumably so rules match the untranslated (English) error
    # messages; TODO confirm against the rules' expectations.
    result = Popen(script, shell=True, stdout=PIPE, stderr=PIPE,
                   env=dict(os.environ, LANG='C'))
    if wait_output(settings, result):
        return types.Command(script, result.stdout.read().decode('utf-8'),
                             result.stderr.read().decode('utf-8'))
def get_matched_rule(command, rules, settings):
    """Return the first rule whose ``match`` accepts *command*, else None."""
    for candidate in rules:
        try:
            logs.debug(u'Trying rule: {}'.format(candidate.name), settings)
            matched = candidate.match(command, settings)
        except Exception:
            # a broken rule must not stop the scan -- log it and move on
            logs.rule_failed(candidate, sys.exc_info(), settings)
            continue
        if matched:
            return candidate
def confirm(new_command, side_effect, settings):
    """Ask the user (when required) whether *new_command* should run."""
    if settings.require_confirmation:
        logs.confirm_command(new_command, side_effect, settings)
        try:
            sys.stdin.read(1)  # any keypress confirms; Ctrl+C aborts
        except KeyboardInterrupt:
            logs.failed('Aborted', settings)
            return False
        return True
    # confirmation disabled: just show what is about to run
    logs.show_command(new_command, side_effect, settings)
    return True
def run_rule(rule, command, settings):
    """Build the fixed command from *rule* and, once confirmed, emit it."""
    fixed = shells.to_shell(rule.get_new_command(command, settings))
    if not confirm(fixed, rule.side_effect, settings):
        return
    if rule.side_effect:
        rule.side_effect(command, settings)
    shells.put_to_history(fixed)
    print(fixed)
def main():
    """Entry point: re-run the previous shell command through the rules.

    Flow: load settings, re-execute the failed command to capture its
    output, find the first matching rule and run its fix.  Prints the
    failure banner when nothing matched or the command produced nothing.
    """
    colorama.init()
    user_dir = setup_user_dir()
    settings = conf.get_settings(user_dir)
    logs.debug('Run with settings: {}'.format(pformat(settings)), settings)
    command = get_command(settings, sys.argv)
    if command:
        logs.debug('Received stdout: {}'.format(command.stdout), settings)
        logs.debug('Received stderr: {}'.format(command.stderr), settings)
        rules = get_rules(user_dir, settings)
        logs.debug(
            'Loaded rules: {}'.format(', '.join(rule.name for rule in rules)),
            settings)
        matched_rule = get_matched_rule(command, rules, settings)
        if matched_rule:
            logs.debug('Matched rule: {}'.format(matched_rule.name), settings)
            run_rule(matched_rule, command, settings)
            return
    logs.failed('No fuck given', settings)
| petr-tichy/thefuck | thefuck/main.py | Python | mit | 4,819 |
""" Unit tests for visibility operations
"""
import sys
import unittest
import logging
import numpy
from data_models.parameters import arl_path
from processing_components.visibility.base import create_blockvisibility_from_ms, create_visibility_from_ms
from processing_components.visibility.operations import integrate_visibility_by_channel
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestCreateMS(unittest.TestCase):
    """Tests for building (Block)Visibility objects from CASA MeasurementSets.

    Every test returns early (silently skipping) when python-casacore is
    not importable, since the MS readers depend on it.
    """

    def setUp(self):
        # Probe for casacore once; individual tests bail out early when it
        # is missing instead of erroring.
        try:
            from casacore.tables import table  # pylint: disable=import-error
            self.casacore_available = True
        # except ModuleNotFoundError:
        except:
            self.casacore_available = False

    def test_create_list(self):
        """Read a circular-polarisation MS into BlockVisibility objects."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/xcasa.ms")
        self.vis = create_blockvisibility_from_ms(msfile)
        for v in self.vis:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "circular"

    def test_create_list_spectral(self):
        """Read the 192 channels in blocks of 16 via explicit channel lists."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/ASKAP_example.ms")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_visibility_from_ms(msfile, range(schan, max_chan))
            vis_by_channel.append(v[0])
        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "linear"

    def test_create_list_slice(self):
        """Read channel blocks via start_chan/end_chan (inclusive) slicing."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/ASKAP_example.ms")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_blockvisibility_from_ms(msfile, start_chan=schan, end_chan=max_chan - 1)
            assert v[0].vis.shape[-2] == nchan_ave
            vis_by_channel.append(v[0])
        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "linear"

    def test_create_list_slice_visibility(self):
        """Same slicing as above, but through create_visibility_from_ms."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/ASKAP_example.ms")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_visibility_from_ms(msfile, start_chan=schan, end_chan=max_chan - 1)
            nchannels = len(numpy.unique(v[0].frequency))
            assert nchannels == nchan_ave
            vis_by_channel.append(v[0])
        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "linear"

    def test_create_list_single(self):
        """Read the first 8 channels one at a time."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/ASKAP_example.ms")
        vis_by_channel = list()
        nchan_ave = 1
        nchan = 8
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan + nchan_ave)
            v = create_visibility_from_ms(msfile, start_chan=schan, end_chan=schan)
            vis_by_channel.append(v[0])
        assert len(vis_by_channel) == 8, len(vis_by_channel)
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            assert v.polarisation_frame.type == "linear"

    def test_create_list_spectral_average(self):
        """Integrate each 16-channel block down to a single channel."""
        if not self.casacore_available:
            return
        msfile = arl_path("data/vis/ASKAP_example.ms")
        vis_by_channel = list()
        nchan_ave = 16
        nchan = 192
        for schan in range(0, nchan, nchan_ave):
            max_chan = min(nchan, schan+nchan_ave)
            v = create_blockvisibility_from_ms(msfile, range(schan, max_chan))
            vis_by_channel.append(integrate_visibility_by_channel(v[0]))
        assert len(vis_by_channel) == 12
        for v in vis_by_channel:
            assert v.vis.data.shape[-1] == 4
            # channel axis collapsed to one by the integration
            assert v.vis.data.shape[-2] == 1
            assert v.polarisation_frame.type == "linear"

if __name__ == '__main__':
    unittest.main()
| SKA-ScienceDataProcessor/algorithm-reference-library | tests/processing_components/test_visibility_ms.py | Python | apache-2.0 | 4,717 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
    """
    For `Zephir language <http://zephir-lang.com/>`_ source code.
    Zephir is a compiled high level language aimed
    to the creation of C-extensions for PHP.
    .. versionadded:: 2.0
    """
    name = 'Zephir'
    aliases = ['zephir']
    filenames = ['*.zep']
    # NOTE(review): the two lists below are not referenced by any token rule
    # in this class -- presumably kept for future use; verify before removal.
    zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
    zephir_type = ['bit', 'bits', 'string']
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # entered after tokens that may legally be followed by a regex
        # literal, so '/' is tried as a regex before falling back ('#pop')
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|->|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
             r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
             r'empty)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
             r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
             r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
             r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][\w\\]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.
    Additional options accepted:
    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.
        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:
        .. sourcecode:: pycon
            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]
        In fact the names of those modules match the module names from
        the php documentation.
    """
    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]', '*.inc']
    mimetypes = ['text/x-php']
    # Note that a backslash is included in the following two patterns
    # PHP uses a backslash as a namespace separator
    _ident_char = r'[\\\w]|[^\x00-\x7f]'
    _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
    _ident_end = r'(?:' + _ident_char + ')*'
    _ident_inner = _ident_begin + _ident_end
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # outside <?php ... ?> everything is passed through as Other
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # heredoc/nowdoc: <<<TAG ... TAG
            (r'<<<([\'"]?)(' + _ident_inner + r')\1\n.*?\n\2\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)(' + _ident_inner + ')',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)(' + _ident_inner + ')',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait|yield|'
             r'finally)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
            (r'\$+' + _ident_inner, Name.Variable),
            # plain identifiers; get_tokens_unprocessed() may promote these
            # to Name.Builtin when funcnamehighlighting is on
            (_ident_inner, Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0[xX][a-f0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'0b[01]+', Number.Bin),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (_ident_inner, Name.Class, '#pop')
        ],
        'functionname': [
            (_ident_inner, Name.Function, '#pop')
        ],
        # double-quoted string body with variable interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
            (r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }
    def __init__(self, **options):
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)
        # private option argument for the lexer itself (used by `using(this,
        # _startinline=True)` in the 'string' state above)
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')
        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            for key, value in iteritems(MODULES):
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        # start inside the 'php' state when requested via startinline
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                # promote known builtin function names
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value
    # NOTE(review): defined without 'self' on purpose -- Pygments invokes
    # analyse_text as a plain function on the class.
    def analyse_text(text):
        rv = 0.0
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
| spencerlyon2/pygments | pygments/lexers/php.py | Python | bsd-2-clause | 9,756 |
"""SCons.Tool.rpcgen
Tool-specific initialization for RPCGEN tools.
Three normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpcgen.py 3603 2008/10/10 05:46:45 scons"
from SCons.Builder import Builder
import SCons.Util
cmd = "cd ${SOURCE.dir} && $RPCGEN -%s $RPCGENFLAGS %s -o ${TARGET.abspath} ${SOURCE.file}"
rpcgen_client = cmd % ('l', '$RPCGENCLIENTFLAGS')
rpcgen_header = cmd % ('h', '$RPCGENHEADERFLAGS')
rpcgen_service = cmd % ('m', '$RPCGENSERVICEFLAGS')
rpcgen_xdr = cmd % ('c', '$RPCGENXDRFLAGS')
def generate(env):
    """Add RPCGEN Builders and construction variables to *env*.

    Installs four builders, one per rpcgen output mode: client stubs (-l),
    header (-h), server skeleton (-m) and XDR marshalling code (-c).
    """
    client  = Builder(action=rpcgen_client,  suffix='_clnt.c', src_suffix='.x')
    header  = Builder(action=rpcgen_header,  suffix='.h',      src_suffix='.x')
    service = Builder(action=rpcgen_service, suffix='_svc.c',  src_suffix='.x')
    xdr     = Builder(action=rpcgen_xdr,     suffix='_xdr.c',  src_suffix='.x')
    env.Append(BUILDERS={'RPCGenClient'  : client,
                         'RPCGenHeader'  : header,
                         'RPCGenService' : service,
                         'RPCGenXDR'     : xdr})
    # $RPCGENFLAGS applies to all modes; the mode-specific *FLAGS variables
    # are substituted per builder (see the command templates above).
    env['RPCGEN'] = 'rpcgen'
    env['RPCGENFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
    env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
def exists(env):
    """Report whether the rpcgen utility can be found by *env*."""
    tool = env.Detect('rpcgen')
    return tool
| frew/simpleproto | scons-local-1.1.0/SCons/Tool/rpcgen.py | Python | bsd-2-clause | 2,761 |
# -*- coding: utf-8 -*-
from david.ext.babel import lazy_gettext as _
from david.ext.admin import DBKeyAdminView
from david.lib.template import st
from .model import DB_HOMEPAGE_ARTICLES
from .model import get_homepage_articles
from ..view import bp
@bp.route('/')
def home():
    """Render the site home page with a carousel of featured articles."""
    articles, captions = get_homepage_articles()
    # presumably extended_self() can return a falsy value -- the `if x`
    # filter below drops those entries; TODO confirm.
    articles = [x.extended_self() for x in articles]
    carousel_items = [dict(img=x.picture_url('large'), link=x.url(),
                           caption=captions[i])
                      for i,x in enumerate(articles) if x]
    # **locals() hands `articles` and `carousel_items` to the template
    return st('modules/pages/home.html', **locals())
# Admin screen for editing the raw DB key(s) that back the home page.
homepage_admin = DBKeyAdminView(name=_('Home Page'), endpoint='homepage')
homepage_admin.db_keys = (DB_HOMEPAGE_ARTICLES, )
homepage_admin.key_labels = {
    DB_HOMEPAGE_ARTICLES: _('Home page articles')
}
homepage_admin.help_text = {
    DB_HOMEPAGE_ARTICLES: _('article ids or article url, one item per line')
}
| ktmud/david | david/modules/pages/homepage/__init__.py | Python | mit | 934 |
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2001-2007 Martin Blais. All Rights Reserved
# Copyright (C) 2010 Bear http://code-bear.com/bearlog/
# Copyright (C) 2013 lrq3000
# Excerpt from SnakeFood to recursively list all imports of modules using AST parsing
# Additions to print the versions of each module if available
import os, sys
import compiler
from compiler.ast import Discard, Const
from compiler.visitor import ASTVisitor
import numbers
def pyfiles(startPath):
    """Recursively collect ``[directory, filename]`` pairs for every ``.py``
    file under *startPath*.

    A missing path, or one that is not a directory, yields an empty list.
    """
    base = os.path.abspath(startPath)
    if not (os.path.exists(base) and os.path.isdir(base)):
        return []
    found = []
    for dirpath, _subdirs, filenames in os.walk(base):
        found.extend([dirpath, name] for name in filenames
                     if os.path.splitext(name)[1] == '.py')
    return found
class ImportVisitor(object):
def __init__(self):
self.modules = []
self.recent = []
def visitImport(self, node):
self.accept_imports()
self.recent.extend((x[0], None, x[1] or x[0], node.lineno, 0)
for x in node.names)
def visitFrom(self, node):
self.accept_imports()
modname = node.modname
if modname == '__future__':
return # Ignore these.
for name, as_ in node.names:
if name == '*':
# We really don't know...
mod = (modname, None, None, node.lineno, node.level)
else:
mod = (modname, name, as_ or name, node.lineno, node.level)
self.recent.append(mod)
def default(self, node):
pragma = None
if self.recent:
if isinstance(node, Discard):
children = node.getChildren()
if len(children) == 1 and isinstance(children[0], Const):
const_node = children[0]
pragma = const_node.value
self.accept_imports(pragma)
def accept_imports(self, pragma=None):
self.modules.extend((m, r, l, n, lvl, pragma)
for (m, r, l, n, lvl) in self.recent)
self.recent = []
def finalize(self):
self.accept_imports()
return self.modules
class ImportWalker(ASTVisitor):
    """ASTVisitor adapter that forwards every node to *visitor*'s default().

    compiler.walk() only dispatches visitX methods for matching node types;
    this walker makes the wrapped visitor observe all other nodes too, which
    ImportVisitor needs in order to flush pending imports / attach pragmas.
    """
    def __init__(self, visitor):
        ASTVisitor.__init__(self)
        self._visitor = visitor
    def default(self, node, *args):
        # let the wrapped visitor see the node, then continue traversal
        self._visitor.default(node)
        ASTVisitor.default(self, node, *args)
def parse_python_source(fn):
    """Parse the Python source file *fn* and return its import records.

    Returns the list of 6-tuples produced by ImportVisitor.finalize():
    (module, remote-name, local-name, lineno, level, pragma).
    Python 2 only: relies on the removed `compiler` package.
    """
    contents = open(fn, 'rU').read()  # 'rU': universal-newline mode (Py2)
    ast = compiler.parse(contents)
    vis = ImportVisitor()
    compiler.walk(ast, vis, ImportWalker(vis))
    return vis.finalize()
def find_imports_and_print(startPath):
    """Debug helper: print each .py file under *startPath* and its imports."""
    for d, f in pyfiles(startPath):
        print d, f
        print parse_python_source(os.path.join(d, f))
def find_imports(startPath):
    """Return the unique root module names imported anywhere below
    *startPath* (e.g. an import of ``a.b.c`` is recorded as ``a``)."""
    roots = {}
    for directory, filename in pyfiles(startPath):
        for record in parse_python_source(os.path.join(directory, filename)):
            # record[0] is the dotted module path; keep its top-level package
            roots[record[0].split(".")[0]] = True
    return roots.keys()
def import_module(module_name):
    """Import *module_name* and return the module, or None when missing.

    Courtesy of Armin Ronacher.  The subtlety: an ImportError raised while
    executing the module's own code (the module appears in the traceback)
    is a real bug and is re-raised; an ImportError meaning "module not
    installed" returns None instead.  Python 2 only (py2 raise syntax).
    """
    try:
        __import__(module_name)
    except ImportError:
        exc_type, exc_value, tb_root = sys.exc_info()
        tb = tb_root
        while tb is not None:
            # frame belongs to the module being imported -> genuine error
            if tb.tb_frame.f_globals.get('__name__') == module_name:
                raise exc_type, exc_value, tb_root
            tb = tb.tb_next
        return None
    return sys.modules[module_name]
def find_versions(moduleslist):
    ''' Find the version of each module if available (and only for modules installed, does not work with locally included files)

    Returns a dict mapping module name to: a version value, a list of
    candidate values when several version-like attributes exist, the string
    'Not installed' when the import fails, or 'NA' when nothing was found.
    Python 2 only (iteritems/basestring).
    '''
    modver = {}
    # For each module
    for mod in moduleslist:
        ver = 'NA'
        m = import_module(mod) # Import the module
        if m is None: # The module is not installed
            ver = 'Not installed'
        # Else the module is installed and imported, we try to find the version
        else:
            # Iterate over all keys and try to find the version
            # (heuristic: any string/number attribute whose name smells like
            # a version, e.g. __version__, VERSION, ver)
            verlist = []
            for k, v in m.__dict__.iteritems():
                if ( 'version' in k.lower() or '__version__' in k.lower() or 'ver' in k.lower() ) \
                and isinstance(v, (basestring, numbers.Number)) :
                    verlist.append(v)
            # Store the version
            if len(verlist) > 1:
                modver[mod] = verlist
            elif len(verlist) == 1:
                modver[mod] = verlist[0]
            else:
                modver[mod] = ver
    # Return a dict where the keys are the modules names and values are the versions
    return modver
if __name__ == '__main__':
    import pprint
    # Demo run: scan the authordetector package and report module versions.
    moduleslist = find_imports(os.path.join('..', '..', 'authordetector'))
    modver = find_versions(moduleslist)
    print('List of modules imported:')
    print(moduleslist)
    print('-'*50)
    print('List of modules and versions:')
    pprint.pprint(modver)
    # NOTE(review): on Python 2 `input` is eval(raw_input(...)) -- since the
    # rest of this file is Py2-only, raw_input may be what was intended.
    input("Press Enter to continue...")
| lrq3000/author-detector | install/pylistmodules/pylistmodules.py | Python | gpl-3.0 | 5,224 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.platform import test
class BinValuesFixedWidth(test.TestCase):
  """Tests for `histogram_ops.histogram_fixed_width_bins` (bin indices).

  With value_range [0.0, 5.0] and nbins=5 the bins are
  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf); out-of-range values are
  clamped into the first/last bin.
  """

  def test_empty_input_gives_all_zero_counts(self):
    """Empty input maps to an empty index tensor."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = []
    expected_bins = []
    with self.cached_session():
      bins = histogram_ops.histogram_fixed_width_bins(
          values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, bins.dtype)
      self.assertAllClose(expected_bins, bins.eval())

  def test_1d_values_int32_output(self):
    """Bin indices come back int32 even when dtype=int64 is requested."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
    expected_bins = [0, 0, 1, 2, 4, 4]
    with self.cached_session():
      bins = histogram_ops.histogram_fixed_width_bins(
          values, value_range, nbins=5, dtype=dtypes.int64)
      self.assertEqual(dtypes.int32, bins.dtype)
      self.assertAllClose(expected_bins, bins.eval())

  def test_1d_float64_values_int32_output(self):
    """float64 inputs also produce int32 bin indices."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = np.float64([0.0, 5.0])
    values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15])
    expected_bins = [0, 0, 1, 2, 4, 4]
    with self.cached_session():
      bins = histogram_ops.histogram_fixed_width_bins(
          values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, bins.dtype)
      self.assertAllClose(expected_bins, bins.eval())

  def test_2d_values(self):
    """The input's 2-D shape is preserved in the index tensor."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = constant_op.constant(
        [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]], shape=(2, 3))
    expected_bins = [[0, 0, 1], [2, 4, 4]]
    with self.cached_session():
      bins = histogram_ops.histogram_fixed_width_bins(
          values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, bins.dtype)
      self.assertAllClose(expected_bins, bins.eval())
class HistogramFixedWidthTest(test.TestCase):
  """Tests for `histogram_ops.histogram_fixed_width` (per-bin counts)."""

  def setUp(self):
    # Seeded RNG so any randomized inputs stay reproducible.
    self.rng = np.random.RandomState(0)

  def test_with_invalid_value_range(self):
    """value_range must be a rank-1 tensor with exactly two elements."""
    values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
    with self.assertRaisesRegexp(
        ValueError, "Shape must be rank 1 but is rank 0"):
      histogram_ops.histogram_fixed_width(values, 1.0)
    with self.assertRaisesRegexp(ValueError, "Dimension must be 2 but is 3"):
      histogram_ops.histogram_fixed_width(values, [1.0, 2.0, 3.0])

  def test_with_invalid_nbins(self):
    """nbins must be a positive scalar."""
    values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
    with self.assertRaisesRegexp(
        ValueError, "Shape must be rank 0 but is rank 1"):
      histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=[1, 2])
    with self.assertRaisesRegexp(
        ValueError, "Requires nbins > 0"):
      histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=-5)

  def test_empty_input_gives_all_zero_counts(self):
    """No values means every bin count is zero."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = []
    expected_bin_counts = [0, 0, 0, 0, 0]
    with self.session(use_gpu=True):
      hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval())

  def test_1d_values_int64_output(self):
    """dtype=int64 is honored for the counts (unlike the bins op)."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
    expected_bin_counts = [2, 1, 1, 0, 2]
    with self.session(use_gpu=True):
      hist = histogram_ops.histogram_fixed_width(
          values, value_range, nbins=5, dtype=dtypes.int64)
      self.assertEqual(dtypes.int64, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval())

  def test_1d_float64_values(self):
    """float64 inputs produce the default int32 counts."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = np.float64([0.0, 5.0])
    values = np.float64([-1.0, 0.0, 1.5, 2.0, 5.0, 15])
    expected_bin_counts = [2, 1, 1, 0, 2]
    with self.session(use_gpu=True):
      hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval())

  def test_2d_values(self):
    """2-D input is flattened into one histogram."""
    # Bins will be:
    #   (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
    value_range = [0.0, 5.0]
    values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
    expected_bin_counts = [2, 1, 1, 0, 2]
    with self.session(use_gpu=True):
      hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
      self.assertEqual(dtypes.int32, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval())

  def test_shape_inference(self):
    """Static shape is (nbins,) when nbins is known, unknown otherwise."""
    value_range = [0.0, 5.0]
    values = [[-1.0, 0.0, 1.5], [2.0, 5.0, 15]]
    expected_bin_counts = [2, 1, 1, 0, 2]
    placeholder = array_ops.placeholder(dtypes.int32)
    with self.session(use_gpu=True):
      hist = histogram_ops.histogram_fixed_width(values, value_range, nbins=5)
      self.assertAllEqual(hist.shape.as_list(), (5,))
      self.assertEqual(dtypes.int32, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval())
      # nbins fed at run time: rank is known, the dimension is not
      hist = histogram_ops.histogram_fixed_width(
          values, value_range, nbins=placeholder)
      self.assertEquals(hist.shape.ndims, 1)
      self.assertIs(hist.shape.dims[0].value, None)
      self.assertEqual(dtypes.int32, hist.dtype)
      self.assertAllClose(expected_bin_counts, hist.eval({placeholder: 5}))

if __name__ == '__main__':
  test.main()
| alshedivat/tensorflow | tensorflow/python/ops/histogram_ops_test.py | Python | apache-2.0 | 6,610 |
import os
from hackpad_api.hackpad import Hackpad
# Credentials and the team sub-domain come from the environment so they
# never live in source control.
hackpad = Hackpad(os.getenv('team') ,consumer_key = os.getenv('consumer_key'), consumer_secret = os.getenv('consumer_secret'))
# List every pad visible to the authenticated account.
print(hackpad.do_api_request('pads/all', 'GET'))
| Falicon/Python-Hackpad-API | example/example1.py | Python | mit | 229 |
from sqlobject import *
from setup import *
__connection__ = conn
## Snippet "simpleaddress-person1"
class Person(SQLObject):
    """A person row; maps to the `person` table shown in the schema below."""
    _connection = conn  # explicit connection (module also sets __connection__)
    firstName = StringCol()
    middleInitial = StringCol(length=1, default=None)  # optional, one char
    lastName = StringCol()
## end snippet
## We create a table like this: (for MySQL)
"""
## Snippet "simpleaddress-schema-person1"
CREATE TABLE person (
id INT PRIMARY KEY AUTO_INCREMENT,
first_name TEXT,
middle_initial CHAR(1),
last_name TEXT
);
## end snippet
"""
def reset():
    """Drop and recreate the Person table so the demo starts from a clean slate."""
    Person.dropTable(ifExists=True)
    Person.createTable()
## Get rid of any tables we have left over...
Person.dropTable(ifExists=True)
## Now we create new tables...
## Snippet "simpleaddress-person1-create"
Person.createTable()
## End snippet
## Snippet "simpleaddress-person1-use"
p = Person(firstName="John", lastName="Doe")
print p
#>> <Person 1 firstName='John' middleInitial=None lastName='Doe'>
print p.firstName
#>> 'John'
p.middleInitial = 'Q'
print p.middleInitial
#>> 'Q'
p2 = Person.get(1)
print p2
#>> <Person 1 firstName='John' middleInitial='Q' lastName='Doe'>
print p is p2
#>> True
## End snippet
## Reset the table and repeat the same steps with SQL debug logging turned
## on, so the emitted statements are visible in the transcript below.
reset()
print '-'*60
conn.debug = 1
## Snippet "simpleaddress-person1-use-debug"
p = Person(firstName="John", lastName="Doe")
#>> QueryIns:
#     INSERT INTO person (last_name, middle_initial, first_name)
#     VALUES ('Doe', NULL, 'John')
#
#-- Not quite optimized, we don't remember the values we used to
#-- create the object, so they get re-fetched from the database:
#>> QueryOne:
#     SELECT last_name, middle_initial, first_name
#     FROM person
#     WHERE id = 1
print p
#>> <Person 1 firstName='John' middleInitial=None lastName='Doe'>
print p.firstName
#-- Now we've saved cached the column values, so we don't fetch
#-- it again.
#>> 'John'
p.middleInitial = 'Q'
#>> Query   :
#     UPDATE person
#     SET middle_initial = 'Q'
#     WHERE id = 1
print p.middleInitial
#>> 'Q'
p2 = Person.get(1)
#-- Again, no database access, since we're just grabbing the same
#-- instance we already had.
print p2
#>> <Person 1 firstName='John' middleInitial='Q' lastName='Doe'>
print p is p2
#>> True
## End snippet
## set() updates several columns with a single UPDATE statement.
## Snippet "simpleaddress-person1-use-set"
p.set(firstName='Bob', lastName='Dole')
## end snippet
| pacoqueen/bbinn | SQLObject/SQLObject-0.6.1/examples/simpleperson.py | Python | gpl-2.0 | 2,256 |
#!/usr/bin/env python
#imports go here
from collections import *
#
# Free Coding session for 2014-10-22
# Written by Matt Warren
#
# Sample data shared by the collections demos below.
words = ['orange', 'red', 'blue', 'red', 'grey', 'pink', 'blue', 'red']
if __name__=='__main__':
    # Counter: manual tallying, then the top-2 entries and the raw counts.
    cnt = Counter()
    for word in words:
        cnt[word] += 1
    print cnt
    print cnt.most_common(2)
    print cnt.values()
    # A second Counter built from a dict, then subtracted from the first.
    numbers = range(len(words))
    cnt2 = Counter(dict(zip(words, numbers)))
    print cnt2
    cnt.subtract(cnt2)
    print cnt
    # deque: O(1) appends/pops at both ends; extendleft reverses its input.
    d = deque()
    d.extend([1,2,3,4])
    d.append(5)
    d.extendleft([0, -1, -2, -3])
    d.appendleft(-4)
    d.reverse()
    print d
    # defaultdict(int): the same tally without pre-initialising keys.
    dd = defaultdict(int)
    for word in words:
        dd[word] += 1
    print dd
    # namedtuple: lightweight record type with named field access.
    Point = namedtuple('Point', ['x', 'y'])
    p = Point(1, 2)
    print p
    print p.x, p.y
| mfwarren/FreeCoding | 2014/10/fc_2014_10_22.py | Python | mit | 815 |
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
from distutils.version import LooseVersion
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
# Credential attribute name -> environment variable consulted when the
# credential is not supplied on the command line.
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)

# Inventory behaviour settings that may come from azure_rm.ini or the
# environment (see _get_settings()).
AZURE_CONFIG_SETTINGS = dict(
    resource_groups='AZURE_RESOURCE_GROUPS',
    tags='AZURE_TAGS',
    locations='AZURE_LOCATIONS',
    include_powerstate='AZURE_INCLUDE_POWERSTATE',
    group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
    group_by_location='AZURE_GROUP_BY_LOCATION',
    group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
    group_by_tag='AZURE_GROUP_BY_TAG'
)

# Minimum supported azure.mgmt.compute package version (checked in main()).
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
    """Map each path segment of an Azure resource id to the segment after it.

    Pairs overlap (segment i -> segment i+1 for every i), matching the
    historical behaviour; callers only look up fixed keys such as
    'resourceGroups', for which the following segment is the value.
    """
    pieces = re.sub(r'^\/', '', id).split('/')
    return {left: right for left, right in zip(pieces, pieces[1:])}
class AzureRM(object):
def __init__(self, args):
    """Resolve Azure credentials and prepare lazy SDK clients.

    Credentials are resolved by _get_credentials() (CLI args, then
    environment, then ~/.azure/credentials) and must include a
    subscription_id plus either a service principal or an AD user.
    """
    self._args = args
    self._compute_client = None
    self._resource_client = None
    self._network_client = None

    self.debug = False
    if args.debug:
        self.debug = True

    self.credentials = self._get_credentials(args)
    if not self.credentials:
        self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                  "or define a profile in ~/.azure/credentials.")

    if self.credentials.get('subscription_id', None) is None:
        self.fail("Credentials did not include a subscription_id value.")
    self.log("setting subscription_id")
    self.subscription_id = self.credentials['subscription_id']

    # Prefer a service principal (client_id/secret/tenant); otherwise fall
    # back to Active Directory user/password authentication.
    if self.credentials.get('client_id') is not None and \
            self.credentials.get('secret') is not None and \
            self.credentials.get('tenant') is not None:
        self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                             secret=self.credentials['secret'],
                                                             tenant=self.credentials['tenant'])
    elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
        self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
    else:
        self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                  "Credentials must include client_id, secret and tenant or ad_user and password.")
def log(self, msg):
if self.debug:
print (msg + u'\n')
def fail(self, msg):
    """Abort setup by raising a generic Exception carrying *msg*."""
    raise Exception(msg)
def _get_profile(self, profile="default"):
    """Read credential keys for *profile* from ~/.azure/credentials.

    Returns a credentials dict, or None when the profile yields neither a
    client_id nor an ad_user.
    """
    path = expanduser("~")
    path += "/.azure/credentials"
    try:
        config = ConfigParser.ConfigParser()
        config.read(path)
    except Exception as exc:
        self.fail("Failed to access {0}. Check that the file exists and you have read "
                  "access. {1}".format(path, str(exc)))
    credentials = dict()
    for key in AZURE_CREDENTIAL_ENV_MAPPING:
        try:
            credentials[key] = config.get(profile, key, raw=True)
        except:
            # Missing section/option: leave this key unset (best effort).
            pass

    if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
        return credentials

    return None
def _get_env_credentials(self):
    """Assemble credentials from AZURE_* environment variables.

    AZURE_PROFILE, when set, redirects to the named profile in
    ~/.azure/credentials. Returns None when nothing usable is found.
    """
    env_credentials = dict()
    for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
        env_credentials[attribute] = os.environ.get(env_variable, None)

    if env_credentials['profile'] is not None:
        credentials = self._get_profile(env_credentials['profile'])
        return credentials

    if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
        return env_credentials

    return None
def _get_credentials(self, params):
    """Resolve credentials with precedence: CLI params -> env -> default profile.

    *params* is the argparse namespace; returns a credentials dict or None.
    """
    # Get authentication credentials.
    # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
    self.log('Getting credentials')

    arg_credentials = dict()
    for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.iteritems():
        arg_credentials[attribute] = getattr(params, attribute)

    # try module params
    if arg_credentials['profile'] is not None:
        self.log('Retrieving credentials with profile parameter.')
        credentials = self._get_profile(arg_credentials['profile'])
        return credentials

    if arg_credentials['client_id'] is not None:
        self.log('Received credentials from parameters.')
        return arg_credentials

    # try environment
    env_credentials = self._get_env_credentials()
    if env_credentials:
        self.log('Received credentials from env.')
        return env_credentials

    # try default profile from ~./azure/credentials
    default_credentials = self._get_profile()
    if default_credentials:
        self.log('Retrieved default profile credentials from ~/.azure/credentials.')
        return default_credentials

    return None
def _register(self, key):
    """Register resource provider *key* for the subscription (one-time)."""
    try:
        # We have to perform the one-time registration here. Otherwise, we receive an error the first
        # time we attempt to use the requested client.
        resource_client = self.rm_client
        resource_client.providers.register(key)
    except Exception as exc:
        self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
@property
def network_client(self):
    """Lazily created NetworkManagementClient; registers Microsoft.Network once."""
    self.log('Getting network client')
    if not self._network_client:
        self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
        self._register('Microsoft.Network')
    return self._network_client

@property
def rm_client(self):
    """Lazily created ResourceManagementClient."""
    self.log('Getting resource manager client')
    if not self._resource_client:
        self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
    return self._resource_client

@property
def compute_client(self):
    """Lazily created ComputeManagementClient; registers Microsoft.Compute once."""
    self.log('Getting compute client')
    if not self._compute_client:
        self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
        self._register('Microsoft.Compute')
    return self._compute_client
class AzureInventory(object):
def __init__(self):
    """Run the whole inventory build: parse args, query Azure, print JSON.

    NOTE(review): the constructor performs all the work and ends with
    sys.exit(0), so instantiating AzureInventory never returns normally.
    """
    self._args = self._parse_cli_args()

    try:
        rm = AzureRM(self._args)
    except Exception as e:
        sys.exit("{0}".format(str(e)))

    self._compute_client = rm.compute_client
    self._network_client = rm.network_client
    self._resource_client = rm.rm_client
    self._security_groups = None

    # Defaults; possibly overridden by azure_rm.ini / environment settings
    # (via _get_settings) and then by CLI arguments below.
    self.resource_groups = []
    self.tags = None
    self.locations = None
    self.replace_dash_in_groups = False
    self.group_by_resource_group = True
    self.group_by_location = True
    self.group_by_security_group = True
    self.group_by_tag = True
    self.include_powerstate = True

    self._inventory = dict(
        _meta=dict(
            hostvars=dict()
        ),
        azure=[]
    )

    self._get_settings()

    # CLI arguments win over .ini / environment settings.
    if self._args.resource_groups:
        self.resource_groups = self._args.resource_groups.split(',')

    if self._args.tags:
        self.tags = self._args.tags.split(',')

    if self._args.locations:
        self.locations = self._args.locations.split(',')

    if self._args.no_powerstate:
        self.include_powerstate = False

    self.get_inventory()
    print (self._json_format_dict(pretty=self._args.pretty))
    sys.exit(0)
def _parse_cli_args(self):
    """Build and evaluate the argparse CLI; returns the parsed namespace."""
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Produce an Ansible Inventory file for an Azure subscription')
    parser.add_argument('--list', action='store_true', default=True,
                        help='List instances (default: True)')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='Send debug messages to STDOUT')
    parser.add_argument('--host', action='store',
                        help='Get all information about an instance')
    parser.add_argument('--pretty', action='store_true', default=False,
                        help='Pretty print JSON output(default: False)')
    parser.add_argument('--profile', action='store',
                        help='Azure profile contained in ~/.azure/credentials')
    parser.add_argument('--subscription_id', action='store',
                        help='Azure Subscription Id')
    parser.add_argument('--client_id', action='store',
                        help='Azure Client Id ')
    parser.add_argument('--secret', action='store',
                        help='Azure Client Secret')
    parser.add_argument('--tenant', action='store',
                        help='Azure Tenant Id')
    parser.add_argument('--ad-user', action='store',
                        help='Active Directory User')
    parser.add_argument('--password', action='store',
                        help='password')
    parser.add_argument('--resource-groups', action='store',
                        help='Return inventory for comma separated list of resource group names')
    parser.add_argument('--tags', action='store',
                        help='Return inventory for comma separated list of tag key:value pairs')
    parser.add_argument('--locations', action='store',
                        help='Return inventory for comma separated list of locations')
    parser.add_argument('--no-powerstate', action='store_true', default=False,
                        help='Do not include the power state of each virtual host')
    return parser.parse_args()
def get_inventory(self):
    """Fetch VMs (per requested resource group, or subscription-wide) and load them."""
    if len(self.resource_groups) > 0:
        # get VMs for requested resource groups
        for resource_group in self.resource_groups:
            try:
                virtual_machines = self._compute_client.virtual_machines.list(resource_group)
            except Exception as exc:
                sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
                                                                                                str(exc)))
            # Filter only when --host or tags were requested; otherwise take all.
            if self._args.host or self.tags:
                selected_machines = self._selected_machines(virtual_machines)
                self._load_machines(selected_machines)
            else:
                self._load_machines(virtual_machines)
    else:
        # get all VMs within the subscription
        try:
            virtual_machines = self._compute_client.virtual_machines.list_all()
        except Exception as exc:
            sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
        if self._args.host or self.tags or self.locations:
            selected_machines = self._selected_machines(virtual_machines)
            self._load_machines(selected_machines)
        else:
            self._load_machines(virtual_machines)
def _load_machines(self, machines):
    """Build the host_vars dict for each VM and register it via _add_host().

    For every machine this resolves its resource group, optional security
    group mapping, OS/image details, optional power state, and the
    addresses of its primary network interface.
    """
    for machine in machines:
        id_dict = azure_id_to_dict(machine.id)

        # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
        #       fixed, we should remove the .lower(). Opened Issue
        #       #574: https://github.com/Azure/azure-sdk-for-python/issues/574
        resource_group = id_dict['resourceGroups'].lower()

        if self.group_by_security_group:
            # Populates the per-resource-group NIC -> NSG cache.
            self._get_security_groups(resource_group)

        host_vars = dict(
            ansible_host=None,
            private_ip=None,
            private_ip_alloc_method=None,
            public_ip=None,
            public_ip_name=None,
            public_ip_id=None,
            public_ip_alloc_method=None,
            fqdn=None,
            location=machine.location,
            name=machine.name,
            type=machine.type,
            id=machine.id,
            tags=machine.tags,
            network_interface_id=None,
            network_interface=None,
            resource_group=resource_group,
            mac_address=None,
            plan=(machine.plan.name if machine.plan else None),
            virtual_machine_size=machine.hardware_profile.vm_size,
            computer_name=machine.os_profile.computer_name,
            provisioning_state=machine.provisioning_state,
        )

        host_vars['os_disk'] = dict(
            name=machine.storage_profile.os_disk.name,
            operating_system_type=machine.storage_profile.os_disk.os_type.value
        )

        if self.include_powerstate:
            host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)

        if machine.storage_profile.image_reference:
            host_vars['image'] = dict(
                offer=machine.storage_profile.image_reference.offer,
                publisher=machine.storage_profile.image_reference.publisher,
                sku=machine.storage_profile.image_reference.sku,
                version=machine.storage_profile.image_reference.version
            )

        # Add windows details
        if machine.os_profile.windows_configuration is not None:
            host_vars['windows_auto_updates_enabled'] = \
                machine.os_profile.windows_configuration.enable_automatic_updates
            host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
            host_vars['windows_rm'] = None
            if machine.os_profile.windows_configuration.win_rm is not None:
                host_vars['windows_rm'] = dict(listeners=None)
                if machine.os_profile.windows_configuration.win_rm.listeners is not None:
                    host_vars['windows_rm']['listeners'] = []
                    for listener in machine.os_profile.windows_configuration.win_rm.listeners:
                        host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
                                                                         certificate_url=listener.certificate_url))

        # Only the primary NIC contributes addresses / security group info.
        for interface in machine.network_profile.network_interfaces:
            interface_reference = self._parse_ref_id(interface.id)
            network_interface = self._network_client.network_interfaces.get(
                interface_reference['resourceGroups'],
                interface_reference['networkInterfaces'])
            if network_interface.primary:
                if self.group_by_security_group and \
                        self._security_groups[resource_group].get(network_interface.id, None):
                    host_vars['security_group'] = \
                        self._security_groups[resource_group][network_interface.id]['name']
                    host_vars['security_group_id'] = \
                        self._security_groups[resource_group][network_interface.id]['id']
                host_vars['network_interface'] = network_interface.name
                host_vars['network_interface_id'] = network_interface.id
                host_vars['mac_address'] = network_interface.mac_address
                for ip_config in network_interface.ip_configurations:
                    host_vars['private_ip'] = ip_config.private_ip_address
                    host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
                    if ip_config.public_ip_address:
                        public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
                        public_ip_address = self._network_client.public_ip_addresses.get(
                            public_ip_reference['resourceGroups'],
                            public_ip_reference['publicIPAddresses'])
                        # Prefer the public IP for ansible_host when present.
                        host_vars['ansible_host'] = public_ip_address.ip_address
                        host_vars['public_ip'] = public_ip_address.ip_address
                        host_vars['public_ip_name'] = public_ip_address.name
                        host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
                        host_vars['public_ip_id'] = public_ip_address.id
                        if public_ip_address.dns_settings:
                            host_vars['fqdn'] = public_ip_address.dns_settings.fqdn

        self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
    ''' For a given resource_group build a mapping of network_interface.id to security_group name '''
    # Results are cached per resource group; subsequent calls are no-ops.
    if not self._security_groups:
        self._security_groups = dict()
    if not self._security_groups.get(resource_group):
        self._security_groups[resource_group] = dict()
        for group in self._network_client.network_security_groups.list(resource_group):
            if group.network_interfaces:
                for interface in group.network_interfaces:
                    self._security_groups[resource_group][interface.id] = dict(
                        name=group.name,
                        id=group.id
                    )
def _get_powerstate(self, resource_group, name):
    """Return the VM's power state code (e.g. 'running'), or None if absent."""
    try:
        vm = self._compute_client.virtual_machines.get(resource_group,
                                                       name,
                                                       expand='instanceview')
    except Exception as exc:
        sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))

    # The instance view reports statuses like 'PowerState/running'.
    return next((s.code.replace('PowerState/', '')
                 for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
    """Register one host's variables and append it to every matching group.

    NOTE(review): the parameter name shadows the builtin vars(); kept
    unchanged for interface compatibility.
    """
    host_name = self._to_safe(vars['name'])
    resource_group = self._to_safe(vars['resource_group'])
    security_group = None
    if vars.get('security_group'):
        security_group = self._to_safe(vars['security_group'])

    if self.group_by_resource_group:
        if not self._inventory.get(resource_group):
            self._inventory[resource_group] = []
        self._inventory[resource_group].append(host_name)

    if self.group_by_location:
        if not self._inventory.get(vars['location']):
            self._inventory[vars['location']] = []
        self._inventory[vars['location']].append(host_name)

    if self.group_by_security_group and security_group:
        if not self._inventory.get(security_group):
            self._inventory[security_group] = []
        self._inventory[security_group].append(host_name)

    self._inventory['_meta']['hostvars'][host_name] = vars
    self._inventory['azure'].append(host_name)

    if self.group_by_tag and vars.get('tags'):
        # Each tag contributes a "key" group and a "key_value" group.
        for key, value in vars['tags'].iteritems():
            safe_key = self._to_safe(key)
            safe_value = safe_key + '_' + self._to_safe(value)
            if not self._inventory.get(safe_key):
                self._inventory[safe_key] = []
            if not self._inventory.get(safe_value):
                self._inventory[safe_value] = []
            self._inventory[safe_key].append(host_name)
            self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
    """Apply settings from azure_rm.ini when present, else from the environment."""
    # Load settings from the .ini, if it exists. Otherwise,
    # look for environment values.
    file_settings = self._load_settings()
    if file_settings:
        for key in AZURE_CONFIG_SETTINGS:
            # List-valued settings are comma separated; the rest are booleans.
            if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
                values = file_settings.get(key).split(',')
                if len(values) > 0:
                    setattr(self, key, values)
            elif file_settings.get(key):
                val = self._to_boolean(file_settings[key])
                setattr(self, key, val)
    else:
        env_settings = self._get_env_settings()
        for key in AZURE_CONFIG_SETTINGS:
            if key in('resource_groups', 'tags', 'locations') and env_settings.get(key):
                values = env_settings.get(key).split(',')
                if len(values) > 0:
                    setattr(self, key, values)
            elif env_settings.get(key, None) is not None:
                val = self._to_boolean(env_settings[key])
                setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
result = True
return result
def _get_env_settings(self):
    """Collect the AZURE_* config settings from the environment (None if unset)."""
    env_settings = dict()
    for attribute, env_variable in AZURE_CONFIG_SETTINGS.iteritems():
        env_settings[attribute] = os.environ.get(env_variable, None)
    return env_settings
def _load_settings(self):
    """Read settings from the .ini file; returns a dict or None.

    The file defaults to <script_basename>.ini next to this script;
    AZURE_INI_PATH overrides the location.
    """
    basename = os.path.splitext(os.path.basename(__file__))[0]
    default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
    path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
    config = None
    settings = None
    try:
        config = ConfigParser.ConfigParser()
        config.read(path)
    except:
        # Unreadable/missing file: fall through and return None settings.
        pass

    if config is not None:
        settings = dict()
        for key in AZURE_CONFIG_SETTINGS:
            try:
                settings[key] = config.get('azure', key, raw=True)
            except:
                pass

    return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
:param tag_args: List of strings in the form key=value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def main():
    """Entry point: verify the Azure SDK is importable and recent enough,
    then run the inventory build (AzureInventory prints and exits)."""
    if not HAS_AZURE:
        sys.exit("The Azure python sdk is not installed (try 'pip install azure>=2.0.0rc5') - {0}".format(HAS_AZURE_EXC))

    if LooseVersion(azure_compute_version) < LooseVersion(AZURE_MIN_VERSION):
        sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
                 "Do you have Azure >= 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))

    AzureInventory()

if __name__ == '__main__':
    main()
| nwiizo/workspace_2017 | devops/ansible/tettei-nyumon-playbooks/chapter08/azure_playbook1/inventories/staging/azure_rm.py | Python | mit | 32,321 |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import alembic
from alembic import autogenerate as autogen
from alembic import context
from alembic import op
import sqlalchemy
from sqlalchemy import schema as sa_schema
import sqlalchemy.sql.expression as expr
from sqlalchemy.sql import text
from sqlalchemy import types
from neutron.db.migration.models import frozen as frozen_models
LOG = logging.getLogger(__name__)
METHODS = {}
def heal():
    """Bring the migrated schema in line with the frozen model metadata.

    Uses alembic autogenerate to diff the live database against
    neutron.db.migration.models.frozen, then executes one alembic
    command per difference found. No-op in offline mode.
    """
    # This is needed else the heal script will start spewing
    # a lot of pointless warning messages from alembic.
    LOG.setLevel(logging.INFO)
    if context.is_offline_mode():
        return
    models_metadata = frozen_models.get_metadata()
    # Compare metadata from models and metadata from migrations
    # Diff example:
    # [ ( 'add_table',
    #      Table('bat', MetaData(bind=None),
    #            Column('info', String(), table=<bat>), schema=None)),
    # ( 'remove_table',
    #   Table(u'bar', MetaData(bind=None),
    #         Column(u'data', VARCHAR(), table=<bar>), schema=None)),
    # ( 'add_column',
    #    None,
    #   'foo',
    #   Column('data', Integer(), table=<foo>)),
    # ( 'remove_column',
    #   None,
    #  'foo',
    #   Column(u'old_data', VARCHAR(), table=None)),
    # [ ( 'modify_nullable',
    #     None,
    #     'foo',
    #     u'x',
    #     { 'existing_server_default': None,
    #     'existing_type': INTEGER()},
    #     True,
    #     False)]]
    opts = {
        'compare_type': _compare_type,
        'compare_server_default': _compare_server_default,
    }
    mc = alembic.migration.MigrationContext.configure(op.get_bind(), opts=opts)
    set_storage_engine(op.get_bind(), "InnoDB")
    diff1 = autogen.compare_metadata(mc, models_metadata)
    # Alembic's autogenerate does not check foreign keys, so those diffs are
    # computed separately; drops run first, additions last.
    added_fks, dropped_fks = check_foreign_keys(models_metadata)
    diff = dropped_fks + diff1 + added_fks
    # For each difference run command
    for el in diff:
        execute_alembic_command(el)
def execute_alembic_command(command):
    """Dispatch one autogenerate diff entry to its handler.

    Tuple commands (add_table, remove_index, ...) are dispatched through
    the METHODS registry keyed by their first element; list commands
    describe column modifications and go to parse_modify_command().
    """
    if isinstance(command, tuple):
        # The first tuple element names the handler; the remaining
        # elements are its arguments.
        METHODS[command[0]](*command[1:])
    else:
        # Commands that change a column's type, nullability or default are
        # applied through alembic's alter_column.
        parse_modify_command(command)
def parse_modify_command(command):
    """Translate an autogenerate 'modify_*' diff into an op.alter_column() call."""
    # From arguments of command is created op.alter_column() that has the
    # following syntax:
    # alter_column(table_name, column_name, nullable=None,
    #              server_default=False, new_column_name=None, type_=None,
    #              autoincrement=None, existing_type=None,
    #              existing_server_default=False, existing_nullable=None,
    #              existing_autoincrement=None, schema=None, **kw)
    bind = op.get_bind()
    for modified, schema, table, column, existing, old, new in command:
        if modified.endswith('type'):
            modified = 'type_'
        elif modified.endswith('nullable'):
            modified = 'nullable'
            # Never relax NOT NULL on a primary key column.
            insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
            if column in insp.get_primary_keys(table) and new:
                return
        elif modified.endswith('default'):
            modified = 'server_default'
            if isinstance(new, basestring):
                new = text(new)
        kwargs = {modified: new, 'schema': schema}
        # Normalize an existing DefaultClause to its plain-text form so
        # alter_column accepts it as existing_server_default.
        default = existing.get('existing_server_default')
        if default and isinstance(default, sa_schema.DefaultClause):
            if isinstance(default.arg, basestring):
                existing['existing_server_default'] = default.arg
            else:
                existing['existing_server_default'] = default.arg.text
        kwargs.update(existing)
        op.alter_column(table, column, **kwargs)
def alembic_command_method(f):
    """Decorator: register *f* in the METHODS dispatch table under its name."""
    METHODS.update({f.__name__: f})
    return f
@alembic_command_method
def add_table(table):
    """Create *table*, unless an old-named predecessor existed and was renamed."""
    if rename(table.name):
        return
    table.create(bind=op.get_bind(), checkfirst=True)
@alembic_command_method
def add_index(index):
    """Create *index* if the table does not already have one by that name."""
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(op.get_bind())
    existing_names = [idx['name'] for idx in insp.get_indexes(index.table.name)]
    if index.name not in existing_names:
        op.create_index(index.name, index.table.name, column_names(index))
@alembic_command_method
def remove_table(table):
    """Intentionally a no-op: the heal script never drops tables."""
    pass
@alembic_command_method
def remove_index(index):
    """Drop *index* unless it also backs a foreign key constraint."""
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(op.get_bind())
    table_name = index.table.name
    existing = [idx['name'] for idx in insp.get_indexes(table_name)]
    fk_backed = [fk['name'] for fk in insp.get_foreign_keys(table_name)]
    if index.name in existing and index.name not in fk_backed:
        op.drop_index(index.name, table_name)
@alembic_command_method
def remove_column(schema, table_name, column):
    """Drop *column* from *table_name* within *schema*."""
    op.drop_column(table_name, column.name, schema=schema)
@alembic_command_method
def add_column(schema, table_name, column):
    """Add the model-side *column* to the database table.

    ``copy()`` detaches the Column from its model Table so alembic can
    attach it to its own metadata.
    """
    op.add_column(table_name, column.copy(), schema=schema)
@alembic_command_method
def add_constraint(constraint):
    """Create a unique constraint mirroring the model-side definition."""
    op.create_unique_constraint(constraint.name,
                                constraint.table.name,
                                column_names(constraint))
@alembic_command_method
def remove_constraint(constraint):
    """Drop a unique constraint no longer present in the models."""
    op.drop_constraint(constraint.name, constraint.table.name, type_='unique')
@alembic_command_method
def drop_key(fk_name, fk_table):
    """Drop the named foreign key constraint from *fk_table*."""
    op.drop_constraint(fk_name, fk_table, type_='foreignkey')
@alembic_command_method
def add_key(fk):
    """Create the foreign key described by the model-level ForeignKey *fk*."""
    op.create_foreign_key(fk.name,
                          fk.parent.table.name,
                          fk.column.table.name,
                          [fk.parent.name],
                          [fk.column.name])
def check_foreign_keys(metadata):
    """Diff foreign keys between the models' *metadata* and the live DB.

    Returns ``(added_fks, dropped_fks)`` command lists consumable by
    ``execute_alembic_command``.
    """
    # This methods checks foreign keys that tables contain in models with
    # foreign keys that are in db.
    added_fks = []
    dropped_fks = []
    bind = op.get_bind()
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
    # Get all tables from db
    db_tables = insp.get_table_names()
    # Get all tables from models
    model_tables = metadata.tables
    for table in db_tables:
        if table not in model_tables:
            continue
        # Get all necessary information about key of current table from db
        # Keys are normalized (local cols, referred table, referred cols)
        # tuples so DB and model sides can be set-compared.
        fk_db = dict((_get_fk_info_db(i), i['name']) for i in
                     insp.get_foreign_keys(table))
        fk_db_set = set(fk_db.keys())
        # Get all necessary information about key of current table from models
        fk_models = dict((_get_fk_info_from_model(fk), fk) for fk in
                         model_tables[table].foreign_keys)
        fk_models_set = set(fk_models.keys())
        for key in (fk_db_set - fk_models_set):
            dropped_fks.append(('drop_key', fk_db[key], table))
            LOG.info(_("Detected removed foreign key %(fk)r on "
                       "table %(table)r"), {'fk': fk_db[key], 'table': table})
        for key in (fk_models_set - fk_db_set):
            added_fks.append(('add_key', fk_models[key]))
            LOG.info(_("Detected added foreign key for column %(fk)r on table "
                       "%(table)r"), {'fk': fk_models[key].column.name,
                                      'table': table})
    return (added_fks, dropped_fks)
def check_if_table_exists(table):
    """True when *table* exists in the DB and is not itself a rename target."""
    bind = op.get_bind()
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
    present = table in insp.get_table_names()
    return present and table not in frozen_models.renamed_tables
def rename(table):
    """Rename *table*'s legacy-named predecessor to *table* when one exists.

    Returns True when a rename happened (so the caller must not create a
    fresh table), False when the table still needs to be created.
    """
    old_name = frozen_models.renamed_tables.get(table)
    if old_name is not None and check_if_table_exists(old_name):
        op.rename_table(old_name, table)
        # Bug fix: 'old_t'/'new_t' were previously swapped in this log call --
        # the rename goes old_name -> table, and the message now says so.
        LOG.info(_("Table %(old_t)r was renamed to %(new_t)r"), {
            'old_t': old_name, 'new_t': table})
        return True
    return False
def column_names(obj):
    """Return the names of *obj*'s columns, skipping entries without a name."""
    names = []
    for col in obj.columns:
        if hasattr(col, 'name'):
            names.append(col.name)
    return names
def _get_fk_info_db(fk):
return (tuple(fk['constrained_columns']), fk['referred_table'],
tuple(fk['referred_columns']))
def _get_fk_info_from_model(fk):
return ((fk.parent.name,), fk.column.table.name, (fk.column.name,))
def _compare_type(ctxt, insp_col, meta_col, insp_type, meta_type):
    """Return True if types are different, False if not.

    Return None to allow the default implementation to compare these types.
    :param ctxt: alembic MigrationContext instance
    :param insp_col: reflected column
    :param meta_col: column from model
    :param insp_type: reflected column type
    :param meta_type: column type from model
    """
    # Some backends (e.g. mysql) have no native boolean type and reflect
    # booleans as integers; such pairs must not be flagged as different.
    boolean_meta = (types.BOOLEAN, types.Boolean)
    boolean_sql = boolean_meta + (types.INTEGER, types.Integer)
    if not isinstance(meta_type, boolean_meta):
        return None  # tells alembic to use the default comparison method
    return not isinstance(insp_type, boolean_sql)
def _compare_server_default(ctxt, ins_col, meta_col, insp_def, meta_def,
                            rendered_meta_def):
    """Compare default values between model and db table.
    Return True if the defaults are different, False if not, or None to
    allow the default implementation to compare these defaults.
    :param ctxt: alembic MigrationContext instance
    :param insp_col: reflected column
    :param meta_col: column from model
    :param insp_def: reflected column default value
    :param meta_def: column default value from model
    :param rendered_meta_def: rendered column default value (from model)
    """
    # Only MySQL boolean columns need special handling: their defaults are
    # reflected as the strings '1'/'0' while the model uses true()/false().
    if (ctxt.dialect.name == 'mysql' and
            isinstance(meta_col.type, sqlalchemy.Boolean)):
        if meta_def is None or insp_def is None:
            return meta_def != insp_def
        # Equal when model-side True matches '1' or model-side False matches '0'.
        return not (
            isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or
            isinstance(meta_def.arg, expr.False_) and insp_def == "'0'"
        )
    return None  # tells alembic to use the default comparison method
def set_storage_engine(bind, engine):
    """Convert every MySQL table to the given storage *engine*; no-op elsewhere."""
    insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind)
    if bind.dialect.name != 'mysql':
        return
    for table in insp.get_table_names():
        if insp.get_table_options(table)['mysql_engine'] != engine:
            op.execute("ALTER TABLE %s ENGINE=%s" % (table, engine))
| shakamunyi/neutron-vrrp | neutron/db/migration/alembic_migrations/heal_script.py | Python | apache-2.0 | 11,948 |
from django.conf.urls import url
from subscribe import views
# Route table for the subscription app.
urlpatterns = [
    url(r'^$', views.HomeView.as_view(), name='homepage'),
    # Registration form for an event, looked up by slug ("inschrijven" = register).
    url(r'^inschrijven/(?P<slug>[\w-]+)/$', views.register, name='subscribe'),
    url(r'^deleteEventQuestion/$', views.delete_event_question, name='delete-event-question'),
    # NOTE(review): webhook/return look like payment-provider callback and
    # post-payment landing pages keyed by id -- confirm against views.py.
    url(r'^webhook/(?P<id>\d+)/?$', views.webhook, name='webhook'),
    url(r'^return/(?P<id>\d+)/?$', views.return_page, name='return_page'),
]
| jonge-democraten/dyonisos | subscribe/urls.py | Python | mit | 457 |
# -*- coding: utf-8 -*-
"""
flask.testsuite.subclassing
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test that certain behavior of flask can be customized by
subclasses.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from StringIO import StringIO
from logging import StreamHandler
from flask.testsuite import FlaskTestCase
class FlaskSubclassingTestCase(FlaskTestCase):
    # Verifies that Flask.log_exception can be overridden by a subclass to
    # silence the default traceback logging for unhandled errors.
    def test_supressed_exception_logging(self):
        class SupressedFlask(flask.Flask):
            def log_exception(self, exc_info):
                # Intentionally swallow the exception instead of logging it.
                pass
        out = StringIO()
        app = SupressedFlask(__name__)
        app.logger_name = 'flask_tests/test_supressed_exception_logging'
        app.logger.addHandler(StreamHandler(out))
        @app.route('/')
        def index():
            # Deliberately raise ZeroDivisionError to trigger error handling.
            1/0
        rv = app.test_client().get('/')
        # The request still fails with a 500 response...
        self.assert_equal(rv.status_code, 500)
        self.assert_('Internal Server Error' in rv.data)
        # ...but nothing was written to the attached log handler.
        err = out.getvalue()
        self.assert_equal(err, '')
def suite():
    """Build the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(FlaskSubclassingTestCase))
    return tests
| SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/flask/testsuite/subclassing.py | Python | mit | 1,201 |
"""
.. image::
../_static/mongodb.png
`GridFS <https://docs.mongodb.com/manual/core/gridfs/>`_ is a specification for storing large files
(>16 MB) in MongoDB. See :py:mod:`~requests_cache.backends.mongodb` for more general info on MongoDB.
API Reference
^^^^^^^^^^^^^
.. automodsumm:: requests_cache.backends.gridfs
:classes-only:
:nosignatures:
"""
from logging import getLogger
from threading import RLock
from gridfs import GridFS
from gridfs.errors import CorruptGridFile, FileExists
from pymongo import MongoClient
from .._utils import get_valid_kwargs
from .base import BaseCache, BaseStorage
from .mongodb import MongoDict
logger = getLogger(__name__)
class GridFSCache(BaseCache):
    """GridFS cache backend.
    Example:
        >>> session = CachedSession('http_cache', backend='gridfs')
    Args:
        db_name: Database name
        connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
        kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
    """
    def __init__(self, db_name: str, **kwargs):
        super().__init__(**kwargs)
        # Responses go to GridFS (handles large payloads); redirects are
        # small and stored in a regular collection, reusing the same client.
        self.responses = GridFSPickleDict(db_name, **kwargs)
        self.redirects = MongoDict(
            db_name, collection_name='redirects', connection=self.responses.connection, **kwargs
        )
    def remove_expired_responses(self, *args, **kwargs):
        # Hold the storage lock for the whole sweep so concurrent writers
        # cannot race with the removal.
        with self.responses._lock:
            return super().remove_expired_responses(*args, **kwargs)
class GridFSPickleDict(BaseStorage):
    """A dictionary-like interface for a GridFS database
    Args:
        db_name: Database name
        collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
        connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
        kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
    """
    def __init__(self, db_name, collection_name=None, connection=None, **kwargs):
        super().__init__(**kwargs)
        connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
        self.connection = connection or MongoClient(**connection_kwargs)
        self.db = self.connection[db_name]
        self.fs = GridFS(self.db)
        # Serializes writes: GridFS is not thread-safe for concurrent writes
        # to the same key (see __setitem__).
        self._lock = RLock()
    def __getitem__(self, key):
        try:
            with self._lock:
                result = self.fs.find_one({'_id': key})
                if result is None:
                    raise KeyError
                return self.serializer.loads(result.read())
        # A partially-written file is treated the same as a missing key.
        except CorruptGridFile as e:
            logger.warning(e, exc_info=True)
            raise KeyError
    def __setitem__(self, key, item):
        value = self.serializer.dumps(item)
        encoding = None if isinstance(value, bytes) else 'utf-8'
        with self._lock:
            try:
                # GridFS files are immutable: replace = delete old + put new.
                self.fs.delete(key)
                self.fs.put(value, encoding=encoding, **{'_id': key})
            # This can happen because GridFS is not thread-safe for concurrent writes
            except FileExists as e:
                logger.warning(e, exc_info=True)
    def __delitem__(self, key):
        with self._lock:
            res = self.fs.find_one({'_id': key})
            if res is None:
                raise KeyError
            self.fs.delete(res._id)
    def __len__(self):
        # Fast, metadata-based estimate rather than an exact scan.
        return self.db['fs.files'].estimated_document_count()
    def __iter__(self):
        for d in self.fs.find():
            yield d._id
    def clear(self):
        # Drop both backing collections used internally by GridFS.
        self.db['fs.files'].drop()
        self.db['fs.chunks'].drop()
| reclosedev/requests-cache | requests_cache/backends/gridfs.py | Python | bsd-2-clause | 3,582 |
from pyblish import api
from pyblish_bumpybox import inventory
class AppendFtrackAssetName(api.InstancePlugin):
    """Copy the Ftrack task name onto the instance as "ftrackAssetName"."""

    label = "Ftrack Asset Name"
    order = inventory.get_order(__file__, "AppendFtrackAssetName")

    def process(self, instance):
        context_data = instance.context.data
        # Only relevant when the session was launched from Ftrack.
        if "ftrackData" not in context_data:
            return
        task_name = context_data["ftrackData"]["Task"]["name"]
        instance.data["ftrackAssetName"] = task_name
| Bumpybox/pyblish-bumpybox | pyblish_bumpybox/plugins/celaction/bait/append_ftrack_asset_name.py | Python | lgpl-3.0 | 572 |
import os.path
# Fetch data from these URLs
# (MSPA search pages, one per act grouping of the comic)
URLS = [
    "http://mspaintadventures.com/?search=6_1", # Acts 1-4
    "http://mspaintadventures.com/?search=6_2", # Act 5
    "http://mspaintadventures.com/?search=6_3", # Act 6
]
# Names used in dialog logs
# (speaker handles recognized when parsing pesterlog/dialog text; includes
# both full character names and chat-handle abbreviations)
NAMES = [
    'GAMZEE', 'ROSESPRITE', 'DAVEPETASPRITE^2', 'FEFERI', 'VRISKA', 'FEFETASPRITE', 'ARADIABOT',
    'KARKAT', 'JAKE', 'JADE', 'JASPERSPRITE', 'TAVRISPRITE', 'ARANEA', 'MOTHERSPRITE', 'NANNASPRITEx2',
    'JADESPRITE', 'ROSE', 'CALLIOPE', 'NANNASPRITE', 'ERISOLSPRITE', 'ARADIASPRITE', 'DRAGONSPRITE', 'DAVE',
    'SOLLUX', 'NEPETASPRITE', 'TEREZI', 'EQUIUSPRITE', 'TAVROSPRITE', 'GG', 'GC', 'GA', 'GT', 'CG', 'FCG',
    'PCG', 'CC', 'CA', 'CT', 'AG', 'AC', 'AA', 'AT', 'TG', 'TC', 'TA', 'TT', 'UU', 'uu', 'JOHN', 'EB', 'ROXY',
    'ARQUIUSPRITE', 'MEENAH', 'JASPROSESPRITE^2', 'PCG', 'pipefan413', 'DIRK', 'JANE', 'FCG', 'DAVESPRITE', '?GG', 'CGA',
    'KANAYA', 'TAVROS', 'FCG2', 'PAT', 'CCT', 'fedorafreak', 'CCG', 'FAG', 'PAG', 'CAG', 'GCATAVROSPRITE', 'FTC', 'PTC',
    'PCC', 'CCC', 'ARADIA', 'FAA', 'CAA', 'PAA', '?CG', 'PCG2', 'PCG3', 'PCG4', 'PCG5', 'PCG6', 'PCG7', 'PCG8', '?TG',
    'CTG', 'CTA', 'PTA', 'NEPETA', 'FAC', 'CAC', 'FGC', 'PGC', 'CGC', 'EQUIUS', 'FCT', 'FGA', 'KANAYA?', 'ERIDAN', 'FCA',
    'PCA', 'CEB', ')(IC',
]
# Groups of handles that all refer to the same character; the first entry
# of each group is treated as the canonical name.
NAME_ALIASES = [
    ["GAMZEE", "TC", "FTC", "PTC"],
    ["ROSE", "TT", "ROSESPRITE"],
    ["FEFERI", "PCC", "CC", "CCC"],
    ["VRISKA", "AG", "CAG", "FAG", "PAG"],
    ["ARADIA", "ARADIABOT", "ARADIASPRITE", "FAA", "CAA", "AA", "PAA"],
    ["KARKAT", "CG", "CCG", "FCG", "PCG", "FCG2", "?CG",
     "PCG2", "PCG3", "PCG4", "PCG5", "PCG6", "PCG7", "PCG8"],
    ["JAKE", "GT"],
    ["JADE", "GG"],
    ["NANNASPRITE", "NANNASPRITEx2"],
    ["CALLIOPE", "UU"],
    ["DAVE", "TG", "?TG", "CTG"],
    ["SOLLUX", "CTA", "TA", "PTA"],
    ["NEPETA", "FAC", "AC", "CAC"],
    ["TEREZI", "GC", "FGC", "PGC", "CGC"],
    ["EQUIUS", "FCT", "CT"],
    ["TAVROS", "TAVROSPRITE", "AT"],
    ["KANAYA", "GA", "FGA", "KANAYA?"],
    ["ERIDAN", "CA", "FCA", "PCA"],
    ["JOHN", "EB", "CEB"],
    [")(IC"]
]
# Hex color per speaker handle (used for rendering dialog); note that
# "DAVEPETASPRITE^2" maps to a two-color list rather than a single string.
NAME_COLORS = {
    "GAMZEE": "#2b0057",
    "TC": "#2b0057",
    "FTC": "#2b0057",
    "PTC": "#2b0057",
    "ROSE": "#b536da",
    "TT": "#b536da",
    "ROSESPRITE": "#b536da",
    "DAVEPETASPRITE^2": ["#4ac925", "#f2a400"],
    "FEFERI": "#77003c",
    "CC": "#77003c",
    "PCC": "#77003c",
    "CCC": "#77003c",
    "VRISKA": "#005682",
    "AG": "#005682",
    "CAG": "#005682",
    "PAG": "#005682",
    "FAG": "#005682",
    "FEFETASPRITE": "#b536da",
    "ARADIA": "#a10000",
    "ARADIABOT": "#a10000",
    "ARADIASPRITE": "#a10000",
    "AA": "#a10000",
    "CAA": "#a10000",
    "FAA": "#a10000",
    "PAA": "#a10000",
    "KARKAT": "#626262",
    "CG": "#626262",
    "CCG": "#626262",
    "FCG": "#626262",
    "FCG2": "#626262",
    "PCG": "#626262",
    "PCG2": "#626262",
    "PCG3": "#626262",
    "PCG4": "#626262",
    "PCG5": "#626262",
    "PCG6": "#626262",
    "PCG7": "#626262",
    "PCG8": "#626262",
    "?CG": "#626262",
    "JAKE": "#1f9400",
    "GT": "#1f9400",
    "JADE": "#4ac925",
    "GG": "#4ac925",
    "?GG": "#4ac925",
    "JASPERSPRITE": "#f141ef",
    "TAVRISPRITE": "#0715cd",
    "ARANEA": "#005682",
    "NANNASPRITE": "#00d5f2",
    "NANNASPRITEx2": "#00d5f2",
    "JADESPRITE": "#1f9400",
    "CALLIOPE": "#929292",
    "UU": "#929292",
    "ERISOLSPRITE": "#4ac925",
    "DAVE": "#e00707",
    "TG": "#e00707",
    "CTG": "#e00707",
    "?TG": "#e00707",
    "SOLLUX": "#a1a100",
    "TA": "#a1a100",
    "PTA": "#a1a100",
    "CTA": "#a1a100",
    "NEPETA": "#416600",
    "NEPETASPRITE": "#416600",
    "AC": "#416600",
    "CAC": "#416600",
    "FAC": "#416600",
    "TEREZI": "#008282",
    "GC": "#008282",
    "FGC": "#008282",
    "PGC": "#008282",
    "CGC": "#008282",
    "EQUIUS": "#000056",
    "EQUIUSPRITE": "#000056",
    "FCT": "#000056",
    "CT": "#000056",
    "CCT": "#000056",
    "TAVROS": "#a15000",
    "TAVROSPRITE": "#a15000",
    "AT": "#a15000",
    "PAT": "#a15000",
    "ROXY": "#ff6ff2",
    "ARQUIUSPRITE": "#e00707",
    "MEENAH": "#77003c",
    "JASPROSESPRITE^2": "#b536da",
    "pipefan413": "#4b4b4b",
    "DIRK": "#f2a400",
    "JANE": "#00d5f2",
    "DAVESPRITE": "#f2a400",
    "KANAYA": "#008141",
    "KANAYA?": "#008141",
    "GA": "#008141",
    "CGA": "#008141",
    "FGA": "#008141",
    "fedorafreak": "#4b4b4b",
    "JOHN": "#0715cd",
    "EB": "#0715cd",
    "CEB": "#0715cd",
    "ERIDAN": "#6a006a",
    "CA": "#6a006a",
    "FCA": "#6a006a",
    "PCA": "#6a006a",
    "GCATAVROSPRITE": "#0715cd",
    ")(IC": "#77003c",
    "uu": "#323232",
}
# Ignore any lines which contain any of the following strings
IGNORE_LINES = [
    "|PESTERLOG|", "|DIALOGLOG|", "|SPRITELOG|", "|TRKSTRLOG|", "|SRIOUSBIZ|",
    "|JOURNALOG|", "|AUTHORLOG|"
]

# The images are downloaded from the URL http://cdn.mspaintadventures.com/storyfiles/hs2/XXXXX.gif
# starting from IMAGE_START to IMAGE_END
IMAGE_START = 1
IMAGE_END = 8074

# Root of the static assets directory (two levels above this file).
# Bug fix: a stray trailing comma previously made STATIC_ROOT a 1-tuple
# instead of a string; it only worked by accident when %-formatted into
# IMAGE_PATH below, and broke any other consumer expecting a path string.
STATIC_ROOT = '%s/static' % os.path.realpath("%s/../../" % __file__)

# Where to save the loaded images
IMAGE_PATH = "%s/img/hs2" % STATIC_ROOT
| Matoking/Markovstuck | generator/settings.py | Python | unlicense | 5,200 |
# Copyright (C) 2021 Open Source Integrators
# Copyright (C) 2021 Serpent Consulting Services
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "Field Service with Operating Units",
"summary": """
This module adds operating unit information to Field Service orders.""",
"version": "14.0.1.0.0",
"author": "Open Source Integrators, "
"Serpent Consulting Services Pvt. Ltd.,"
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/operating-unit",
"category": "Field Service",
"depends": ["operating_unit", "fieldservice"],
"license": "AGPL-3",
"data": [
"security/fieldservice_security.xml",
"views/fsm_order.xml",
],
"installable": True,
"development_status": "Beta",
"maintainers": [
"max3903",
],
}
| OCA/operating-unit | fieldservice_operating_unit/__manifest__.py | Python | agpl-3.0 | 838 |
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch updates / deletes of storage buckets / blobs.
See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
"""
from email.encoders import encode_noop
from email.generator import Generator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.parser import Parser
import io
import json
import requests
import six
from google.cloud import _helpers
from google.cloud import exceptions
from google.cloud.storage._http import Connection
class MIMEApplicationHTTP(MIMEApplication):
    """MIME type for ``application/http``.
    Constructs payload from headers and body
    :type method: str
    :param method: HTTP method
    :type uri: str
    :param uri: URI for HTTP request
    :type headers: dict
    :param headers: HTTP headers
    :type body: str
    :param body: (Optional) HTTP payload
    """
    def __init__(self, method, uri, headers, body):
        # Dict bodies are serialized to JSON and the entity headers set
        # accordingly; a None body is sent as an empty string.
        if isinstance(body, dict):
            body = json.dumps(body)
            headers["Content-Type"] = "application/json"
            headers["Content-Length"] = len(body)
        if body is None:
            body = ""
        lines = ["%s %s HTTP/1.1" % (method, uri)]
        # Headers are sorted so the serialized sub-request is deterministic.
        lines.extend(
            ["%s: %s" % (key, value) for key, value in sorted(headers.items())]
        )
        lines.append("")
        lines.append(body)
        # HTTP requires CRLF line endings in the serialized request.
        payload = "\r\n".join(lines)
        if six.PY2:
            # email.message.Message is an old-style class, so we
            # cannot use 'super()'.
            MIMEApplication.__init__(self, payload, "http", encode_noop)
        else:  # pragma: NO COVER Python3
            super_init = super(MIMEApplicationHTTP, self).__init__
            super_init(payload, "http", encode_noop)
class _FutureDict(object):
"""Class to hold a future value for a deferred request.
Used by for requests that get sent in a :class:`Batch`.
"""
@staticmethod
def get(key, default=None):
"""Stand-in for dict.get.
:type key: object
:param key: Hashable dictionary key.
:type default: object
:param default: Fallback value to dict.get.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get(%r, default=%r) on a future" % (key, default))
def __getitem__(self, key):
"""Stand-in for dict[key].
:type key: object
:param key: Hashable dictionary key.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get item %r from a future" % (key,))
def __setitem__(self, key, value):
"""Stand-in for dict[key] = value.
:type key: object
:param key: Hashable dictionary key.
:type value: object
:param value: Dictionary value.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot set %r -> %r on a future" % (key, value))
class _FutureResponse(requests.Response):
    """Stand-in response whose payload is a placeholder until the batch runs."""
    def __init__(self, future_dict):
        super(_FutureResponse, self).__init__()
        self._future_dict = future_dict
        # 204 No Content: there is nothing real to report yet.
        self.status_code = 204
    @property
    def content(self):
        # Return the placeholder where real bytes would normally be.
        return self._future_dict
    def json(self):
        # Return the placeholder where decoded JSON would normally be.
        return self._future_dict
class Batch(Connection):
    """Proxy an underlying connection, batching up change operations.
    :type client: :class:`google.cloud.storage.client.Client`
    :param client: The client to use for making connections.
    """
    # Hard cap on requests per batch, enforced in _do_request.
    _MAX_BATCH_SIZE = 1000
    def __init__(self, client):
        super(Batch, self).__init__(client)
        # Deferred (method, url, headers, data, timeout) tuples.
        self._requests = []
        # Objects whose _properties get populated when the batch finishes;
        # kept index-aligned with self._requests.
        self._target_objects = []
    def _do_request(self, method, url, headers, data, target_object, timeout=None):
        """Override Connection: defer actual HTTP request.
        Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred.
        :type method: str
        :param method: The HTTP method to use in the request.
        :type url: str
        :param url: The URL to send the request to.
        :type headers: dict
        :param headers: A dictionary of HTTP headers to send with the request.
        :type data: str
        :param data: The data to send as the body of the request.
        :type target_object: object
        :param target_object:
            (Optional) This allows us to enable custom behavior in our batch
            connection. Here we defer an HTTP request and complete
            initialization of the object at a later time.
        :type timeout: float or tuple
        :param timeout: (optional) The amount of time, in seconds, to wait
            for the server response. By default, the method waits indefinitely.
            Can also be passed as a tuple (connect_timeout, read_timeout).
            See :meth:`requests.Session.request` documentation for details.
        :rtype: tuple of ``response`` (a dictionary of sorts)
                and ``content`` (a string).
        :returns: The HTTP response object and the content of the response.
        """
        if len(self._requests) >= self._MAX_BATCH_SIZE:
            raise ValueError(
                "Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE
            )
        self._requests.append((method, url, headers, data, timeout))
        result = _FutureDict()
        self._target_objects.append(target_object)
        if target_object is not None:
            # The placeholder raises KeyError on access until finish() runs.
            target_object._properties = result
        return _FutureResponse(result)
    def _prepare_batch_request(self):
        """Prepares headers and body for a batch request.
        :rtype: tuple (dict, str)
        :returns: The pair of headers and body of the batch request to be sent.
        :raises: :class:`ValueError` if no requests have been deferred.
        """
        if len(self._requests) == 0:
            raise ValueError("No deferred requests")
        multi = MIMEMultipart()
        # Use timeout of last request, default to None (indefinite)
        timeout = None
        for method, uri, headers, body, _timeout in self._requests:
            subrequest = MIMEApplicationHTTP(method, uri, headers, body)
            multi.attach(subrequest)
            timeout = _timeout
        # The `email` package expects to deal with "native" strings
        if six.PY3:  # pragma: NO COVER  Python3
            buf = io.StringIO()
        else:
            buf = io.BytesIO()
        generator = Generator(buf, False, 0)
        generator.flatten(multi)
        payload = buf.getvalue()
        # Strip off redundant header text
        _, body = payload.split("\n\n", 1)
        return dict(multi._headers), body, timeout
    def _finish_futures(self, responses):
        """Apply all the batch responses to the futures created.
        :type responses: list of (headers, payload) tuples.
        :param responses: List of headers and payloads from each response in
                          the batch.
        :raises: :class:`ValueError` if no requests have been deferred.
        """
        # If a bad status occurs, we track it, but don't raise an exception
        # until all futures have been populated.
        exception_args = None
        if len(self._target_objects) != len(responses):  # pragma: NO COVER
            raise ValueError("Expected a response for every request.")
        for target_object, subresponse in zip(self._target_objects, responses):
            if not 200 <= subresponse.status_code < 300:
                # Remember only the FIRST failing subresponse.
                exception_args = exception_args or subresponse
            elif target_object is not None:
                try:
                    target_object._properties = subresponse.json()
                except ValueError:
                    # Non-JSON payload: store the raw content instead.
                    target_object._properties = subresponse.content
        if exception_args is not None:
            raise exceptions.from_http_response(exception_args)
    def finish(self):
        """Submit a single `multipart/mixed` request with deferred requests.
        :rtype: list of tuples
        :returns: one ``(headers, payload)`` tuple per deferred request.
        """
        headers, body, timeout = self._prepare_batch_request()
        url = "%s/batch/storage/v1" % self.API_BASE_URL
        # Use the private ``_base_connection`` rather than the property
        # ``_connection``, since the property may be this
        # current batch.
        response = self._client._base_connection._make_request(
            "POST", url, data=body, headers=headers, timeout=timeout
        )
        responses = list(_unpack_batch_response(response))
        self._finish_futures(responses)
        return responses
    def current(self):
        """Return the topmost batch, or None."""
        return self._client.current_batch
    def __enter__(self):
        # Entering the context pushes this batch so the client routes
        # subsequent requests through _do_request above.
        self._client._push_batch(self)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # Only submit the batch when the body completed without error.
            if exc_type is None:
                self.finish()
        finally:
            self._client._pop_batch()
def _generate_faux_mime_message(parser, response):
    """Convert response, content -> (multipart) email.message.

    Helper for _unpack_batch_response.
    """
    # We coerce to bytes to get consistent concat across
    # Py2 and Py3. Percent formatting is insufficient since
    # it includes the b in Py3.
    content_type = _helpers._to_bytes(response.headers.get("content-type", ""))
    faux_message = (
        b"Content-Type: "
        + content_type
        + b"\nMIME-Version: 1.0\n\n"
        + response.content
    )
    if six.PY2:
        return parser.parsestr(faux_message)
    # pragma: NO COVER Python3
    return parser.parsestr(faux_message.decode("utf-8"))
def _unpack_batch_response(response):
    """Convert requests.Response -> [(headers, payload)].
    Creates a generator of tuples of emulating the responses to
    :meth:`requests.Session.request`.
    :type response: :class:`requests.Response`
    :param response: HTTP response / headers from a request.
    """
    parser = Parser()
    message = _generate_faux_mime_message(parser, response)
    if not isinstance(message._payload, list):  # pragma: NO COVER
        raise ValueError("Bad response: not multi-part")
    for subrequest in message._payload:
        # The first line of each part is the HTTP status line,
        # e.g. "HTTP/1.1 200 OK"; the rest is headers + body.
        status_line, rest = subrequest._payload.split("\n", 1)
        _, status, _ = status_line.split(" ", 2)
        sub_message = parser.parsestr(rest)
        payload = sub_message._payload
        msg_headers = dict(sub_message._headers)
        content_id = msg_headers.get("Content-ID")
        subresponse = requests.Response()
        # Synthesize a request object so callers can correlate each
        # subresponse with its deferred request via the Content-ID.
        subresponse.request = requests.Request(
            method="BATCH", url="contentid://{}".format(content_id)
        ).prepare()
        subresponse.status_code = int(status)
        subresponse.headers.update(msg_headers)
        subresponse._content = payload.encode("utf-8")
        yield subresponse
| tswast/google-cloud-python | storage/google/cloud/storage/batch.py | Python | apache-2.0 | 11,762 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
class AuthAppConfig(AppConfig):
    # App registration for the storefront user-authentication URLs.
    name = "shuup.front.apps.auth"
    verbose_name = "Shuup Frontend - User Authentication"
    label = "shuup_front.auth"
    # "front_urls" plugs this app's URL patterns into the storefront urlconf.
    provides = {
        "front_urls": [
            "shuup.front.apps.auth.urls:urlpatterns"
        ],
    }
# Points Django at the AppConfig above when the package is installed by name.
default_app_config = "shuup.front.apps.auth.AuthAppConfig"
| suutari-ai/shoop | shuup/front/apps/auth/__init__.py | Python | agpl-3.0 | 594 |
import numpy as np
from scipy import sparse
from . import Mapper
from . import samplers
class VolumeMapper(Mapper):
    """Base class for mappers that sample voxels between pia and white surfaces."""

    @classmethod
    def _cache(cls, filename, subject, xfmname, **kwargs):
        """Compute per-hemisphere masks for *subject*/*xfmname* and cache them."""
        from .. import db
        masks = []
        xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
        pia = db.get_surf(subject, "pia", merge=False, nudge=False)
        wm = db.get_surf(subject, "wm", merge=False, nudge=False)
        # iterate over hemispheres
        for (wpts, polys), (ppts, _) in zip(pia, wm):
            masks.append(cls._getmask(xfm(ppts), xfm(wpts), polys, xfm.shape, **kwargs))
        # NOTE(review): ``_savecache`` is not defined anywhere in this module --
        # confirm it is provided elsewhere before relying on this code path.
        _savecache(filename, masks[0], masks[1], xfm.shape)
        return cls(masks[0], masks[1], xfm.shape, subject, xfmname)

    @classmethod
    def _getmask(cls, pia, wm, polys, shape, npts=1024, **kwargs):
        """Monte-Carlo sparse mask: sample ``npts`` random points per
        pia/white convex polyhedron and bin them into voxels via ``_sample``.

        Bug fix: ``npts`` was previously read without being a parameter
        (NameError at runtime) and ``polyutils`` was never imported in this
        scope; the signature now matches ``ConvexPolyhedra._getmask``.
        """
        from .. import mp
        from .. import polyutils
        rand = np.random.rand(npts, 3)
        csrshape = len(wm), np.prod(shape)

        def func(pts):
            if len(pts) > 0:
                # generate points within the bounding box
                samples = rand * (pts.max(0) - pts.min(0)) + pts.min(0)
                # keep only the points that fall inside the polyhedron
                inside = polyutils.inside_convex_poly(pts)(samples)
                return cls._sample(samples[inside], shape, np.sum(inside))

        surf = polyutils.Surface(pia, polys)
        samples = mp.map(func, surf.polyconvex(wm))

        # Assemble the per-vertex voxel weights into one sparse CSR matrix.
        ij, data = [], []
        for i, sample in enumerate(samples):
            if sample is not None:
                idx = np.zeros((2, len(sample[0])))
                idx[0], idx[1] = i, sample[0]
                ij.append(idx)
                data.append(sample[1])
        return sparse.csr_matrix((np.hstack(data), np.hstack(ij)), shape=csrshape)
class PolyConstMapper(VolumeMapper):
    # Patch size (in voxel units) used when sampling with constant weights.
    patchsize = 0.5
class PolyLinMapper(VolumeMapper):
    # Larger patch size for linearly-weighted sampling.
    patchsize = 1
class Polyhedral(VolumeMapper):
    '''Uses an actual (likely concave) polyhedra betwen the pial and white surfaces
    to estimate the thickness'''
    @staticmethod
    def _getmask(pia, wm, polys, shape):
        from .. import polyutils
        mask = sparse.csr_matrix((len(wm), np.prod(shape)))
        from tvtk.api import tvtk
        measure = tvtk.MassProperties()
        # Six axis-aligned clipping planes form a unit voxel cube.
        planes = tvtk.PlaneCollection()
        for norm in np.vstack([-np.eye(3), np.eye(3)]):
            planes.append(tvtk.Plane(normal=norm))
        ccs = tvtk.ClipClosedSurface(clipping_planes=planes)
        feats = tvtk.FeatureEdges(boundary_edges=1, non_manifold_edges=0, manifold_edges=0, feature_edges=0)
        feats.set_input(ccs.output)
        surf = polyutils.Surface(pia, polys)
        for i, (pts, faces) in enumerate(surf.polyhedra(wm)):
            if len(pts) > 0:
                poly = tvtk.PolyData(points=pts, polys=faces)
                measure.set_input(poly)
                measure.update()
                # NOTE(review): totalvol is computed but never used below.
                totalvol = measure.volume
                # Measure the volume of the polyhedron clipped to each voxel.
                ccs.set_input(poly)
                measure.set_input(ccs.output)
                # Integer bounding box of the polyhedron in voxel coordinates.
                bmin = pts.min(0).round().astype(int)
                bmax = (pts.max(0).round() + 1).astype(int)
                vidx = np.mgrid[bmin[0]:bmax[0], bmin[1]:bmax[1], bmin[2]:bmax[2]]
                for vox in vidx.reshape(3, -1).T:
                    try:
                        idx = np.ravel_multi_index(vox[::-1], shape)
                        # Move the clip planes to the faces of this voxel.
                        for plane, m in zip(planes, [.5, .5, .5, -.5, -.5, -.5]):
                            plane.origin = vox+m
                        ccs.update()
                        if ccs.output.number_of_cells > 2:
                            measure.update()
                            mask[i, idx] = measure.volume
                    except ValueError:
                        print('Voxel not in volume: (%d, %d, %d)'%tuple(vox))
            # Normalize each row so the voxel weights sum to one.
            mask.data[mask.indptr[i]:mask.indptr[i+1]] /= mask[i].sum()
        return mask
class ConvexPolyhedra(VolumeMapper):
    # Monte-Carlo variant: each pia/white polyhedron is approximated by its
    # convex hull and sampled with npts random points.
    @classmethod
    def _getmask(cls, pia, wm, polys, shape, npts=1024):
        from .. import mp
        from .. import polyutils
        rand = np.random.rand(npts, 3)
        csrshape = len(wm), np.prod(shape)
        def func(pts):
            if len(pts) > 0:
                #generate points within the bounding box
                samples = rand * (pts.max(0) - pts.min(0)) + pts.min(0)
                #check which points are inside the polyhedron
                inside = polyutils.inside_convex_poly(pts)(samples)
                # NOTE(review): ``_sample`` is only defined on sibling
                # subclasses (ConvexNN/ConvexTrilin) -- confirm intended usage.
                return cls._sample(samples[inside], shape, np.sum(inside))
        surf = polyutils.Surface(pia, polys)
        samples = mp.map(func, surf.polyconvex(wm))
        #samples = map(func, surf.polyconvex(wm)) ## For debugging
        # Assemble the per-vertex voxel weights into one sparse CSR matrix.
        ij, data = [], []
        for i, sample in enumerate(samples):
            if sample is not None:
                idx = np.zeros((2, len(sample[0])))
                idx[0], idx[1] = i, sample[0]
                ij.append(idx)
                data.append(sample[1])
        return sparse.csr_matrix((np.hstack(data), np.hstack(ij)), shape=csrshape)
class ConvexNN(VolumeMapper):
    @staticmethod
    def _sample(pts, shape, norm):
        """Nearest-neighbor binning: round each point to its voxel, count hits
        per voxel, and return (flat voxel indices, counts / norm).

        Returns None when no point lands inside the volume.
        """
        # Bug fix: Counter was referenced but never imported in this module.
        from collections import Counter
        coords = pts.round().astype(int)[:, ::-1]
        d1 = np.logical_and(0 <= coords[:, 0], coords[:, 0] < shape[0])
        d2 = np.logical_and(0 <= coords[:, 1], coords[:, 1] < shape[1])
        d3 = np.logical_and(0 <= coords[:, 2], coords[:, 2] < shape[2])
        valid = np.logical_and(d1, np.logical_and(d2, d3))
        if valid.any():
            idx = np.ravel_multi_index(coords[valid].T, shape)
            # Bug fix: list() is required on Python 3, where .items() is a
            # dict view that numpy cannot convert into a 2-column array.
            j, data = np.array(list(Counter(idx).items())).T
            return j, data / float(norm)
class ConvexTrilin(VolumeMapper):
    """Volume mapper that spreads each sample point over its 8 neighboring
    voxels using trilinear interpolation weights.
    """
    @staticmethod
    def _sample(pts, shape, norm):
        """Accumulate trilinear weights of *pts* into flat voxel indices.

        pts : (n, 3) float array in (x, y, z) order; shape is (z, y, x).
        Returns (unique flat voxel indices, summed weights / norm).
        """
        # Split into fractional parts (interpolation weights) and the
        # integer "lower corner" voxel of each point.
        (x, y, z), floor = np.modf(pts.T)
        floor = floor.astype(int)
        ceil = floor + 1
        # np.modf yields negative fractions for negative coordinates;
        # clamp them to 0 so weights stay in [0, 1].
        x[x < 0] = 0
        y[y < 0] = 0
        z[z < 0] = 0
        # Flat indices of the 8 corner voxels; mode='clip' clamps
        # out-of-range neighbors to the volume edge.
        i000 = np.ravel_multi_index((floor[2], floor[1], floor[0]), shape, mode='clip')
        i100 = np.ravel_multi_index((floor[2], floor[1],  ceil[0]), shape, mode='clip')
        i010 = np.ravel_multi_index((floor[2],  ceil[1], floor[0]), shape, mode='clip')
        i001 = np.ravel_multi_index(( ceil[2], floor[1], floor[0]), shape, mode='clip')
        i101 = np.ravel_multi_index(( ceil[2], floor[1],  ceil[0]), shape, mode='clip')
        i011 = np.ravel_multi_index(( ceil[2],  ceil[1], floor[0]), shape, mode='clip')
        i110 = np.ravel_multi_index((floor[2],  ceil[1],  ceil[0]), shape, mode='clip')
        i111 = np.ravel_multi_index(( ceil[2],  ceil[1],  ceil[0]), shape, mode='clip')
        # Standard trilinear corner weights; the 8 weights of one point
        # sum to 1.
        v000 = (1-x)*(1-y)*(1-z)
        v100 = x*(1-y)*(1-z)
        v010 = (1-x)*y*(1-z)
        v110 = x*y*(1-z)
        v001 = (1-x)*(1-y)*z
        v101 = x*(1-y)*z
        v011 = (1-x)*y*z
        v111 = x*y*z
        # Interleave indices/weights per point, then sum weights that hit
        # the same voxel.
        allj = np.vstack([i000, i100, i010, i001, i101, i011, i110, i111]).T.ravel()
        data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).T.ravel()
        uniquej = np.unique(allj)
        uniquejdata = np.array([data[allj==j].sum() for j in uniquej])
        return uniquej, uniquejdata / float(norm)
class ConvexLanczos(VolumeMapper):
    """Placeholder for Lanczos-windowed resampling; not implemented."""
    def _sample(self, pts):
        # Interface stub only: raises so callers fail loudly if selected.
        raise NotImplementedError
| gallantlab/pycortex | cortex/mapper/volume.py | Python | bsd-2-clause | 7,345 |
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import re
import sys
from twisted.spread import pb, jelly, flavors
from piped import util
def unwrap_wrapped_text(text):
    """Collapse hard-wrapped lines into single-line paragraphs.

    Paragraphs (runs of text separated by two or more consecutive
    newlines in the input) stay separated by a single newline in the
    output; leading/trailing spaces and tabs are stripped from every
    line before the paragraph is joined into one line.
    """
    strip_edges = re.compile(r'^[ \t]*|[ \t]*$', re.MULTILINE)
    paragraphs = re.split(r'\n{2,}', text)
    return '\n'.join(
        strip_edges.sub('', paragraph).replace('\n', ' ')
        for paragraph in paragraphs)
class PipedError(Exception):
    """ Base class for exceptions raised by our code.
    All errors must have a primary error message *msg*. This should
    be a short and sweet description of the problem.
    Any exceptions that can be exposed to users should also have a
    *detail* message, and if appropriate, a *hint* message that
    explains how to solve the problem. Mind that developers using a
    public API are also considered "users" in this regard.
    Please see the error message guidelines.
    :todo: Write error message guidelines and provide a
        cross-reference. :-)
    """
    # Label used when rendering the message; subclasses override
    # (e.g. PipedWarning uses u'WARNING').
    kind = u'ERROR'
    def __init__(self, msg, detail=None, hint=None):
        """
        :param msg: short primary error message.
        :param detail: optional longer description of the problem.
        :param hint: optional suggestion for how to fix the problem.
        """
        self.msg = msg
        self.detail = detail
        self.hint = hint
        # Keep Exception.args in sync so repr/pickling of the base
        # Exception machinery still sees the full message tuple.
        self.args = (msg, detail, hint)
    @classmethod
    def _cleanup(cls, text):
        # So we want the messages to be Unicode. However, if that
        # fails, we can't fail here with a UnicodeError, as that would
        # mask the real error --- thus errors='replace'.
        #
        # We unwrap the text, since we're often provided with wrapped
        # multi line strings.
        return util.ensure_unicode(unwrap_wrapped_text(text), errors='replace')
    def __unicode__(self):
        """Render the message, plus DETAIL/HINT sections when present."""
        # We prefix even the primary message with a newline and ERROR,
        # so it's easier to spot the short message in a huge-ish
        # traceback.
        result = [u'\n%s: %s' % (self.kind, self._cleanup(self.msg))]
        if self.detail:
            result.append(u'DETAIL: ' + self._cleanup(self.detail))
        if self.hint:
            result.append(u'HINT: ' + self._cleanup(self.hint))
        return u'\n'.join(result)
    def __str__(self):
        # Python 2: str() must return bytes, so encode the unicode form.
        return self.__unicode__().encode('utf8')
    def __repr__(self):
        return (u"%s: %s" % (type(self).__name__, self.msg)).encode('utf8')
class PipedWarning(PipedError, UserWarning):
    """ Base class for warnings raised by our code; rendered like
    PipedError but labeled WARNING. """
    kind = u'WARNING'
class MissingCallback(PipedError):
    """ Raised when pipeline processing does not result in a callback/errback. """
class ForcedError(PipedError):
    """ Used when we are forcing an exception to be raised.
    Used in unit tests.
    """
class UnsafeThreadingError(PipedError):
    """ Used when we are using possibly dangerous threading without specifying
    that we really want to. """
class ValidatorError(PipedError):
    """ Used when the validator-class-decorator detects invalid values. """
class ConfigurationError(PipedError):
    """ An exception related to the configuration. """
class ConfigurationWarning(PipedWarning):
    """ A warning related to the configuration. """
class ConfigurationNotLoaded(ConfigurationError):
    """ Raised when the configuration is used before being loaded. """
class ConfigurationAlreadyLoaded(ConfigurationError):
    """ Raised when the configuration is loaded twice and no reload is specified. """
class InvalidConfigurationError(ConfigurationError):
    """ Raised when an invalid configuration causes an error that can be
    remedied by fixing the configuration."""
class ProcessorGraphError(PipedError):
    """ Raised when a pipeline is not well-defined and -configured. """
class InvalidPipelineError(PipedError):
    """ Raised when the provided pipeline does not exist. """
class PluginError(PipedError):
    """ Plugin-related errors. """
class ResourceError(PipedError):
    """ Base class for exceptions in the resource system. """
class ReplayedLost(ResourceError):
    """ Used when an on_lost is replayed to a consumer in order to avoid
    having to store the arguments of the previous on_lost call
    """
class InitiallyLost(ResourceError):
    """ Raised when an ResourceDependency is resolved for the first time,
    ensuring any consumer are notified that this resource is currently
    unprovided.
    """
class ProviderConflict(ResourceError):
    """ Raised when a resource is attempted provided by multiple
    providers. """
class UnprovidedResourceError(ResourceError):
    """ Raised when a requested resource cannot be satisfied. """
class UnsatisfiedDependencyError(ResourceError):
    """ Raised when an unsatisfied dependency is attempted used. """
class CircularDependencyGraph(ResourceError):
    """ Raised when adding a dependency to the dependency graph would
    cause the graph to contain a cycle, which is unresolvable. """
class AlreadyExistingDependency(ResourceError):
    """ Raised when an already existing dependency is being added. """
class KeyAlreadyExists(ResourceError):
    """ Raised when a `.dependency.DependencyMap` receives multiple dependencies with
    the same key. """
class AllPipelinesFailedError(PipedError):
    """ Raised when a result from any pipeline is expected, but all failed.
    :ivar failures: List of failures.
    """
    def __init__(self, e_msg, failures, **kw):
        """
        :param e_msg: primary error message (see PipedError).
        :param failures: the list of per-pipeline failures, kept on the
            instance for callers to inspect.
        """
        super(AllPipelinesFailedError, self).__init__(e_msg, **kw)
        self.failures = failures
class TimeoutError(PipedError):
    """ Something timed out. """
| alexbrasetvik/Piped | piped/exceptions.py | Python | mit | 5,663 |
# File ex2.py
#
# Author: Zak Fallows (zakf@mit.edu)
# Copyright 2013
# Released for free use under the terms of the MIT License, see license.txt
#
# Demonstrates Python function call overhead.
#
# Notation: _ipy means "interpreted Python".
# _py means "untyped Python function, maybe compiled by Cython".
# _c means "C function, statically typed".
import time
import ex2_u
import ex2_t
#============================= Interpreted Python =============================#
def inner_ipy():
    """Trivial interpreted-Python callee; always returns 2."""
    return 2
def outer_ipy(n):
    """Call inner_ipy() exactly *n* times (discarding the result), then
    return 0.  Used to benchmark pure-Python call overhead."""
    calls_remaining = n
    while calls_remaining > 0:
        inner_ipy()
        calls_remaining -= 1
    return 0
#================================= Test Speed =================================#
# NOTE: This is a very naive and unsophisticated way to benchmark Python and
# Cython code. Cython has fantastic tools for benchmarking code and it gives
# you hints for how to optimize. I am using the simplest method possible in
# this educational example so that it is easy to understand. The results
# may be slightly less accurate, but they are close enough.
def print_results(mode, t0, t1, dt_base=None, result=None):
dt_curr = t1 - t0
print "%s:" % mode
if result:
print " Result = %s" % result
print " Time: %s seconds" % dt_curr
if dt_base != None:
print " Ratio: %.3f" % (dt_base / dt_curr)
print ''
return dt_curr
def run_tests():
    """Benchmark the same call-overhead loop in four flavors and print
    each time plus its speedup ratio versus interpreted Python:
    interpreted, untyped Cython, typed Cython with a Python-callable
    function, and typed Cython with a C function."""
    t0 = time.time()
    r0 = outer_ipy(10**7)
    t1 = time.time()
    dt_ipy = print_results("Interpreted Python", t0, t1)
    t0 = time.time()
    r0 = ex2_u.outer_u(10**7)
    t1 = time.time()
    dt_u = print_results("Untyped Cython", t0, t1, dt_ipy)
    t0 = time.time()
    r0 = ex2_t.outer_py(10**7)
    t1 = time.time()
    dt_u = print_results("Typed Cython, Python Function", t0, t1, dt_ipy)
    t0 = time.time()
    r0 = ex2_t.outer_t(10**7)
    t1 = time.time()
    dt_t = print_results("Typed Cython, C Function", t0, t1, dt_ipy)
    print "# That is an implausibly large speed boost."
    print "# By inspecting the C code output by Cython, I can see that the "
    print "# C code really does call the function 10,000,000 times.  So the "
    print "# speed boost probably comes from either the compiler inlining "
    print "# the function, or more likely, the compiler (in my case GCC) "
    print "# noticed that the result is unused so it completely skipped "
    print "# this piece of code, it generated no corresponding machine code "
    print "# at all.  GCC is too smart for us!"
    print ''
    t0 = time.time()
    # outer2_t "fudges" the result so the C compiler cannot eliminate the
    # calls as dead code; this gives a realistic C-function timing.
    r0 = ex2_t.outer2_t(10**7)
    t1 = time.time()
    dt_t = print_results("Typed Cython, C Function, Fudged", t0, t1, dt_ipy)
# Run the benchmarks at import time, because it is convenient:
run_tests()
| zakf/cython_talk | ex2.py | Python | mit | 2,718 |
from twisted.internet import defer
from twisted.protocols.basic import NetstringReceiver
from twisted.python import failure, log
from txjason import protocol, client
class JSONRPCClientProtocol(NetstringReceiver):
    """
    A JSON RPC Client Protocol for TCP/Netstring connections.

    ``deferred`` fires with a Failure when the connection is lost, which
    lets the owning factory react to disconnection.
    """
    def __init__(self, factory):
        self.factory = factory
        self.deferred = defer.Deferred()

    def stringReceived(self, string):
        """Feed one received netstring payload to the JSON-RPC client."""
        try:
            self.factory.client.handleResponse(string)
        except client.JSONRPCProtocolError:
            # A protocol-level error means the stream can no longer be
            # trusted: log it and drop the connection.
            log.err()
            self.transport.loseConnection()
        except Exception:
            # Best-effort: log any other response-handling error, but do
            # not trap BaseExceptions such as KeyboardInterrupt/SystemExit
            # (the previous bare `except:` did).
            log.err()

    def connectionLost(self, reason):
        """Log the disconnect and errback ``deferred`` with *reason*."""
        if self.brokenPeer:
            log.msg('Disconnected from server because of a broken peer.')
        else:
            log.msg('Lost server connection.')
        self.deferred.errback(reason)
class JSONRPCServerProtocol(NetstringReceiver):
    """
    A JSON RPC Server Protocol for TCP/Netstring connections.
    """
    def __init__(self, service):
        self.service = service
    @defer.inlineCallbacks
    def stringReceived(self, string):
        """Dispatch one raw JSON-RPC request to the service.

        The service returns None for notifications, in which case no
        response is written back to the peer.
        """
        result = yield self.service.call(string)
        if result is not None:
            self.sendString(result)
class JSONRPCClientFactory(protocol.BaseClientFactory):
    """Factory that lazily maintains a single client connection and
    multiplexes JSON-RPC calls/notifications over it.

    A connection is established on first use; concurrent callers while
    connecting share the same pending connection attempt.
    """
    def __init__(self, endpoint, timeout=5, reactor=None):
        if reactor is None:
            from twisted.internet import reactor
        self.client = client.JSONRPCClient(timeout=timeout, reactor=reactor)
        self.endpoint = endpoint
        # _proto: the live protocol, or None when disconnected.
        self._proto = None
        # Deferreds waiting for an in-flight connection attempt.
        self._waiting = []
        self._notifyOnDisconnect = []
        self._connecting = False
        self._connectionDeferred = None
        self.reactor = reactor
    def buildProtocol(self, addr):
        return JSONRPCClientProtocol(self)
    def _cancel(self, d):
        # Canceller for waiter deferreds: abort the underlying attempt.
        if self._connectionDeferred is not None:
            self._connectionDeferred.cancel()
    def _getConnection(self):
        """Return a Deferred firing with a connected protocol.

        Reuses the live connection when present; otherwise joins (or
        starts) a single shared connection attempt.
        """
        if self._proto is not None:
            return defer.succeed(self._proto)
        d = defer.Deferred(self._cancel)
        self._waiting.append(d)
        if not self._connecting:
            self._connecting = True
            self._connectionDeferred = (
                self.endpoint.connect(self)
                .addBoth(self._gotResult)
                .addErrback(log.err, 'error connecting %r' % (self,)))
        return d
    def _gotResult(self, result):
        # Connection attempt finished: on success remember the protocol
        # and watch for its loss; either way, fan the result out to all
        # waiters (a Failure arrives in their errback chain).
        self._connecting = False
        if not isinstance(result, failure.Failure):
            self._proto = result
            self._proto.deferred.addErrback(self._lostProtocol)
        waiting, self._waiting = self._waiting, []
        for d in waiting:
            d.callback(result)
        return result
    def _lostProtocol(self, reason):
        # Connection dropped: tell notifyDisconnect() subscribers, forget
        # the protocol, and fail any outstanding JSON-RPC requests.
        log.err(reason, '%r disconnected' % (self,))
        deferreds, self._notifyOnDisconnect = self._notifyOnDisconnect, []
        for d in deferreds:
            d.errback(reason)
        self._proto = None
        self.client.cancelRequests()
    def callRemote(self, __method, *args, **kwargs):
        """Invoke a remote method; the Deferred fires with its result."""
        connectionDeferred = self._getConnection()
        def gotConnection(connection):
            payload, requestDeferred = self.client.getRequest(
                __method, *args, **kwargs)
            connection.sendString(payload)
            return requestDeferred
        connectionDeferred.addCallback(gotConnection)
        return connectionDeferred
    def notifyRemote(self, __method, *args, **kwargs):
        """Send a fire-and-forget notification (no response expected)."""
        connectionDeferred = self._getConnection()
        def gotConnection(connection):
            payload = self.client.getNotification(__method, *args, **kwargs)
            connection.sendString(payload)
        connectionDeferred.addCallback(gotConnection)
        return connectionDeferred
    def connect(self):
        """Eagerly establish the connection; fires with None when up."""
        return self._getConnection().addCallback(lambda ign: None)
    def disconnect(self):
        """Abort the live connection, or cancel an in-flight attempt."""
        if self._proto:
            self._proto.transport.abortConnection()
        elif self._connecting:
            self._connectionDeferred.cancel()
    def notifyDisconnect(self):
        """Return a Deferred that errbacks when the connection is lost."""
        d = defer.Deferred()
        self._notifyOnDisconnect.append(d)
        return d
class JSONRPCServerFactory(protocol.BaseServerFactory):
    """Server factory producing JSONRPCServerProtocol instances for
    incoming TCP/Netstring connections."""
    protocol = JSONRPCServerProtocol
| flowroute/txjason | txjason/netstring.py | Python | mit | 4,336 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# all_tests.py
# Copyright (C) 2014-2016 Fracpete (pythonwekawrapper at gmail dot com)
import unittest
import weka.core.jvm as jvm
import wekatests.coretests.capabilities
import wekatests.coretests.classes
import wekatests.coretests.converters
import wekatests.coretests.dataset
import wekatests.coretests.serialization
import wekatests.coretests.stemmers
import wekatests.coretests.stopwords
import wekatests.coretests.tokenizers
import wekatests.coretests.typeconv
import wekatests.coretests.version
"""
Executes all available tests for `weka.core`.
Add additional test suites to the `suite()` method.
"""
def suite():
    """
    Returns the test suite.

    :return: the test suite combining every weka.core test module
    :rtype: unittest.TestSuite
    """
    test_modules = (
        wekatests.coretests.capabilities,
        wekatests.coretests.classes,
        wekatests.coretests.converters,
        wekatests.coretests.dataset,
        wekatests.coretests.serialization,
        wekatests.coretests.stemmers,
        wekatests.coretests.stopwords,
        wekatests.coretests.tokenizers,
        wekatests.coretests.typeconv,
        wekatests.coretests.version,
    )
    combined = unittest.TestSuite()
    for module in test_modules:
        combined.addTests(module.suite())
    return combined
if __name__ == '__main__':
    # Script entry point: start the JVM with package support enabled,
    # run every core test suite, then shut the JVM down again.
    jvm.start(packages=True)
    unittest.TextTestRunner().run(suite())
    jvm.stop()
| fracpete/python-weka-wrapper3 | tests/wekatests/coretests/all_tests.py | Python | gpl-3.0 | 2,123 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools.translate import _
class MassObject(orm.Model):
    """Configuration record describing which model and fields may be
    mass-edited, plus the sidebar action/button that opens the wizard."""
    _name = "mass.object"
    _columns = {
        'name': fields.char("Name", size=64, required=True, select=1),
        'model_id': fields.many2one(
            'ir.model', 'Model', required=True, select=1),
        'field_ids': fields.many2many(
            'ir.model.fields', 'mass_field_rel', 'mass_id', 'field_id',
            'Fields'),
        'ref_ir_act_window': fields.many2one(
            'ir.actions.act_window', 'Sidebar Action', readonly=True,
            help="Sidebar action to make this template available on records \
of the related document model"),
        'ref_ir_value': fields.many2one(
            'ir.values', 'Sidebar Button', readonly=True,
            help="Sidebar button to open the sidebar action"),
        'model_ids': fields.many2many('ir.model', string='Model List')
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)', _('Name must be unique!')),
    ]
    def onchange_model_id(self, cr, uid, ids, model_id, context=None):
        """Recompute model_ids when the base model changes, including any
        _inherits parent models so their fields are editable too."""
        if context is None:
            context = {}
        if not model_id:
            return {'value': {'model_ids': [(6, 0, [])]}}
        model_ids = [model_id]
        model_obj = self.pool['ir.model']
        active_model_obj = self.pool.get(model_obj.browse(
            cr, uid, model_id).model)
        if active_model_obj._inherits:
            for key, val in active_model_obj._inherits.items():
                found_model_ids = model_obj.search(
                    cr, uid, [('model', '=', key)], context=context)
                model_ids += found_model_ids
        return {'value': {'model_ids': [(6, 0, model_ids)]}}
    def create_action(self, cr, uid, ids, context=None):
        """Create the sidebar action and button (as SUPERUSER, since
        regular users may lack rights on ir.actions/ir.values) and store
        their ids on this record."""
        vals = {}
        action_obj = self.pool['ir.actions.act_window']
        ir_values_obj = self.pool['ir.values']
        for data in self.browse(cr, uid, ids, context=context):
            src_obj = data.model_id.model
            button_name = _('Mass Editing (%s)') % data.name
            vals['ref_ir_act_window'] = action_obj.create(
                cr, SUPERUSER_ID,
                {
                    'name': button_name,
                    'type': 'ir.actions.act_window',
                    'res_model': 'mass.editing.wizard',
                    'src_model': src_obj,
                    'view_type': 'form',
                    'context': "{'mass_editing_object' : %d}" % (data.id),
                    'view_mode': 'form,tree',
                    'target': 'new',
                    'auto_refresh': 1,
                },
                context)
            vals['ref_ir_value'] = ir_values_obj.create(
                cr, SUPERUSER_ID,
                {
                    'name': button_name,
                    'model': src_obj,
                    'key2': 'client_action_multi',
                    'value': (
                        "ir.actions.act_window," +
                        str(vals['ref_ir_act_window'])),
                    'object': True,
                },
                context)
        self.write(
            cr, uid, ids,
            {
                'ref_ir_act_window': vals.get('ref_ir_act_window', False),
                'ref_ir_value': vals.get('ref_ir_value', False),
            },
            context)
        return True
    def unlink_action(self, cr, uid, ids, context=None):
        """Delete the sidebar action/button created by create_action."""
        for template in self.browse(cr, uid, ids, context=context):
            # NOTE(review): bare `except:` swallows everything (including
            # KeyboardInterrupt) and masks the real failure; an
            # `except Exception:` would be safer -- confirm before changing.
            try:
                if template.ref_ir_act_window:
                    act_window_obj = self.pool['ir.actions.act_window']
                    act_window_obj.unlink(
                        cr, SUPERUSER_ID, [template.ref_ir_act_window.id],
                        context=context)
                if template.ref_ir_value:
                    ir_values_obj = self.pool['ir.values']
                    ir_values_obj.unlink(
                        cr, SUPERUSER_ID, template.ref_ir_value.id,
                        context=context)
            except:
                raise orm.except_orm(
                    _("Warning"),
                    _("Deletion of the action record failed."))
        return True
    def unlink(self, cr, uid, ids, context=None):
        """Remove the sidebar action/button before deleting the record."""
        self.unlink_action(cr, uid, ids, context=context)
        return super(MassObject, self).unlink(cr, uid, ids, context=context)
    def copy(self, cr, uid, record_id, default=None, context=None):
        """Duplicate without name (must be unique) and without fields."""
        if default is None:
            default = {}
        default.update({'name': '', 'field_ids': []})
        return super(MassObject, self).copy(
            cr, uid, record_id, default, context=context)
| MackZxh/OCA-Choice | server-tools/mass_editing/models/mass_object.py | Python | lgpl-3.0 | 5,755 |
#! /usr/bin/env python
# Word-frequency counter (Python 2): normalizes a sample text, counts
# word occurrences, and prints them in decreasing order of frequency.
items = " this is a test this is a test this is this this is a test this the quick brown fox jumps over the lazy dog under the bridge. twinkle twinkle little star how I wonder what you are. Queen and King of Hearts, this is the king and Queen of spades "
"""
# replace . with space
items = items.replace(".", " ")
# take out left and right space
b = a.strip(" ")
# take out left space
c = b.lstrip(" ")
print(c)
# take out remaining space
d = c.split(" ")
print(d)
"""
# Normalize: drop sentence periods, then lowercase everything.
items = items.replace(".", " ")
items = items.lower()
##
# To remove all whitespace characters (space, tab, newline, and so on)
# collapse runs of whitespace via split then join, and finally split
# into the word list.
items = ' '.join(items.split())
items = items.split(" ")
print( items )
# Tally occurrences into a dictionary (word -> count).
counts = dict()
for i in items:
    counts[i] = counts.get(i, 0) + 1
#print counts
# Print words sorted by count, most frequent first.
for w in sorted(counts, key=counts.get, reverse=True):
    # print w, d[w]
    print w, counts[w]
| klancer/python102 | wordcount.py | Python | mit | 1,029 |
from sklearn import datasets
import pandas as pd
import numpy as np
# Demo: fit a decision tree on the breast-cancer dataset and explain its
# scores, first with default quantiles, then with custom feature/score
# quantiles.
ds = datasets.load_breast_cancer();
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(min_samples_leaf=30, random_state = 1960)
# Use only the first NC features.
NC = 12
X = ds.data[:,0:NC]
y = ds.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1960)
clf.fit(X_train , y_train)
import sklearn_explain.explainer as expl
# First explainer: default quantile discretization, first-order explanations.
lExplainer = expl.cModelScoreExplainer(clf)
lExplainer.mSettings.mFeatureNames = ds.feature_names[0:NC]
lExplainer.mSettings.mExplanationOrder = 1
lExplainer.fit(X_train)
df_rc = lExplainer.explain(X_test)
print(df_rc.columns)
df_rc_1 = lExplainer.explain(X_test[0].reshape(1, -1))
print(df_rc_1[[col for col in df_rc_1.columns if col.startswith('detailed')]])
# Explain the score = ln(p(1) / (1 - p(1)))
# Custom quantile boundaries for a few features (per-feature cut points).
lFeature_Quantiles = {
    'mean area': {0: -np.inf,
                  1: 571.85},
    'mean concave points': {0: -np.inf,
                            1: 0.51},
    'mean perimeter': {0: -np.inf,
                       1: 98.31},
    'radius error': {0: -np.inf,
                     1: 0.354}
}
# Custom quantile boundaries for the model score itself.
lScore_Quantiles = {
    0: -np.inf,
    1: 0.
}
# Second explainer: same model, but with the custom quantiles above.
lExplainer2 = expl.cModelScoreExplainer(clf)
lExplainer2.mSettings.mFeatureNames = ds.feature_names[0:NC]
lExplainer2.mSettings.mCustomFeatureQuantiles = lFeature_Quantiles
lExplainer2.mSettings.mCustomScoreQuantiles = lScore_Quantiles
lExplainer2.mSettings.mExplanationOrder = 1
lExplainer2.fit(X_train)
df_rc2 = lExplainer2.explain(X_test)
print(df_rc2.columns)
df_rc_2 = lExplainer2.explain(X_test[0].reshape(1, -1))
print(df_rc_2[[col for col in df_rc_2.columns if col.startswith('detailed')]])
| antoinecarme/sklearn_explain | tests/issues/issue_8/issue_8_3.py | Python | bsd-3-clause | 1,657 |
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the view for GCI tasks list page.
"""
from soc.logic.exceptions import AccessViolation
from soc.views.helper import url_patterns
from soc.views.helper import lists
from soc.views.template import Template
from soc.modules.gci.logic import task as task_logic
from soc.modules.gci.models.task import CLAIMABLE
from soc.modules.gci.models.task import GCITask
from soc.modules.gci.views.base import RequestHandler
from soc.modules.gci.views.helper.url_patterns import url
class TaskList(Template):
    """Template for list of tasks.
    """
    def __init__(self, request, data, columns):
        """
        Args:
          request: the current request object.
          data: the RequestData for this request.
          columns: names of the columns to show; any of 'title',
              'organization', 'mentors', 'status'.
        """
        self.request = request
        self.data = data
        r = data.redirect
        self._list_config = lists.ListConfiguration()
        if 'title' in columns:
            self._addTitleColumn()
        if 'organization' in columns:
            self._addOrganizationColumn()
        if 'mentors' in columns:
            self._addMentorsColumn()
        if 'status' in columns:
            self._addStatusColumn()
        #list_config.addColumn(
        #    'task_type', 'Type',
        #    lambda entity, _, all_d, all_t, *args: entity.taskType(all_t))
        #list_config.addColumn('time_to_complete', 'Time to complete',
        #                      lambda entity, *args: entity.taskTimeToComplete())
        # Clicking a row redirects to the task's public page.
        self._list_config.setRowAction(
            lambda e, *args: r.id(e.key().id()).urlOf('gci_view_task'))
    def context(self):
        """Template context: a single list configuration response."""
        description = 'List of tasks for %s' % (
            self.data.program.name)
        task_list = lists.ListConfigurationResponse(
            self.data, self._list_config, 0, description)
        return {
            'lists': [task_list],
        }
    def getListData(self):
        """Return the list content for list index 0, or None otherwise."""
        idx = lists.getListIndex(self.request)
        if idx == 0:
            q = self._getQueryForTasks()
            starter = lists.keyStarter
            # Prefetch referenced orgs and mentor lists so rendering the
            # rows does not issue one datastore fetch per task.
            prefetcher = lists.listModelPrefetcher(
                GCITask, ['org'], ['mentors'])
            response_builder = lists.RawQueryContentResponseBuilder(
                self.request, self._list_config, q,
                starter=starter, prefetcher=prefetcher)
            return response_builder.build()
        else:
            return None
    def templatePath(self):
        return 'v2/modules/gci/task/_task_list.html'
    def _addMentorsColumn(self):
        # Mentor names are resolved from the prefetched mentors mapping.
        self._list_config.addColumn('mentors', 'Mentors',
        lambda entity, mentors, *args: ', '.join(
                    mentors[i].name() for i in entity.mentors))
    def _addOrganizationColumn(self):
        self._list_config.addColumn(
            'org', 'Organization', lambda entity, *args: entity.org.name)
    def _addStatusColumn(self):
        self._list_config.addSimpleColumn('status', 'Status')
    def _addTitleColumn(self):
        self._list_config.addSimpleColumn('title', 'Title')
    def _getQueryForTasks(self):
        # Subclasses must return the datastore query for the tasks shown.
        raise NotImplementedError
class AllTasksList(TaskList):
    """Template for list of all tasks which are claimable for the program.
    """
    # Columns shown on the public all-tasks page.
    _LIST_COLUMNS = ['title', 'organization', 'mentors', 'status']
    def __init__(self, request, data):
        super(AllTasksList, self).__init__(request, data, self._LIST_COLUMNS)
    def _getQueryForTasks(self):
        # Only tasks in a claimable state are listed publicly.
        return task_logic.queryClaimableTasksForProgram(self.data.program)
class TaskListPage(RequestHandler):
    """View for the list task page.
    """
    TASK_LIST_COLUMNS = ['title', 'organization', 'mentors', 'status']
    def templatePath(self):
        return 'v2/modules/gci/task/task_list.html'
    def djangoURLPatterns(self):
        return [
            url(r'tasks/%s$' % url_patterns.PROGRAM, self,
                name='gci_list_tasks'),
        ]
    def checkAccess(self):
        # Publicly visible page: no access restrictions.
        pass
    def jsonContext(self):
        """JSON endpoint for the list; raises if no data is available."""
        list_content = AllTasksList(self.request, self.data).getListData()
        if not list_content:
            raise AccessViolation('You do not have access to this data')
        return list_content.content()
    def context(self):
        """HTML page context: title plus the task list template."""
        return {
            'page_name': "Tasks for %s" % self.data.program.name,
            'task_list': AllTasksList(self.request, self.data),
        }
| adviti/melange | app/soc/modules/gci/views/all_tasks.py | Python | apache-2.0 | 4,524 |
# Copyright 2016:
# * Jim Unroe KC9HI, <rock.unroe@gmail.com>
# * Pavel Milanes CO7WT <pavelmc@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import struct
import logging
import re
LOG = logging.getLogger(__name__)
from chirp import chirp_common, directory, memmap
from chirp import bitwise, errors, util
from chirp.settings import RadioSettingGroup, RadioSetting, \
RadioSettingValueBoolean, RadioSettingValueList, \
RadioSettingValueString, RadioSettingValueInteger, \
RadioSettingValueFloat, RadioSettings
from textwrap import dedent
MEM_FORMAT = """
struct mem {
lbcd rxfreq[4];
lbcd txfreq[4];
lbcd rxtone[2];
lbcd txtone[2];
u8 unknown0:2,
txp:2,
wn:2,
unknown1:1,
bcl:1;
u8 unknown2:2,
revert:1,
dname:1,
unknown3:4;
u8 unknown4[2];
};
struct nam {
char name[6];
u8 unknown1[2];
};
#seekto 0x0000;
struct mem left_memory[500];
#seekto 0x2000;
struct mem right_memory[500];
#seekto 0x4000;
struct nam left_names[500];
#seekto 0x5000;
struct nam right_names[500];
#seekto 0x6000;
u8 left_usedflags[64];
#seekto 0x6040;
u8 left_scanflags[64];
#seekto 0x6080;
u8 right_usedflags[64];
#seekto 0x60C0;
u8 right_scanflags[64];
#seekto 0x6160;
struct {
char line32[32];
} embedded_msg;
#seekto 0x6180;
struct {
u8 sbmute:2, // sub band mute
unknown1:1,
workmodb:1, // work mode (right side)
dw:1, // dual watch
audio:1, // audio output mode (stereo/mono)
unknown2:1,
workmoda:1; // work mode (left side)
u8 scansb:1, // scan stop beep
aftone:3, // af tone control
scand:1, // scan directon
scanr:3; // scan resume
u8 rxexp:1, // rx expansion
ptt:1, // ptt mode
display:1, // display select (frequency/clock)
omode:1, // operaton mode
beep:2, // beep volume
spkr:2; // speaker
u8 cpuclk:1, // operating mode(cpu clock)
fkey:3, // fkey function
mrscan:1, // memory scan type
color:3; // lcd backlight color
u8 vox:2, // vox
voxs:3, // vox sensitivity
mgain:3; // mic gain
u8 wbandb:4, // work band (right side)
wbanda:4; // work band (left side)
u8 sqlb:4, // squelch level (right side)
sqla:4; // squelch level (left side)
u8 apo:4, // auto power off
ars:1, // automatic repeater shift
tot:3; // time out timer
u8 stepb:4, // auto step (right side)
stepa:4; // auto step (left side)
u8 rxcoverm:1, // rx coverage-memory
lcdc:3, // lcd contrast
rxcoverv:1, // rx coverage-vfo
lcdb:3; // lcd brightness
u8 smode:1, // smart function mode
timefmt:1, // time format
datefmt:2, // date format
timesig:1, // time signal
keyb:3; // key/led brightness
u8 dwstop:1, // dual watch stop
unknown3:1,
sqlexp:1, // sql expansion
decbandsel:1, // decoding band select
dtmfmodenc:1, // dtmf mode encode
bell:3; // bell ringer
u8 unknown4:2,
btime:6; // lcd backlight time
u8 unknown5:2,
tz:6; // time zone
u8 unknown618E;
u8 unknown618F;
ul16 offseta; // work offset (left side)
ul16 offsetb; // work offset (right side)
ul16 mrcha; // selected memory channel (left)
ul16 mrchb; // selected memory channel (right)
ul16 wpricha; // work priority channel (left)
ul16 wprichb; // work priority channel (right)
u8 unknown6:3,
datasql:2, // data squelch
dataspd:1, // data speed
databnd:2; // data band select
u8 unknown7:1,
pfkey2:3, // mic p2 key
unknown8:1,
pfkey1:3; // mic p1 key
u8 unknown9:1,
pfkey4:3, // mic p4 key
unknowna:1,
pfkey3:3; // mic p3 key
u8 unknownb:7,
dtmfmoddec:1; // dtmf mode decode
} settings;
#seekto 0x61B0;
struct {
char line16[16];
} poweron_msg;
#seekto 0x6300;
struct {
u8 unknown1:3,
ttdgt:5; // dtmf digit time
u8 unknown2:3,
ttint:5; // dtmf interval time
u8 unknown3:3,
tt1stdgt:5; // dtmf 1st digit time
u8 unknown4:3,
tt1stdly:5; // dtmf 1st digit delay
u8 unknown5:3,
ttdlyqt:5; // dtmf delay when use qt
u8 unknown6:3,
ttdkey:5; // dtmf d key function
u8 unknown7;
u8 unknown8:4,
ttautod:4; // dtmf auto dial group
} dtmf;
#seekto 0x6330;
struct {
u8 unknown1:7,
ttsig:1; // dtmf signal
u8 unknown2:4,
ttintcode:4; // dtmf interval code
u8 unknown3:5,
ttgrpcode:3; // dtmf group code
u8 unknown4:4,
ttautorst:4; // dtmf auto reset time
u8 unknown5:5,
ttalert:3; // dtmf alert tone/transpond
} dtmf2;
#seekto 0x6360;
struct {
u8 code1[8]; // dtmf code
u8 code1_len; // dtmf code length
u8 unknown1[7];
u8 code2[8]; // dtmf code
u8 code2_len; // dtmf code length
u8 unknown2[7];
u8 code3[8]; // dtmf code
u8 code3_len; // dtmf code length
u8 unknown3[7];
u8 code4[8]; // dtmf code
u8 code4_len; // dtmf code length
u8 unknown4[7];
u8 code5[8]; // dtmf code
u8 code5_len; // dtmf code length
u8 unknown5[7];
u8 code6[8]; // dtmf code
u8 code6_len; // dtmf code length
u8 unknown6[7];
u8 code7[8]; // dtmf code
u8 code7_len; // dtmf code length
u8 unknown7[7];
u8 code8[8]; // dtmf code
u8 code8_len; // dtmf code length
u8 unknown8[7];
u8 code9[8]; // dtmf code
u8 code9_len; // dtmf code length
u8 unknown9[7];
} dtmfcode;
"""
# --- Memory map / clone protocol constants ---
MEM_SIZE = 0x8000       # total size of the radio EEPROM image, in bytes
BLOCK_SIZE = 0x40       # bytes transferred per read/write frame
MODES = ["FM", "Auto", "NFM", "AM"]
SKIP_VALUES = ["", "S"]
TONES = chirp_common.TONES
DTCS_CODES = chirp_common.DTCS_CODES
NAME_LENGTH = 6         # max channel-name length supported by the radio
DTMF_CHARS = list("0123456789ABCD*#")
STIMEOUT = 1            # serial read timeout, in seconds

# Basic settings lists
LIST_AFTONE = ["Low-3", "Low-2", "Low-1", "Normal", "High-1", "High-2"]
LIST_SPKR = ["Off", "Front", "Rear", "Front + Rear"]
LIST_AUDIO = ["Monaural", "Stereo"]
LIST_SBMUTE = ["Off", "TX", "RX", "Both"]
LIST_MLNHM = ["Min", "Low", "Normal", "High", "Max"]
LIST_PTT = ["Momentary", "Toggle"]
LIST_RXEXP = ["General", "Wide coverage"]
LIST_VOX = ["Off", "Internal mic", "Front hand-mic", "Rear hand-mic"]
LIST_DISPLAY = ["Frequency", "Timer/Clock"]
LIST_MINMAX = ["Min"] + ["%s" % x for x in range(2, 8)] + ["Max"]
LIST_COLOR = ["White-Blue", "Sky-Blue", "Marine-Blue", "Green",
              "Yellow-Green", "Orange", "Amber", "White"]
LIST_BTIME = ["Continuous"] + ["%s" % x for x in range(1, 61)]
LIST_MRSCAN = ["All", "Selected"]
LIST_DWSTOP = ["Auto", "Hold"]
LIST_SCAND = ["Down", "Up"]
LIST_SCANR = ["Busy", "Hold", "1 sec", "3 sec", "5 sec"]
LIST_APO = ["Off", ".5", "1", "1.5"] + ["%s" % x for x in range(2, 13)]
LIST_BEEP = ["Off", "Low", "High"]
LIST_FKEY = ["MHz/AD-F", "AF Dual 1(line-in)", "AF Dual 2(AM)", "AF Dual 3(FM)",
             "PA", "SQL off", "T-call", "WX"]
LIST_PFKEY = ["Off", "SQL off", "TX power", "Scan", "RPT shift", "Reverse",
              "T-Call"]
LIST_AB = ["A", "B"]
LIST_COVERAGE = ["In band", "All"]
LIST_TOT = ["Off"] + ["%s" % x for x in range(5, 25, 5)] + ["30"]
LIST_DATEFMT = ["yyyy/mm/dd", "yyyy/dd/mm", "mm/dd/yyyy", "dd/mm/yyyy"]
LIST_TIMEFMT = ["24H", "12H"]
# Time-zone labels as shown on the radio's own menu (UTC offset + city)
LIST_TZ = ["-12 INT DL W",
           "-11 MIDWAY",
           "-10 HAST",
           "-9 AKST",
           "-8 PST",
           "-7 MST",
           "-6 CST",
           "-5 EST",
           "-4:30 CARACAS",
           "-4 AST",
           "-3:30 NST",
           "-3 BRASILIA",
           "-2 MATLANTIC",
           "-1 AZORES",
           "-0 LONDON",
           "+0 LONDON",
           "+1 ROME",
           "+2 ATHENS",
           "+3 MOSCOW",
           "+3:30 REHRW",
           "+4 ABUDNABI",
           "+4:30 KABUL",
           "+5 ISLMABAD",
           "+5:30 NEWDELHI",
           "+6 DHAKA",
           "+6:30 YANGON",
           "+7 BANKOK",
           "+8 BEIJING",
           "+9 TOKYO",
           "+10 ADELAIDE",
           "+10 SYDNET",
           "+11 NWCLDNIA",
           "+12 FIJI",
           "+13 NUKALOFA"
           ]
LIST_BELL = ["Off", "1 time", "3 times", "5 times", "8 times", "Continuous"]
LIST_DATABND = ["Main band", "Sub band", "Left band-fixed", "Right band-fixed"]
LIST_DATASPD = ["1200 bps", "9600 bps"]
LIST_DATASQL = ["Busy/TX", "Busy", "TX"]

# Other settings lists
LIST_CPUCLK = ["Clock frequency 1", "Clock frequency 2"]

# Work mode settings lists
LIST_WORK = ["VFO", "Memory System"]
LIST_WBANDB = ["Air", "H-V", "GR1-V", "GR1-U", "H-U", "GR2"]
LIST_WBANDA = ["Line-in", "AM", "FM"] + LIST_WBANDB
LIST_SQL = ["Open"] + ["%s" % x for x in range(1, 10)]
_STEP_LIST = [2.5, 5., 6.25, 8.33, 9., 10., 12.5, 15., 20., 25., 50., 100.,
              200.]
LIST_STEP = ["Auto"] + ["{0:.2f} KHz".format(x) for x in _STEP_LIST]
LIST_SMODE = ["F-1", "F-2"]

# DTMF settings lists
LIST_TTDKEY = ["D code"] + ["Send delay %s s" % x for x in range(1, 17)]
LIST_TT200 = ["%s ms" % x for x in range(50, 210, 10)]
LIST_TT1000 = ["%s ms" % x for x in range(100, 1050, 50)]
LIST_TTSIG = ["Code squelch", "Select call"]
LIST_TTAUTORST = ["Off"] + ["%s s" % x for x in range(1, 16)]
LIST_TTGRPCODE = ["Off"] + list("ABCD*#")
LIST_TTINTCODE = DTMF_CHARS
LIST_TTALERT = ["Off", "Alert tone", "Transpond", "Transpond-ID code",
                "Transpond-transpond code"]
LIST_TTAUTOD = ["%s" % x for x in range(1, 10)]

# valid chars on the LCD
VALID_CHARS = chirp_common.CHARSET_ALPHANUMERIC + \
    "`{|}!\"#$%&'()*+,-./:;<=>?@[]^_"

# Power Levels
POWER_LEVELS = [chirp_common.PowerLevel("Low", watts=5),
                chirp_common.PowerLevel("Mid", watts=20),
                chirp_common.PowerLevel("High", watts=50)]

# B-TECH UV-50X3 id string (firmware fingerprint returned during ident)
UV50X3_id = "VGC6600MD"
def _clean_buffer(radio):
    """Discard any stale bytes pending in the radio's serial input buffer.

    Temporarily shortens the pipe timeout so the drain read returns
    quickly, then restores the normal timeout (STIMEOUT).
    """
    radio.pipe.timeout = 0.005
    junk = radio.pipe.read(256)
    radio.pipe.timeout = STIMEOUT
    if junk:
        # BUG FIX: was `Log.debug` (undefined name), which raised a
        # NameError whenever junk bytes were actually present; the
        # module-level logger is `LOG`.
        LOG.debug("Got %i bytes of junk before starting" % len(junk))
def _check_for_double_ack(radio):
    """Consume an optional duplicate ACK byte some firmwares send.

    Uses a very short timeout so the probe read returns immediately when
    nothing is pending.  Anything other than an ACK (0x06) aborts the
    session with a RadioError.
    """
    radio.pipe.timeout = 0.005
    extra = radio.pipe.read(1)
    radio.pipe.timeout = STIMEOUT
    if not extra:
        return
    if extra != '\x06':
        _exit_program_mode(radio)
        raise errors.RadioError('Expected nothing or ACK, got %r' % extra)
def _rawrecv(radio, amount):
    """Raw read from the radio device.

    Reads exactly `amount` bytes; on any serial error or a short read,
    drops the radio out of program mode and raises errors.RadioError.
    """
    data = ""
    try:
        data = radio.pipe.read(amount)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed to Exception.
        _exit_program_mode(radio)
        msg = "Generic error reading data from radio; check your cable."
        raise errors.RadioError(msg)

    if len(data) != amount:
        _exit_program_mode(radio)
        msg = "Error reading data from radio: not the amount of data we want."
        raise errors.RadioError(msg)

    return data
def _rawsend(radio, data):
    """Raw send to the radio device.

    Wraps the serial write and converts any failure into a RadioError.
    """
    try:
        radio.pipe.write(data)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed to Exception.
        raise errors.RadioError("Error sending data to radio")
def _make_frame(cmd, addr, length, data=""):
"""Pack the info in the headder format"""
frame = struct.pack(">BHB", ord(cmd), addr, length)
# add the data if set
if len(data) != 0:
frame += data
# return the data
return frame
def _recv(radio, addr, length=BLOCK_SIZE):
    """Get data from the radio.

    Reads a 4-byte frame header plus one BLOCK_SIZE data block, then
    validates that the header's command/address/length match what was
    requested.  Raises errors.RadioError on a malformed response.
    """
    # read 4 bytes of header
    hdr = _rawrecv(radio, 4)

    # check for unexpected extra command byte
    c, a, l = struct.unpack(">BHB", hdr)
    if hdr[0:2] == "WW" and a != addr:
        # extra command byte detected
        # throw away the 1st byte and add the next byte in the buffer
        hdr = hdr[1:] + _rawrecv(radio, 1)

    # read 64 bytes (0x40) of data
    data = _rawrecv(radio, (BLOCK_SIZE))

    # DEBUG
    LOG.info("Response:")
    LOG.debug(util.hexprint(hdr + data))

    # re-unpack: hdr may have been re-aligned above
    c, a, l = struct.unpack(">BHB", hdr)
    if a != addr or l != length or c != ord("W"):
        _exit_program_mode(radio)
        LOG.error("Invalid answer for block 0x%04x:" % addr)
        LOG.debug("CMD: %s  ADDR: %04x  SIZE: %02x" % (c, a, l))
        raise errors.RadioError("Unknown response from the radio")

    return data
def _do_ident(radio):
    """Put the radio in PROGRAM mode & identify it.

    Configures the serial link (115200 8N1), drains stale input, sends
    the magic string, and checks the returned ident against radio.IDENT.
    Raises errors.RadioError when the ident does not match.
    """
    #  set the serial discipline
    radio.pipe.baudrate = 115200
    radio.pipe.parity = "N"
    radio.pipe.timeout = STIMEOUT

    # flush input buffer
    _clean_buffer(radio)

    magic = "V66LINK"

    _rawsend(radio, magic)

    # Ok, get the ident string
    ident = _rawrecv(radio, 9)

    # check if ident is OK
    if ident != radio.IDENT:
        # bad ident
        msg = "Incorrect model ID, got this:"
        msg += util.hexprint(ident)
        LOG.debug(msg)
        raise errors.RadioError("Radio identification failed.")

    # DEBUG
    LOG.info("Positive ident, got this:")
    LOG.debug(util.hexprint(ident))

    return True
def _exit_program_mode(radio):
    """Send the exit command ('E', 0x45) to leave program mode."""
    _rawsend(radio, "\x45")
def _download(radio):
    """Get the memory map.

    Reads the full MEM_SIZE image from the radio in BLOCK_SIZE chunks,
    updating the UI progress bar as it goes, and returns the raw bytes.
    """
    # put radio in program mode and identify it
    _do_ident(radio)

    # UI progress
    status = chirp_common.Status()
    status.cur = 0
    status.max = MEM_SIZE / BLOCK_SIZE
    status.msg = "Cloning from radio..."
    radio.status_fn(status)

    data = ""
    for addr in range(0, MEM_SIZE, BLOCK_SIZE):
        frame = _make_frame("R", addr, BLOCK_SIZE)
        # DEBUG
        LOG.info("Request sent:")
        LOG.debug(util.hexprint(frame))

        # sending the read request
        _rawsend(radio, frame)

        # now we read
        d = _recv(radio, addr)

        # aggregate the data
        data += d

        # UI Update
        status.cur = addr / BLOCK_SIZE
        status.msg = "Cloning from radio..."
        radio.status_fn(status)

    _exit_program_mode(radio)

    return data
def _upload(radio):
    """Upload procedure.

    Writes the image back to the radio in BLOCK_SIZE chunks, expecting
    an ACK (0x06) after each block.
    """
    # NOTE(review): only the first 0x7000 bytes are written back, not the
    # full 0x8000 image — presumably the tail holds calibration/read-only
    # data; confirm against the factory software before changing.
    MEM_SIZE = 0x7000

    # put radio in program mode and identify it
    _do_ident(radio)

    # UI progress
    status = chirp_common.Status()
    status.cur = 0
    status.max = MEM_SIZE / BLOCK_SIZE
    status.msg = "Cloning to radio..."
    radio.status_fn(status)

    # the fun start here
    for addr in range(0, MEM_SIZE, BLOCK_SIZE):
        # sending the data
        data = radio.get_mmap()[addr:addr + BLOCK_SIZE]

        frame = _make_frame("W", addr, BLOCK_SIZE, data)

        _rawsend(radio, frame)

        # receiving the response
        ack = _rawrecv(radio, 1)
        if ack != "\x06":
            _exit_program_mode(radio)
            msg = "Bad ack writing block 0x%04x" % addr
            raise errors.RadioError(msg)

        # some firmwares send the ACK twice; swallow the duplicate
        _check_for_double_ack(radio)

        # UI Update
        status.cur = addr / BLOCK_SIZE
        status.msg = "Cloning to radio..."
        radio.status_fn(status)

    _exit_program_mode(radio)
def model_match(cls, data):
    """Match the opened/downloaded image to the correct version.

    The 8 bytes at offset 0x6140 hold the firmware fingerprint; the
    image matches when that fingerprint occurs in the class IDENT
    string.  Returns a bool.
    """
    rid = data[0x6140:0x6148]
    # was an if/return True/return False chain plus a commented-out
    # `cls._fileid` variant; return the membership test directly.
    return rid in cls.IDENT
class VGCStyleRadio(chirp_common.CloneModeRadio,
                    chirp_common.ExperimentalRadio):
    """BTECH's UV-50X3"""
    VENDOR = "BTECH"
    # receiver coverage sub-bands, in Hz (lower, upper)
    _air_range = (108000000, 136000000)
    _vhf_range = (136000000, 174000000)
    _vhf2_range = (174000000, 250000000)
    _220_range = (222000000, 225000000)
    _gen1_range = (300000000, 400000000)
    _uhf_range = (400000000, 480000000)
    _gen2_range = (480000000, 520000000)
    # highest memory channel index (channels 0..499)
    _upper = 499
    # filled in by concrete subclasses (see UV50X3 below)
    MODEL = ""
    IDENT = ""
    @classmethod
    def get_prompts(cls):
        """Return the experimental-driver warning and clone instructions
        shown to the user before download/upload."""
        rp = chirp_common.RadioPrompts()
        rp.experimental = \
            ('The UV-50X3 driver is a beta version.\n'
             '\n'
             'Please save an unedited copy of your first successful\n'
             'download to a CHIRP Radio Images(*.img) file.'
             )
        rp.pre_download = _(dedent("""\
            Follow this instructions to download your info:
            1 - Turn off your radio
            2 - Connect your interface cable
            3 - Turn on your radio
            4 - Do the download of your radio data
            """))
        rp.pre_upload = _(dedent("""\
            Follow this instructions to upload your info:
            1 - Turn off your radio
            2 - Connect your interface cable
            3 - Turn on your radio
            4 - Do the upload of your radio data
            """))
        return rp
    def get_features(self):
        """Describe this radio's capabilities to the CHIRP core/UI."""
        rf = chirp_common.RadioFeatures()
        rf.has_settings = True
        rf.has_bank = False
        rf.has_tuning_step = False
        rf.can_odd_split = True
        rf.has_name = True
        rf.has_offset = True
        rf.has_mode = True
        rf.has_dtcs = True
        rf.has_rx_dtcs = True
        rf.has_dtcs_polarity = True
        rf.has_ctone = True
        rf.has_cross = True
        # only the parent (VARIANT == "") exposes the left/right sub-devices
        rf.has_sub_devices = self.VARIANT == ""
        rf.valid_modes = MODES
        rf.valid_characters = VALID_CHARS
        rf.valid_duplexes = ["", "-", "+", "split", "off"]
        rf.valid_tmodes = ['', 'Tone', 'TSQL', 'DTCS', 'Cross']
        rf.valid_cross_modes = [
            "Tone->Tone",
            "DTCS->",
            "->DTCS",
            "Tone->DTCS",
            "DTCS->Tone",
            "->Tone",
            "DTCS->DTCS"]
        rf.valid_power_levels = POWER_LEVELS
        rf.valid_skips = SKIP_VALUES
        rf.valid_name_length = NAME_LENGTH
        rf.valid_dtcs_codes = DTCS_CODES
        rf.valid_tuning_steps = _STEP_LIST
        rf.valid_bands = [self._air_range,
                          self._vhf_range,
                          self._vhf2_range,
                          self._220_range,
                          self._gen1_range,
                          self._uhf_range,
                          self._gen2_range]
        rf.memory_bounds = (0, self._upper)
        return rf
    def get_sub_devices(self):
        # expose the two VFOs (left/right) as separate CHIRP sub-devices
        # sharing the same memory map
        return [UV50X3Left(self._mmap), UV50X3Right(self._mmap)]
    def sync_in(self):
        """Download from radio"""
        try:
            data = _download(self)
        except errors.RadioError:
            # Pass through any real errors we raise
            raise
        except:
            # If anything unexpected happens, make sure we raise
            # a RadioError and log the problem
            LOG.exception('Unexpected error during download')
            raise errors.RadioError('Unexpected error communicating '
                                    'with the radio')
        self._mmap = memmap.MemoryMap(data)
        self.process_mmap()
def sync_out(self):
"""Upload to radio"""
try:
_upload(self)
except:
# If anything unexpected happens, make sure we raise
# a RadioError and log the problem
LOG.exception('Unexpected error during upload')
raise errors.RadioError('Unexpected error communicating '
'with the radio')
    def process_mmap(self):
        """Process the mem map into the mem object"""
        # parse the raw image against the MEM_FORMAT bitwise description
        self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
def get_raw_memory(self, number):
return repr(self._memobj.memory[number])
def decode_tone(self, val):
"""Parse the tone data to decode from mem, it returns:
Mode (''|DTCS|Tone), Value (None|###), Polarity (None,N,R)"""
if val.get_raw() == "\xFF\xFF":
return '', None, None
val = int(val)
if val >= 12000:
a = val - 12000
return 'DTCS', a, 'R'
elif val >= 8000:
a = val - 8000
return 'DTCS', a, 'N'
else:
a = val / 10.0
return 'Tone', a, None
    def encode_tone(self, memval, mode, value, pol):
        """Parse the tone data to encode from UI to mem.

        Inverse of decode_tone: '' blanks the field to 0xFFFF, 'Tone'
        stores tenths of Hz, 'DTCS' stores the code and then ORs the
        polarity flag into the high byte (order matters: set_value
        first, then set_bits).
        """
        if mode == '':
            memval[0].set_raw(0xFF)
            memval[1].set_raw(0xFF)
        elif mode == 'Tone':
            memval.set_value(int(value * 10))
        elif mode == 'DTCS':
            # 0x80 = DTCS marker; 0xC0 adds the reversed-polarity bit
            flag = 0x80 if pol == 'N' else 0xC0
            memval.set_value(value)
            memval[1].set_bits(flag)
        else:
            raise Exception("Internal error: invalid mode `%s'" % mode)
    def _memory_obj(self, suffix=""):
        # per-VFO channel memory array, e.g. "left_memory"
        return getattr(self._memobj, "%s_memory%s" % (self._vfo, suffix))

    def _name_obj(self, suffix=""):
        # per-VFO channel name array, e.g. "left_names"
        return getattr(self._memobj, "%s_names%s" % (self._vfo, suffix))

    def _scan_obj(self, suffix=""):
        # per-VFO scan-enable bit flags (one bit per channel)
        return getattr(self._memobj, "%s_scanflags%s" % (self._vfo, suffix))

    def _used_obj(self, suffix=""):
        # per-VFO channel-in-use bit flags (one bit per channel)
        return getattr(self._memobj, "%s_usedflags%s" % (self._vfo, suffix))
    def get_memory(self, number):
        """Get the mem representation from the radio image"""
        # used/scan flags are packed 8 channels per byte
        bitpos = (1 << (number % 8))
        bytepos = (number / 8)

        _mem = self._memory_obj()[number]
        _names = self._name_obj()[number]
        _scn = self._scan_obj()[bytepos]
        _usd = self._used_obj()[bytepos]

        isused = bitpos & int(_usd)
        isscan = bitpos & int(_scn)

        # Create a high-level memory object to return to the UI
        mem = chirp_common.Memory()

        # Memory number
        mem.number = number

        if not isused:
            mem.empty = True
            return mem

        # Freq and offset (stored in units of 10 Hz)
        mem.freq = int(_mem.rxfreq) * 10
        # tx freq can be blank
        if _mem.get_raw()[4] == "\xFF":
            # TX freq not set
            mem.offset = 0
            mem.duplex = "off"
        else:
            # TX feq set
            offset = (int(_mem.txfreq) * 10) - mem.freq
            if offset < 0:
                mem.offset = abs(offset)
                mem.duplex = "-"
            elif offset > 0:
                mem.offset = offset
                mem.duplex = "+"
            else:
                mem.offset = 0

        # skip: a cleared scan bit means "skip this channel"
        if not isscan:
            mem.skip = "S"

        # name TAG of the channel (0xFF-padded in the image)
        mem.name = str(_names.name).strip("\xFF")

        # power
        mem.power = POWER_LEVELS[int(_mem.txp)]

        # wide/narrow
        mem.mode = MODES[int(_mem.wn)]

        # tone data
        rxtone = txtone = None
        txtone = self.decode_tone(_mem.txtone)
        rxtone = self.decode_tone(_mem.rxtone)
        chirp_common.split_tone_decode(mem, txtone, rxtone)

        # Extra per-channel settings
        mem.extra = RadioSettingGroup("extra", "Extra")

        bcl = RadioSetting("bcl", "Busy channel lockout",
                           RadioSettingValueBoolean(bool(_mem.bcl)))
        mem.extra.append(bcl)

        revert = RadioSetting("revert", "Revert",
                              RadioSettingValueBoolean(bool(_mem.revert)))
        mem.extra.append(revert)

        dname = RadioSetting("dname", "Display name",
                             RadioSettingValueBoolean(bool(_mem.dname)))
        mem.extra.append(dname)

        return mem
    def set_memory(self, mem):
        """Set the memory data in the eeprom img from the UI"""
        # used/scan flags are packed 8 channels per byte
        bitpos = (1 << (mem.number % 8))
        bytepos = (mem.number / 8)

        _mem = self._memory_obj()[mem.number]
        _names = self._name_obj()[mem.number]
        _scn = self._scan_obj()[bytepos]
        _usd = self._used_obj()[bytepos]

        if mem.empty:
            # clear the used/scan bits and blank the channel to 0xFF
            _usd &= ~bitpos
            _scn &= ~bitpos
            _mem.set_raw("\xFF" * 16)
            _names.name = ("\xFF" * 6)
            return
        else:
            _usd |= bitpos

        # frequency (stored in units of 10 Hz)
        _mem.rxfreq = mem.freq / 10

        # duplex
        if mem.duplex == "+":
            _mem.txfreq = (mem.freq + mem.offset) / 10
        elif mem.duplex == "-":
            _mem.txfreq = (mem.freq - mem.offset) / 10
        elif mem.duplex == "off":
            # blank the TX freq entirely (get_memory detects the 0xFF)
            for i in _mem.txfreq:
                i.set_raw("\xFF")
        elif mem.duplex == "split":
            _mem.txfreq = mem.offset / 10
        else:
            _mem.txfreq = mem.freq / 10

        # tone data
        ((txmode, txtone, txpol), (rxmode, rxtone, rxpol)) = \
            chirp_common.split_tone_encode(mem)
        self.encode_tone(_mem.txtone, txmode, txtone, txpol)
        self.encode_tone(_mem.rxtone, rxmode, rxtone, rxpol)

        # name TAG of the channel (0xFF-padded to 6 chars)
        _names.name = mem.name.ljust(6, "\xFF")

        # power level, # default power level is low
        _mem.txp = 0 if mem.power is None else POWER_LEVELS.index(mem.power)

        # wide/narrow
        _mem.wn = MODES.index(mem.mode)

        # scan bit set means "include in scan"; cleared means skip
        if mem.skip == "S":
            _scn &= ~bitpos
        else:
            _scn |= bitpos

        # autoset display to display name if filled
        if mem.extra:
            # mem.extra only seems to be populated when called from edit panel
            dname = mem.extra["dname"]
        else:
            dname = None
        if mem.name:
            _mem.dname = True
            if dname and not dname.changed():
                dname.value = True
        else:
            _mem.dname = False
            if dname and not dname.changed():
                dname.value = False

        # reseting unknowns, this has to be set by hand
        _mem.unknown0 = 0
        _mem.unknown1 = 0
        _mem.unknown2 = 0
        _mem.unknown3 = 0

        # extra settings
        if len(mem.extra) > 0:
            # there are setting, parse
            for setting in mem.extra:
                setattr(_mem, setting.get_name(), setting.value)
        else:
            # there are no extra settings, load defaults
            _mem.bcl = 0
            _mem.revert = 0
            _mem.dname = 1
    def _bbcd2dtmf(self, bcdarr, strlen=16):
        """Convert a packed-BCD array into a DTMF string.

        Like bbcd, but nibbles 0xE/0xF map to '*'/'#'.  The result is
        truncated to `strlen` characters.
        """
        # doing bbcd, but with support for ABCD*#
        LOG.debug(bcdarr.get_value())
        string = ''.join("%02X" % b for b in bcdarr)
        LOG.debug("@_bbcd2dtmf, received: %s" % string)
        string = string.replace('E', '*').replace('F', '#')
        # NOTE(review): strlen > 16 is never trimmed — presumably callers
        # always pass the stored code length (<= 16); confirm.
        if strlen <= 16:
            string = string[:strlen]
        return string
    def _dtmf2bbcd(self, value):
        """Convert a DTMF setting value into a packed-BCD byte list.

        Inverse of _bbcd2dtmf: '*'/'#' become nibbles 0xE/0xF and the
        string is right-padded with 0xF nibbles to 16 digits (8 bytes).
        """
        dtmfstr = value.get_value()
        dtmfstr = dtmfstr.replace('*', 'E').replace('#', 'F')
        dtmfstr = str.ljust(dtmfstr.strip(), 16, "F")
        bcdarr = list(bytearray.fromhex(dtmfstr))
        LOG.debug("@_dtmf2bbcd, sending: %s" % bcdarr)
        return bcdarr
    def get_settings(self):
        """Translate the bit in the mem_struct into settings in the UI.

        Builds four RadioSettingGroups (basic / other / work mode / DTMF)
        mirroring the radio's own menu numbering (A01..I03 comments
        below).  Several stored values carry a fixed bias that is removed
        here and re-added in set_settings (wbanda -1, wbandb -4, DTMF
        timings -5/-2).
        """
        _mem = self._memobj
        basic = RadioSettingGroup("basic", "Basic Settings")
        other = RadioSettingGroup("other", "Other Settings")
        work = RadioSettingGroup("work", "Work Mode Settings")
        dtmf = RadioSettingGroup("dtmf", "DTMF Settings")
        top = RadioSettings(basic, other, work, dtmf)

        # Basic

        # Audio: A01-A04
        aftone = RadioSetting("settings.aftone", "AF tone control",
                              RadioSettingValueList(LIST_AFTONE, LIST_AFTONE[
                                  _mem.settings.aftone]))
        basic.append(aftone)

        spkr = RadioSetting("settings.spkr", "Speaker",
                            RadioSettingValueList(LIST_SPKR, LIST_SPKR[
                                _mem.settings.spkr]))
        basic.append(spkr)

        audio = RadioSetting("settings.audio", "Stereo/Mono",
                             RadioSettingValueList(LIST_AUDIO, LIST_AUDIO[
                                 _mem.settings.audio]))
        basic.append(audio)

        sbmute = RadioSetting("settings.sbmute", "Sub band mute",
                              RadioSettingValueList(LIST_SBMUTE, LIST_SBMUTE[
                                  _mem.settings.sbmute]))
        basic.append(sbmute)

        # TX/RX: B01-B08
        mgain = RadioSetting("settings.mgain", "Mic gain",
                             RadioSettingValueList(LIST_MLNHM, LIST_MLNHM[
                                 _mem.settings.mgain]))
        basic.append(mgain)

        ptt = RadioSetting("settings.ptt", "PTT mode",
                           RadioSettingValueList(LIST_PTT, LIST_PTT[
                               _mem.settings.ptt]))
        basic.append(ptt)

        # B03 (per channel)
        # B04 (per channel)
        rxexp = RadioSetting("settings.rxexp", "RX expansion",
                             RadioSettingValueList(LIST_RXEXP, LIST_RXEXP[
                                 _mem.settings.rxexp]))
        basic.append(rxexp)

        vox = RadioSetting("settings.vox", "Vox",
                           RadioSettingValueList(LIST_VOX, LIST_VOX[
                               _mem.settings.vox]))
        basic.append(vox)

        voxs = RadioSetting("settings.voxs", "Vox sensitivity",
                            RadioSettingValueList(LIST_MLNHM, LIST_MLNHM[
                                _mem.settings.voxs]))
        basic.append(voxs)

        # B08 (per channel)

        # Display: C01-C06
        display = RadioSetting("settings.display", "Display select",
                               RadioSettingValueList(LIST_DISPLAY,
                                   LIST_DISPLAY[_mem.settings.display]))
        basic.append(display)

        lcdb = RadioSetting("settings.lcdb", "LCD brightness",
                            RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
                                _mem.settings.lcdb]))
        basic.append(lcdb)

        color = RadioSetting("settings.color", "LCD color",
                             RadioSettingValueList(LIST_COLOR, LIST_COLOR[
                                 _mem.settings.color]))
        basic.append(color)

        lcdc = RadioSetting("settings.lcdc", "LCD contrast",
                            RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
                                _mem.settings.lcdc]))
        basic.append(lcdc)

        btime = RadioSetting("settings.btime", "LCD backlight time",
                             RadioSettingValueList(LIST_BTIME, LIST_BTIME[
                                 _mem.settings.btime]))
        basic.append(btime)

        keyb = RadioSetting("settings.keyb", "Key brightness",
                            RadioSettingValueList(LIST_MINMAX, LIST_MINMAX[
                                _mem.settings.keyb]))
        basic.append(keyb)

        # Memory: D01-D04
        # D01 (per channel)
        # D02 (per channel)
        mrscan = RadioSetting("settings.mrscan", "Memory scan type",
                              RadioSettingValueList(LIST_MRSCAN, LIST_MRSCAN[
                                  _mem.settings.mrscan]))
        basic.append(mrscan)

        # D04 (per channel)

        # Scan: E01-E04
        dwstop = RadioSetting("settings.dwstop", "Dual watch stop",
                              RadioSettingValueList(LIST_DWSTOP, LIST_DWSTOP[
                                  _mem.settings.dwstop]))
        basic.append(dwstop)

        scand = RadioSetting("settings.scand", "Scan direction",
                             RadioSettingValueList(LIST_SCAND, LIST_SCAND[
                                 _mem.settings.scand]))
        basic.append(scand)

        scanr = RadioSetting("settings.scanr", "Scan resume",
                             RadioSettingValueList(LIST_SCANR, LIST_SCANR[
                                 _mem.settings.scanr]))
        basic.append(scanr)

        scansb = RadioSetting("settings.scansb", "Scan stop beep",
                              RadioSettingValueBoolean(_mem.settings.scansb))
        basic.append(scansb)

        # System: F01-F09
        apo = RadioSetting("settings.apo", "Automatic power off [hours]",
                           RadioSettingValueList(LIST_APO, LIST_APO[
                               _mem.settings.apo]))
        basic.append(apo)

        ars = RadioSetting("settings.ars", "Automatic repeater shift",
                           RadioSettingValueBoolean(_mem.settings.ars))
        basic.append(ars)

        beep = RadioSetting("settings.beep", "Beep volume",
                            RadioSettingValueList(LIST_BEEP, LIST_BEEP[
                                _mem.settings.beep]))
        basic.append(beep)

        fkey = RadioSetting("settings.fkey", "F key",
                            RadioSettingValueList(LIST_FKEY, LIST_FKEY[
                                _mem.settings.fkey]))
        basic.append(fkey)

        pfkey1 = RadioSetting("settings.pfkey1", "Mic P1 key",
                              RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
                                  _mem.settings.pfkey1]))
        basic.append(pfkey1)

        pfkey2 = RadioSetting("settings.pfkey2", "Mic P2 key",
                              RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
                                  _mem.settings.pfkey2]))
        basic.append(pfkey2)

        pfkey3 = RadioSetting("settings.pfkey3", "Mic P3 key",
                              RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
                                  _mem.settings.pfkey3]))
        basic.append(pfkey3)

        pfkey4 = RadioSetting("settings.pfkey4", "Mic P4 key",
                              RadioSettingValueList(LIST_PFKEY, LIST_PFKEY[
                                  _mem.settings.pfkey4]))
        basic.append(pfkey4)

        omode = RadioSetting("settings.omode", "Operation mode",
                             RadioSettingValueList(LIST_AB, LIST_AB[
                                 _mem.settings.omode]))
        basic.append(omode)

        rxcoverm = RadioSetting("settings.rxcoverm", "RX coverage - memory",
                                RadioSettingValueList(LIST_COVERAGE,
                                    LIST_COVERAGE[_mem.settings.rxcoverm]))
        basic.append(rxcoverm)

        rxcoverv = RadioSetting("settings.rxcoverv", "RX coverage - VFO",
                                RadioSettingValueList(LIST_COVERAGE,
                                    LIST_COVERAGE[_mem.settings.rxcoverv]))
        basic.append(rxcoverv)

        tot = RadioSetting("settings.tot", "Time out timer [min]",
                           RadioSettingValueList(LIST_TOT, LIST_TOT[
                               _mem.settings.tot]))
        basic.append(tot)

        # Timer/Clock: G01-G04
        # G01
        datefmt = RadioSetting("settings.datefmt", "Date format",
                               RadioSettingValueList(LIST_DATEFMT,
                                   LIST_DATEFMT[_mem.settings.datefmt]))
        basic.append(datefmt)

        timefmt = RadioSetting("settings.timefmt", "Time format",
                               RadioSettingValueList(LIST_TIMEFMT,
                                   LIST_TIMEFMT[_mem.settings.timefmt]))
        basic.append(timefmt)

        timesig = RadioSetting("settings.timesig", "Time signal",
                               RadioSettingValueBoolean(_mem.settings.timesig))
        basic.append(timesig)

        tz = RadioSetting("settings.tz", "Time zone",
                          RadioSettingValueList(LIST_TZ, LIST_TZ[
                              _mem.settings.tz]))
        basic.append(tz)

        # Signaling: H01-H06
        bell = RadioSetting("settings.bell", "Bell ringer",
                            RadioSettingValueList(LIST_BELL, LIST_BELL[
                                _mem.settings.bell]))
        basic.append(bell)

        # H02 (per channel)
        dtmfmodenc = RadioSetting("settings.dtmfmodenc", "DTMF mode encode",
                                  RadioSettingValueBoolean(
                                      _mem.settings.dtmfmodenc))
        basic.append(dtmfmodenc)

        dtmfmoddec = RadioSetting("settings.dtmfmoddec", "DTMF mode decode",
                                  RadioSettingValueBoolean(
                                      _mem.settings.dtmfmoddec))
        basic.append(dtmfmoddec)

        # H04 (per channel)
        decbandsel = RadioSetting("settings.decbandsel", "DTMF band select",
                                  RadioSettingValueList(LIST_AB, LIST_AB[
                                      _mem.settings.decbandsel]))
        basic.append(decbandsel)

        sqlexp = RadioSetting("settings.sqlexp", "SQL expansion",
                              RadioSettingValueBoolean(_mem.settings.sqlexp))
        basic.append(sqlexp)

        # Pkt: I01-I03
        databnd = RadioSetting("settings.databnd", "Packet data band",
                               RadioSettingValueList(LIST_DATABND, LIST_DATABND[
                                   _mem.settings.databnd]))
        basic.append(databnd)

        dataspd = RadioSetting("settings.dataspd", "Packet data speed",
                               RadioSettingValueList(LIST_DATASPD, LIST_DATASPD[
                                   _mem.settings.dataspd]))
        basic.append(dataspd)

        datasql = RadioSetting("settings.datasql", "Packet data squelch",
                               RadioSettingValueList(LIST_DATASQL, LIST_DATASQL[
                                   _mem.settings.datasql]))
        basic.append(datasql)

        # Other
        dw = RadioSetting("settings.dw", "Dual watch",
                          RadioSettingValueBoolean(_mem.settings.dw))
        other.append(dw)

        cpuclk = RadioSetting("settings.cpuclk", "CPU clock frequency",
                              RadioSettingValueList(LIST_CPUCLK, LIST_CPUCLK[
                                  _mem.settings.cpuclk]))
        other.append(cpuclk)

        def _filter(name):
            # replace any character the LCD cannot show with a space
            filtered = ""
            for char in str(name):
                if char in VALID_CHARS:
                    filtered += char
                else:
                    filtered += " "
            return filtered

        line16 = RadioSetting("poweron_msg.line16", "Power-on message",
                              RadioSettingValueString(0, 16, _filter(
                                  _mem.poweron_msg.line16)))
        other.append(line16)

        line32 = RadioSetting("embedded_msg.line32", "Embedded message",
                              RadioSettingValueString(0, 32, _filter(
                                  _mem.embedded_msg.line32)))
        other.append(line32)

        # Work
        workmoda = RadioSetting("settings.workmoda", "Work mode A",
                                RadioSettingValueList(LIST_WORK, LIST_WORK[
                                    _mem.settings.workmoda]))
        work.append(workmoda)

        workmodb = RadioSetting("settings.workmodb", "Work mode B",
                                RadioSettingValueList(LIST_WORK, LIST_WORK[
                                    _mem.settings.workmodb]))
        work.append(workmodb)

        # stored 1-based: index 0 of LIST_WBANDA corresponds to raw value 1
        wbanda = RadioSetting("settings.wbanda", "Work band A",
                              RadioSettingValueList(LIST_WBANDA, LIST_WBANDA[
                                  (_mem.settings.wbanda) - 1]))
        work.append(wbanda)

        # stored with a +4 bias (LIST_WBANDB is the tail of LIST_WBANDA)
        wbandb = RadioSetting("settings.wbandb", "Work band B",
                              RadioSettingValueList(LIST_WBANDB, LIST_WBANDB[
                                  (_mem.settings.wbandb) - 4]))
        work.append(wbandb)

        sqla = RadioSetting("settings.sqla", "Squelch A",
                            RadioSettingValueList(LIST_SQL, LIST_SQL[
                                _mem.settings.sqla]))
        work.append(sqla)

        sqlb = RadioSetting("settings.sqlb", "Squelch B",
                            RadioSettingValueList(LIST_SQL, LIST_SQL[
                                _mem.settings.sqlb]))
        work.append(sqlb)

        stepa = RadioSetting("settings.stepa", "Auto step A",
                             RadioSettingValueList(LIST_STEP, LIST_STEP[
                                 _mem.settings.stepa]))
        work.append(stepa)

        stepb = RadioSetting("settings.stepb", "Auto step B",
                             RadioSettingValueList(LIST_STEP, LIST_STEP[
                                 _mem.settings.stepb]))
        work.append(stepb)

        mrcha = RadioSetting("settings.mrcha", "Current channel A",
                             RadioSettingValueInteger(0, 499,
                                 _mem.settings.mrcha))
        work.append(mrcha)

        mrchb = RadioSetting("settings.mrchb", "Current channel B",
                             RadioSettingValueInteger(0, 499,
                                 _mem.settings.mrchb))
        work.append(mrchb)

        # offsets are stored as value * 100 (MHz with 2 decimals)
        val = _mem.settings.offseta / 100.00
        offseta = RadioSetting("settings.offseta", "Offset A (0-37.95)",
                               RadioSettingValueFloat(0, 38.00, val, 0.05, 2))
        work.append(offseta)

        val = _mem.settings.offsetb / 100.00
        offsetb = RadioSetting("settings.offsetb", "Offset B (0-79.95)",
                               RadioSettingValueFloat(0, 80.00, val, 0.05, 2))
        work.append(offsetb)

        wpricha = RadioSetting("settings.wpricha", "Priority channel A",
                               RadioSettingValueInteger(0, 499,
                                   _mem.settings.wpricha))
        work.append(wpricha)

        wprichb = RadioSetting("settings.wprichb", "Priority channel B",
                               RadioSettingValueInteger(0, 499,
                                   _mem.settings.wprichb))
        work.append(wprichb)

        smode = RadioSetting("settings.smode", "Smart function mode",
                             RadioSettingValueList(LIST_SMODE, LIST_SMODE[
                                 _mem.settings.smode]))
        work.append(smode)

        # dtmf (timing values carry fixed biases, removed for display)
        ttdkey = RadioSetting("dtmf.ttdkey", "D key function",
                              RadioSettingValueList(LIST_TTDKEY, LIST_TTDKEY[
                                  _mem.dtmf.ttdkey]))
        dtmf.append(ttdkey)

        ttdgt = RadioSetting("dtmf.ttdgt", "Digit time",
                             RadioSettingValueList(LIST_TT200, LIST_TT200[
                                 (_mem.dtmf.ttdgt) - 5]))
        dtmf.append(ttdgt)

        ttint = RadioSetting("dtmf.ttint", "Interval time",
                             RadioSettingValueList(LIST_TT200, LIST_TT200[
                                 (_mem.dtmf.ttint) - 5]))
        dtmf.append(ttint)

        tt1stdgt = RadioSetting("dtmf.tt1stdgt", "1st digit time",
                                RadioSettingValueList(LIST_TT200, LIST_TT200[
                                    (_mem.dtmf.tt1stdgt) - 5]))
        dtmf.append(tt1stdgt)

        tt1stdly = RadioSetting("dtmf.tt1stdly", "1st digit delay time",
                                RadioSettingValueList(LIST_TT1000, LIST_TT1000[
                                    (_mem.dtmf.tt1stdly) - 2]))
        dtmf.append(tt1stdly)

        ttdlyqt = RadioSetting("dtmf.ttdlyqt", "Digit delay when use qt",
                               RadioSettingValueList(LIST_TT1000, LIST_TT1000[
                                   (_mem.dtmf.ttdlyqt) - 2]))
        dtmf.append(ttdlyqt)

        ttsig = RadioSetting("dtmf2.ttsig", "Signal",
                             RadioSettingValueList(LIST_TTSIG, LIST_TTSIG[
                                 _mem.dtmf2.ttsig]))
        dtmf.append(ttsig)

        ttautorst = RadioSetting("dtmf2.ttautorst", "Auto reset time",
                                 RadioSettingValueList(LIST_TTAUTORST,
                                     LIST_TTAUTORST[_mem.dtmf2.ttautorst]))
        dtmf.append(ttautorst)

        # out-of-range stored values fall back to index 0 ("Off")
        if _mem.dtmf2.ttgrpcode > 0x06:
            val = 0x00
        else:
            val = _mem.dtmf2.ttgrpcode
        ttgrpcode = RadioSetting("dtmf2.ttgrpcode", "Group code",
                                 RadioSettingValueList(LIST_TTGRPCODE,
                                     LIST_TTGRPCODE[val]))
        dtmf.append(ttgrpcode)

        ttintcode = RadioSetting("dtmf2.ttintcode", "Interval code",
                                 RadioSettingValueList(LIST_TTINTCODE,
                                     LIST_TTINTCODE[_mem.dtmf2.ttintcode]))
        dtmf.append(ttintcode)

        # out-of-range stored values fall back to index 0 ("Off")
        if _mem.dtmf2.ttalert > 0x04:
            val = 0x00
        else:
            val = _mem.dtmf2.ttalert
        ttalert = RadioSetting("dtmf2.ttalert", "Alert tone/transpond",
                               RadioSettingValueList(LIST_TTALERT,
                                   LIST_TTALERT[val]))
        dtmf.append(ttalert)

        ttautod = RadioSetting("dtmf.ttautod", "Auto dial group",
                               RadioSettingValueList(LIST_TTAUTOD,
                                   LIST_TTAUTOD[_mem.dtmf.ttautod]))
        dtmf.append(ttautod)

        # setup 9 dtmf autodial entries
        for i in map(str, range(1, 10)):
            objname = "code" + i
            strname = "Code " + str(i)
            dtmfsetting = getattr(_mem.dtmfcode, objname)
            dtmflen = getattr(_mem.dtmfcode, objname + "_len")
            dtmfstr = self._bbcd2dtmf(dtmfsetting, dtmflen)
            code = RadioSettingValueString(0, 16, dtmfstr)
            code.set_charset(DTMF_CHARS + list(" "))
            rs = RadioSetting("dtmfcode." + objname, strname, code)
            dtmf.append(rs)

        return top
def set_settings(self, settings):
_settings = self._memobj.settings
_mem = self._memobj
for element in settings:
if not isinstance(element, RadioSetting):
self.set_settings(element)
continue
else:
try:
name = element.get_name()
if "." in name:
bits = name.split(".")
obj = self._memobj
for bit in bits[:-1]:
if "/" in bit:
bit, index = bit.split("/", 1)
index = int(index)
obj = getattr(obj, bit)[index]
else:
obj = getattr(obj, bit)
setting = bits[-1]
else:
obj = _settings
setting = element.get_name()
if element.has_apply_callback():
LOG.debug("Using apply callback")
element.run_apply_callback()
elif setting == "line16":
setattr(obj, setting, str(element.value).rstrip(
" ").ljust(16, "\xFF"))
elif setting == "line32":
setattr(obj, setting, str(element.value).rstrip(
" ").ljust(32, "\xFF"))
elif setting == "wbanda":
setattr(obj, setting, int(element.value) + 1)
elif setting == "wbandb":
setattr(obj, setting, int(element.value) + 4)
elif setting in ["offseta", "offsetb"]:
val = element.value
value = int(val.get_value() * 100)
setattr(obj, setting, value)
elif setting in ["ttdgt", "ttint", "tt1stdgt"]:
setattr(obj, setting, int(element.value) + 5)
elif setting in ["tt1stdly", "ttdlyqt"]:
setattr(obj, setting, int(element.value) + 2)
elif re.match('code\d', setting):
# set dtmf length field and then get bcd dtmf
dtmfstrlen = len(str(element.value).strip())
setattr(_mem.dtmfcode, setting + "_len", dtmfstrlen)
dtmfstr = self._dtmf2bbcd(element.value)
setattr(_mem.dtmfcode, setting, dtmfstr)
elif element.value.get_mutable():
LOG.debug("Setting %s = %s" % (setting, element.value))
setattr(obj, setting, element.value)
except Exception, e:
LOG.debug(element.get_name())
raise
@classmethod
def match_model(cls, filedata, filename):
match_size = False
match_model = False
# testing the file data size
if len(filedata) == MEM_SIZE:
match_size = True
# testing the firmware model fingerprint
match_model = model_match(cls, filedata)
if match_size and match_model:
return True
else:
return False
@directory.register
class UV50X3(VGCStyleRadio):
    """BTech UV-50X3"""
    MODEL = "UV-50X3"
    IDENT = UV50X3_id  # firmware fingerprint checked by model_match/_do_ident
class UV50X3Left(UV50X3):
    # sub-device for the left VFO (see VGCStyleRadio.get_sub_devices)
    VARIANT = "Left"
    _vfo = "left"
class UV50X3Right(UV50X3):
    # sub-device for the right VFO (see VGCStyleRadio.get_sub_devices)
    VARIANT = "Right"
    _vfo = "right"
| tylert/chirp.hg | chirp/drivers/vgc.py | Python | gpl-3.0 | 49,667 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import collections
import unittest
from lxml.builder import E
from odoo.tests import common
from odoo.tools.convert import _eval_xml
# Shorthands for building <field>/<value> XML nodes with lxml's E-factory.
Field = E.field
Value = E.value
class TestEvalXML(common.TransactionCase):
    """Unit tests for odoo.tools.convert._eval_xml over <field>/<value>
    nodes of the various supported `type` attributes."""

    def eval_xml(self, node, obj=None):
        """Evaluate an XML node the way convert.py does during data load."""
        return _eval_xml(obj, node, self.env)

    def test_char(self):
        self.assertEqual(
            self.eval_xml(Field("foo")),
            "foo")
        # without a type attribute the text is taken verbatim, not eval'd
        self.assertEqual(
            self.eval_xml(Field("None")),
            "None")

    def test_int(self):
        # "None" is special-cased for ints and yields an actual None
        # (previously asserted with an unprofessional, unhelpful failure
        # message; replaced with a descriptive one)
        self.assertIsNone(
            self.eval_xml(Field("None", type='int')),
            "int-typed 'None' should evaluate to None")
        # surrounding whitespace is stripped before conversion
        self.assertEqual(
            self.eval_xml(Field(" 42 ", type="int")),
            42)

        with self.assertRaises(ValueError):
            self.eval_xml(Field("4.82", type="int"))

        with self.assertRaises(ValueError):
            self.eval_xml(Field("Whelp", type="int"))

    def test_float(self):
        self.assertEqual(
            self.eval_xml(Field("4.78", type="float")),
            4.78)

        # unlike int, float has no "None" special case
        with self.assertRaises(ValueError):
            self.eval_xml(Field("None", type="float"))

        with self.assertRaises(ValueError):
            self.eval_xml(Field("Foo", type="float"))

    def test_list(self):
        self.assertEqual(
            self.eval_xml(Field(type="list")),
            [])

        # each <value> child is evaluated with its own type attribute
        self.assertEqual(
            self.eval_xml(Field(
                Value("foo"),
                Value("5", type="int"),
                Value("4.76", type="float"),
                Value("None", type="int"),
                type="list"
            )),
            ["foo", 5, 4.76, None])

    def test_file(self):
        # type="file" resolves relative to the addon module and returns a
        # "module,path" reference; the file must exist
        Obj = collections.namedtuple('Obj', ['module', 'idref'])
        obj = Obj('test_convert', None)
        self.assertEqual(
            self.eval_xml(Field('test_file.txt', type='file'), obj),
            'test_convert,test_file.txt')

        with self.assertRaises(IOError):
            self.eval_xml(Field('test_nofile.txt', type='file'), obj)

    @unittest.skip("not tested")
    def test_xml(self):
        pass

    @unittest.skip("not tested")
    def test_html(self):
        pass
| ayepezv/GAD_ERP | openerp/addons/test_convert/tests/test_convert.py | Python | gpl-3.0 | 2,290 |
# -*- coding:utf-8 -*-
import re
# Parsing phone numbers: area code, trunk, number and an optional
# extension, separated by any run of non-digit characters.
phonePattern = re.compile(r'^(\d{3})\D*(\d{3})\D*(\d{4})\D*(\d*)$')
print phonePattern.search('80055512121234').groups()
# ('800', '555', '1212', '1234')
print phonePattern.search('800.555.1212 x1234').groups()
# ('800', '555', '1212', '1234')
print phonePattern.search('800-555-1212').groups()
# ('800', '555', '1212', '')
# NOTE(review): the pattern is anchored with '^' and begins with \d, so the
# leading '(' below makes search() return None — this line prints None, not
# a match.  Presumably a deliberate demo of the pattern's limit; confirm.
print phonePattern.search('(800)5551212 x1234')
'''
quad_function_plot.py
Plot a Quadratic function
'''
import matplotlib.pyplot as plt
def draw_graph(x, y):
    """Plot y against x as a line chart and display the figure window."""
    plt.plot(x, y)
    plt.show()
def quadratic_values(xs, a=1, b=2, c=1):
    """Return a*x**2 + b*x + c evaluated at each x in *xs*.

    The defaults reproduce the original hard-coded polynomial
    x**2 + 2*x + 1; the coefficients are parameterized so other
    quadratics can be plotted with the same helper.
    """
    return [a * x * x + b * x + c for x in xs]


if __name__ == '__main__':
    # Sample the polynomial at evenly spaced x values and plot it.
    x_values = range(-100, 100, 20)
    y_values = quadratic_values(x_values)
    draw_graph(x_values, y_values)
| doingmathwithpython/code | chapter2/solutions/quad_function_plot.py | Python | mit | 419 |
# pylint: disable = line-too-long, multiple-statements, missing-module-attribute
"""https://bitbucket.org/logilab/pylint/issue/111/false-positive-used-before-assignment-with"""
# Python 2 "except IOError, err" binds err; pylint must not report the use
# of err below as used-before-assignment (this file is a regression input
# and its syntax must stay exactly as written).
try: raise IOError(1, "a")
except IOError, err: print err
| godfryd/pylint | test/input/func_noerror_used_before_assignment.py | Python | gpl-2.0 | 236 |
"""Greek language corpora available for download or loading locally.
All remote corpora hosted by github on the cltk organization account, eg:
'http://github.com/cltk' + name
"""
# Registry of Greek corpora.  Each entry is a dict with:
#   name     -- corpus/repository name (remote corpora live under
#               http://github.com/cltk, per the module docstring)
#   location -- 'remote' (downloadable) or 'local' (user-supplied)
#   type     -- text, lexicon, model, treebank, software, training_set
#   encoding/markup -- present only for text-bearing corpora
GREEK_CORPORA = [
    {'name': 'greek_software_tlgu',
     'location': 'remote',
     'type': 'software'},
    {'encoding': 'utf-8',
     'markup': 'tei_xml',
     'name': 'greek_text_perseus',
     'location': 'remote',
     'type': 'text'},
    # PHI7 and TLG are commercial corpora the user must already own locally.
    {'encoding': 'latin-1',
     'markup': 'beta_code',
     'name': 'phi7',
     'location': 'local',
     'type': 'text'},
    {'encoding': 'latin-1',
     'markup': 'beta_code',
     'name': 'tlg',
     'location': 'local',
     'type': 'text'},
    {'encoding': 'utf-8',
     'markup': 'plaintext',
     'name': 'greek_proper_names_cltk',
     'location': 'remote',
     'type': 'lexicon'},
    {'name': 'greek_models_cltk',
     'location': 'remote',
     'type': 'model'},
    {'encoding': 'utf-8',
     'markup': 'xml',
     'name': 'greek_treebank_perseus',
     'location': 'remote',
     'type': 'treebank'},
    # NOTE(review): 'encoding': 'xml' looks like it may be swapped with the
    # markup value — every other entry uses a charset name here.  Confirm
    # against the consumer before changing.
    {'encoding': 'xml',
     'markup': 'plaintext',
     'name': 'greek_lexica_perseus',
     'location': 'remote',
     'type': 'lexicon'},
    {'encoding': 'utf-8',
     'markup': 'plaintext',
     'name': 'greek_training_set_sentence_cltk',
     'location': 'remote',
     'type': 'training_set'},
]
| marpozzi/cltk | cltk/corpus/greek/corpora.py | Python | mit | 1,348 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django.conf.urls import url
from starthinker_ui.recipe import views
# URL routes for the recipe app.  Routes capturing (?P<pk>\d+) operate on a
# single Recipe; the optional '?' on edit/manual/download allows a blank pk
# (presumably "create new" / "current" — confirm in views).
urlpatterns = [
    # Recipe CRUD and lifecycle actions.
    url(r'^recipe/edit/(?P<pk>\d+)?/?$', views.recipe_edit, name='recipe.edit'),
    url(r'^recipe/manual/(?P<pk>\d+)?/?$', views.recipe_manual, name='recipe.manual'),
    url(r'^recipe/delete/(?P<pk>\d+)/$', views.recipe_delete, name='recipe.delete'),
    url(r'^recipe/run/(?P<pk>\d+)/$', views.recipe_run, name='recipe.run'),
    url(r'^recipe/cancel/(?P<pk>\d+)/$', views.recipe_cancel, name='recipe.cancel'),
    url(r'^recipe/status/(?P<pk>\d+)/$', views.recipe_status, name='recipe.status'),
    # Recipe export in various formats.
    url(r'^recipe/download/(?P<pk>\d+)?/?$', views.recipe_download, name='recipe.download'),
    url(r'^recipe/download/json/(?P<pk>\d+)/$', views.recipe_json, name='recipe.json'),
    url(r'^recipe/download/colab/(?P<pk>\d+)/$', views.recipe_colab, name='recipe.colab'),
    url(r'^recipe/download/python/(?P<pk>\d+)/$', views.recipe_python, name='recipe.python'),
    url(r'^recipe/download/airflow/(?P<pk>\d+)/$', views.recipe_airflow, name='recipe.airflow'),
    # Start/stop endpoints take no pk in the URL — parameters come from the
    # request body (confirm in views.recipe_start / views.recipe_stop).
    url(r'^recipe/start/$', views.recipe_start, name='recipe.start'),
    url(r'^recipe/stop/$', views.recipe_stop, name='recipe.stop'),
    # The recipe list doubles as the site landing page.
    url(r'^$', views.recipe_list, name='recipe.list'),
    url(r'^recipe/autoscale/$', views.autoscale, name='recipe.autoscale'),
]
| google/starthinker | starthinker_ui/recipe/urls.py | Python | apache-2.0 | 2,064 |
from django.conf.urls import patterns, url
from django.contrib import admin
# Populate the admin site with all registered ModelAdmins.
admin.autodiscover()

# Account routes (old-style string view references resolved against the
# 'account.views' prefix).
urlpatterns = patterns('account.views',
                       # Serve a user's public SSH key as id_rsa.pub.
                       url(r"^(?P<username>[\w.@+-]+)/id_rsa.pub$",
                           "get_public_key",
                           name="get_public_key"),
                       # Per-user settings page.
                       url(r'^(?P<username>[\w.@+-]+)/$',
                           "user_settings",
                           name="user_settings"),
                       )

# @add_account_page pages
# Catch-all per-user sub-page; must stay LAST so the more specific routes
# above win first.
urlpatterns += patterns('account.views',
                        url(r'^(?P<username>[\w.@+-]+)/(?P<page>[\w.@+-]+)/$',
                            "account_page",
                            name="account_page"),
                        )
| crcollins/chemtools-webapp | account/urls.py | Python | mit | 747 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.